repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/crawl_webpage.py | examples/di/crawl_webpage.py | # -*- encoding: utf-8 -*-
"""
@Date : 2024/01/24 15:11:27
@Author : orange-crow
@File : crawl_webpage.py
"""
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.tools.libs.web_scraping import view_page_element_to_scrape
# Request for scraping the ICLR 2024 paper list.
# BUGFIX: the string originally opened with four quotes (`""""`), which made the
# first character of the prompt a stray `"`; fixed to a plain triple-quoted string.
PAPER_LIST_REQ = """
Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. paper title must include `multiagent` or `large language model`.
**Notice: view the page element before writing scraping code**
"""
# Prompt: scrape first-page product data (name, price, URL, image URL) from a
# demo e-commerce site into a csv.
ECOMMERCE_REQ = """
Get products data from website https://scrapeme.live/shop/ and save it as a csv file.
The first page product name, price, product URL, and image URL must be saved in the csv.
**Notice: view the page element before writing scraping code**
"""
# Prompt (in Chinese): crawl startup-financing flash news ("快讯") from the 36kr
# platform, inspect the raw html, derive regexes for title/link/time, keep the
# last 3 days of items (preview 5 as list[dict]), then save everything to csv.
NEWS_36KR_REQ = """从36kr创投平台https://pitchhub.36kr.com/financing-flash 所有初创企业融资的信息, **注意: 这是一个中文网站**;
下面是一个大致流程, 你会根据每一步的运行结果对当前计划中的任务做出适当调整:
1. 爬取并本地保存html结构;
2. 直接打印第7个*`快讯`*关键词后2000个字符的html内容, 作为*快讯的html内容示例*;
3. 反思*快讯的html内容示例*中的规律, 设计正则匹配表达式来获取*`快讯`*的标题、链接、时间;
4. 筛选最近3天的初创企业融资*`快讯`*, 以list[dict]形式打印前5个。
5. 将全部结果存在本地csv中
**Notice: view the page element before writing scraping code**
"""
async def main():
    """Run a DataInterpreter on the e-commerce scraping request."""
    interpreter = DataInterpreter(tools=[view_page_element_to_scrape.__name__])
    await interpreter.run(ECOMMERCE_REQ)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/use_browser.py | examples/di/use_browser.py | import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
# Prompt: browse the MetaGPT doc site and find the list of supported LLM APIs.
MG_LLM_CONFIG_REQ = """
This is a link to the doc site of MetaGPT project: https://docs.deepwisdom.ai/main/en/
Check where you can go to on the site and try to find out the list of LLM APIs supported by MetaGPT.
Don't write all codes in one response, each time, just write code for one step.
"""
# Prompt: find and summarize the first `multiagent` paper in the ICLR 2024 stats.
# BUGFIX: originally opened with four quotes (`""""`), leaking a stray `"` into
# the prompt text; fixed to a plain triple-quoted string.
PAPER_LIST_REQ = """
At https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
find the first paper whose title includes `multiagent`, open it and summarize its abstract.
Don't write all codes in one response, each time, just write code for one step.
"""
# Prompt: open the first DataInterpreter-related GitHub issue and summarize it.
DESCRIBE_GITHUB_ISSUE_REQ = """
Visit https://github.com/geekan/MetaGPT, navigate to Issues page, open the first issue related to DataInterpreter, then summarize what the issue is in one sentence.
Don't write all codes in one response, each time, just write code for one step.
"""
async def main():
    """Drive a browser-equipped DataInterpreter through the doc-site task."""
    interpreter = DataInterpreter(tools=["Browser"], react_mode="react")
    await interpreter.run(MG_LLM_CONFIG_REQ)


if __name__ == "__main__":
    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/machine_learning_with_tools.py | examples/di/machine_learning_with_tools.py | import asyncio
from metagpt.roles.di.data_interpreter import DataInterpreter
async def main(requirement: str):
    """Run a reflective DataInterpreter (all tools enabled) on *requirement*."""
    interpreter = DataInterpreter(use_reflection=True, tools=["<all>"])
    await interpreter.run(requirement)


if __name__ == "__main__":
    # Point these at a local copy of the Titanic dataset before running.
    data_path = "your/path/to/titanic"
    train_path = f"{data_path}/split_train.csv"
    eval_path = f"{data_path}/split_eval.csv"
    requirement = f"This is a titanic passenger survival dataset, your goal is to predict passenger survival outcome. The target column is Survived. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report accuracy on the eval data. Train data path: '{train_path}', eval data path: '{eval_path}'."
    asyncio.run(main(requirement))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/arxiv_reader.py | examples/di/arxiv_reader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.tools.libs.web_scraping import view_page_element_to_scrape
async def main():
    """Scrape last week's arxiv listings and analyze LLM/agent paper titles."""
    url_template = "https://arxiv.org/list/{tag}/pastweek?skip=0&show=300"
    categories = ["cs.ai", "cs.cl", "cs.lg", "cs.se"]
    urls = [url_template.format(tag=category) for category in categories]
    prompt = f"""This is a collection of arxiv urls: '{urls}' .
Record each article, remove duplicates by title (they may have multiple tags), filter out papers related to
large language model / agent / llm, print top 100 and visualize the word count of the titles"""
    interpreter = DataInterpreter(react_mode="react", tools=[view_page_element_to_scrape.__name__])
    await interpreter.run(prompt)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/InfiAgent-DABench/run_InfiAgent-DABench.py | examples/di/InfiAgent-DABench/run_InfiAgent-DABench.py | import asyncio
import json
from DABench import DABench
from metagpt.logs import logger
from metagpt.roles.di.data_interpreter import DataInterpreter
async def get_prediction(agent, requirement):
    """Run *agent* on *requirement* and extract the final result from its plan.

    The agent's textual output is expected to embed a JSON task list between
    the "Current Plan" and "## Current Task" markers; the last task's
    ``result`` field is the prediction.

    Args:
        agent: The agent instance used to generate predictions.
        requirement: The input requirement for which the prediction is made.

    Returns:
        The extracted prediction, or None if running/parsing fails.
    """
    try:
        raw = await agent.run(requirement)
        plan_text = str(raw).split("Current Plan")[1].split("## Current Task")[0]
        tasks = json.loads(plan_text)
        return tasks[-1]["result"]
    except Exception as e:
        # Swallow per-task failures so a batch run can continue; just log them.
        logger.info(f"Error processing requirement: {requirement}. Error: {e}")
        return None
async def evaluate_all(agent, k):
    """Evaluate every DABench task with *agent*, running k tasks at a time.

    Builds one prediction coroutine per benchmark question, awaits them in
    batches of size *k*, drops failed (None) predictions, and logs the
    aggregate scores from DABench.eval_all.

    Args:
        agent: The baseline agent used for making predictions.
        k (int): Number of tasks processed concurrently per batch.
    """
    bench = DABench()
    id_list, predictions = [], []
    pending = []
    # One prediction task per benchmark question.
    for question_id in bench.answers:
        formatted = bench.generate_formatted_prompt(question_id)
        pending.append(get_prediction(agent, formatted))
        id_list.append(question_id)
    # Await the tasks in batches of size k.
    for start in range(0, len(pending), k):
        batch = pending[start : start + k]
        batch_results = await asyncio.gather(*batch)
        predictions.extend(p for p in batch_results if p is not None)
    logger.info(bench.eval_all(id_list, predictions))
def main(k=5):
    """Entry point: build the agent and run the full benchmark evaluation."""
    interpreter = DataInterpreter()
    asyncio.run(evaluate_all(interpreter, k))


if __name__ == "__main__":
    main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/InfiAgent-DABench/run_InfiAgent-DABench_single.py | examples/di/InfiAgent-DABench/run_InfiAgent-DABench_single.py | import fire
from DABench import DABench
from metagpt.logs import logger
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.utils.recovery_util import save_history
async def main(id=0):
    """Run one DABench task by id, log the outcome, and save the run history."""
    bench = DABench()
    prompt = bench.generate_formatted_prompt(id)
    interpreter = DataInterpreter()
    outcome = await interpreter.run(prompt)
    logger.info(outcome)
    save_history(role=interpreter)
    _, is_correct = bench.eval(id, str(outcome))
    logger.info(f"Prediction is {'correct' if is_correct else 'incorrect'}.")


if __name__ == "__main__":
    fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/InfiAgent-DABench/run_InfiAgent-DABench_all.py | examples/di/InfiAgent-DABench/run_InfiAgent-DABench_all.py | import fire
import pandas as pd
from DABench import DABench
from metagpt.logs import logger
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.utils.recovery_util import save_history
async def main():
    """Evaluate all DABench tasks sequentially and export results to Excel.

    For each question: build the prompt, run a fresh DataInterpreter, save the
    run history, and score the result. A failure on one task is recorded as an
    incorrect empty prediction instead of aborting the whole run.
    """
    bench = DABench()
    id_list, predictions, labels, is_true = [], [], [], []
    for key in bench.answers:
        id_list.append(key)
        labels.append(str(bench.get_answer(key)))
        try:
            requirement = bench.generate_formatted_prompt(key)
            di = DataInterpreter()
            result = await di.run(requirement)
            logger.info(result)
            save_history(role=di)
            temp_prediction, temp_istrue = bench.eval(key, str(result))
            is_true.append(str(temp_istrue))
            predictions.append(str(temp_prediction))
        except Exception as e:
            # BUGFIX: this was a bare `except:` whose handler called
            # bench.eval(key, "") -- which itself raises on an empty transcript
            # (the "Current Plan" split fails), crashing the loop. Record the
            # failure explicitly and keep going.
            logger.info(f"Error evaluating task {key}: {e}")
            is_true.append(str(False))
            predictions.append("")
    df = pd.DataFrame({"Label": labels, "Prediction": predictions, "T/F": is_true})
    df.to_excel("DABench_output.xlsx", index=False)
    logger.info(bench.eval_all(id_list, predictions))


if __name__ == "__main__":
    fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/di/InfiAgent-DABench/DABench.py | examples/di/InfiAgent-DABench/DABench.py | import asyncio
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import nest_asyncio
from examples.di.requirements_prompt import DABENCH
from metagpt.const import DABENCH_PATH
from metagpt.logs import logger
from metagpt.utils.exceptions import handle_exception
def evaluate_accuracy_by_question(results: list) -> float:
    """Fraction of questions answered entirely correctly.

    Referenced from https://github.com/InfiAgent/InfiAgent/blob/main/examples/DA-Agent/eval_closed_form.py
    A result counts as correct only when every sub-question in its
    'correctness' mapping is True.

    Args:
        results: Result dicts, each optionally holding a 'correctness' dict
            mapping metric name -> bool. (FIX: the annotation previously said
            ``dict``, but callers pass a list of dicts.)

    Returns:
        float: Proportion of fully-correct results, rounded to four decimal
            places; 0 if there are no results.
    """
    correct = sum("correctness" in result and all(result["correctness"].values()) for result in results)
    total = len(results)
    return round(correct / total, 4) if total > 0 else 0
def evaluate_accuracy_by_sub_question(results: list) -> float:
    """Fraction of correct sub-questions across all results.

    Referenced from https://github.com/InfiAgent/InfiAgent/blob/main/examples/DA-Agent/eval_closed_form.py
    Counts every sub-question in every result's 'correctness' mapping.

    Args:
        results: Result dicts, each optionally holding a 'correctness' dict
            mapping metric name -> bool. (FIX: the annotation previously said
            ``dict``, but callers pass a list of dicts.)

    Returns:
        float: Ratio of correct sub-questions, rounded to four decimal places;
            0 if there are no sub-questions.
    """
    correct = sum(sum(result["correctness"].values()) for result in results if "correctness" in result)
    total = sum(len(result["correctness"]) for result in results if "correctness" in result)
    return round(correct / total, 4) if total > 0 else 0
def evaluate_accuracy_proportional_by_sub_question_adjusted(results: list) -> float:
    """Average per-question score, weighting each sub-question equally within its question.

    Referenced from https://github.com/InfiAgent/InfiAgent/blob/main/examples/DA-Agent/eval_closed_form.py
    Each sub-question contributes 1/len(sub-questions) to its question's score,
    so every question is worth at most 1 regardless of how many sub-questions
    it has.

    Args:
        results: Result dicts, each optionally holding a 'correctness' dict
            mapping metric name -> bool. (FIX: the annotation previously said
            ``dict``, but callers pass a list of dicts.)

    Returns:
        float: Mean score across all results, rounded to four decimal places;
            0 if there are no results.
    """
    total_score = 0
    for result in results:
        if "correctness" in result:
            sub_question_count = len(result["correctness"])
            score_per_sub_question = 1 / sub_question_count if sub_question_count > 0 else 0
            total_score += sum(result["correctness"].values()) * score_per_sub_question
    return round(total_score / len(results), 4) if results else 0
async def reformat(question: str, format: str, response: str) -> str:
    """
    Asynchronously reformat *response* to match the required answer format.

    Referenced from https://github.com/InfiAgent/InfiAgent/blob/main/examples/DA-Agent/reformat.py
    Builds a few-shot prompt (the ``demons`` examples) instructing the LLM to
    re-emit the answer strictly as "@answer_name[answer]" entries without
    altering any of the original numbers or text.

    Args:
        question (str): The original question posed by the user.
        format (str): The formatting requirements the answer must follow.
        response (str): The initial LLM response that needs reformatting.

    Returns:
        str: The reformatted response produced by the LLM.
    """
    system_prompt = "You are a helpful assistant."
    # Few-shot demonstrations of the \Format{{...}} -> \Answer{{...}} mapping.
    demons = """\Format{{
@shapiro_wilk_statistic[test_statistic]
@shapiro_wilk_p_value[p_value]
where "test_statistic" is a number between 0 and 1 representing the Shapiro-Wilk test statistic. Rounding off the answer to two decimal places.
where "p_value" is a number between 0 and 1 representing the p-value from the Shapiro-Wilk test. Rounding off the answer to four decimal places.
}}
\Answer{{
@shapiro_wilk_statistic[0.56]
@shapiro_wilk_p_value[0.0002]
}}
\Format{{
@total_votes_outliers_num[outlier_num]
where "outlier_num" is an integer representing the number of values considered outliers in the 'total_votes' column.
}}
\Answer{{
@total_votes_outliers[10]
}}
"""
    reformat_template = """You should strictly follow the output requirements in the Format part. Here're some examples: {demons}.
Your answer should contain all the \"@answer_name[answer]\" in the order mentioned, each \"answer\" should be in the range of value as required. You need to keep the original numbers and text, just reformat without making any changes.
The format requirements of this question is:
{format}. You need to keep the original numbers and text, just reformat without making any changes. Please give your answer:"""
    # Replay the conversation (question -> draft answer), then request the
    # strictly formatted version.
    # NOTE(review): `ask` is annotated to take a str question but receives the
    # full message list here -- presumably llm.aask accepts both; confirm.
    messages = [
        {"role": "user", "content": question},
        {"role": "assistant", "content": response},
        {"role": "user", "content": reformat_template.format(demons=demons, format=format)},
    ]
    rsp = await ask(messages, system_prompt)
    return rsp
def load_jsonl(file_path: Union[Path, str]) -> List[Dict[str, Any]]:
    """Read a JSONL file and return its records as a list of dicts.

    Args:
        file_path (Union[Path, str]): Path (or path string) of the JSONL file.

    Returns:
        List[Dict[str, Any]]: One parsed object per line, in file order.
    """
    path = Path(file_path)
    with open(path, "r", encoding="utf-8") as handle:
        return [json.loads(line) for line in handle]
def compare_predictions(pred_dict: dict, true_label: list) -> bool:
    """Check every true (metric, value) pair against the predictions.

    Numeric true values must match the prediction within 1e-6; string values
    are compared case-insensitively (with commas stripped from the truth).

    Args:
        pred_dict (dict): Mapping of predicted metric name -> value (float or str).
        true_label (list): (metric, value) tuples of the ground truth.

    Returns:
        bool: True iff every true metric is present and matches.
    """
    sorted_true_label = sorted(true_label, key=lambda x: x[0])  # Compare in a stable, name-sorted order
    for metric, true_value in sorted_true_label:
        try:
            true_value = float(true_value)  # Attempt to treat the truth as numeric
        except ValueError:
            true_value = true_value.replace(",", "")  # Keep as a cleaned string otherwise
        if isinstance(true_value, (int, float)):
            # BUGFIX: a missing or non-numeric prediction is a mismatch. The
            # original computed abs(str - float) and raised TypeError when the
            # prediction value was a string.
            if metric not in pred_dict or not isinstance(pred_dict[metric], (int, float)):
                return False
            if abs(pred_dict[metric] - true_value) > 1e-6:
                return False
        if isinstance(true_value, str) and (
            metric not in pred_dict or str(pred_dict[metric]).lower() != str(true_value).lower()
        ):
            return False
    return True
async def ask(question: str, system_prompt: str) -> str:
    """
    Send *question* to the LLM with *system_prompt* and return its reply.

    Args:
        question (str): The question for the LLM. (NOTE(review): `reformat`
            passes a full message list here rather than a str -- presumably
            llm.aask accepts both; confirm.)
        system_prompt (str): System instruction passed alongside the question.

    Returns:
        str: The LLM's response.
    """
    from metagpt.llm import LLM  # local import keeps the LLM dependency out of module import time

    llm = LLM()
    rsp = await llm.aask(question, system_msgs=[system_prompt])
    return rsp
def parse_prediction(prediction: str) -> dict:
    """Turn an "@metric[value]" formatted string into a {metric: value} dict.

    Segments are split on "@"; within each segment the text before the first
    bracket is the metric name and the bracketed text is the value. Commas are
    stripped everywhere, and values are converted to float when possible.

    Args:
        prediction (str): A string of "@metric[value]" entries.

    Returns:
        dict: Metric name -> value (float when convertible, else str).
    """
    parsed = {}
    for segment in prediction.split("@"):
        if not segment:
            continue  # Skip empty pieces produced by leading/duplicate "@"
        pieces = [p.replace(",", "") for p in re.split(r"[\[\]]", segment.strip())]
        pieces = [p for p in pieces if p]
        name = pieces[0].strip().replace(",", "")
        raw_value = pieces[-1].replace(",", "").replace(":", "")
        try:
            parsed[name] = float(raw_value)
        except ValueError:
            parsed[name] = raw_value  # Non-numeric values stay as strings
    return parsed
class DABench:
    """Question loader and answer evaluator for the InfiAgent-DABench benchmark."""

    def __init__(
        self,
        questions_file: Path = Path(DABENCH_PATH) / "da-dev-questions.jsonl",
        answers_file: Path = Path(DABENCH_PATH) / "da-dev-labels.jsonl",
        template: str = "",
    ):
        """Load questions and labels from JSONL files and set the prompt template.

        Args:
            questions_file (Path): JSONL file containing the benchmark questions.
            answers_file (Path): JSONL file containing the ground-truth labels.
            template (str): Prompt template; falls back to DABENCH when empty.
        """
        # Both maps are keyed by integer question id.
        self.questions = {int(line["id"]): line for line in load_jsonl(questions_file)}
        self.answers = {int(line["id"]): line for line in load_jsonl(answers_file)}
        self.template = template if template else DABENCH

    def get_question(self, question_id: int) -> dict:
        """Return the question dict for *question_id*, or "Question not found."."""
        return self.questions.get(question_id, "Question not found.")

    def generate_formatted_prompt(self, question_id: int) -> str:
        """Fill the template with the question's text, constraints, format, data file and level."""
        temp = self.get_question(question_id)
        return self.template.format(
            question=temp["question"],
            constraints=temp["constraints"],
            format=temp["format"],
            file_name=str(DABENCH_PATH) + "/da-dev-tables/" + temp["file_name"],
            level=temp["level"],
        )

    def get_answer(self, answer_id: int) -> list:
        """Return the label entry for *answer_id*, or "Answer not found."."""
        return self.answers.get(answer_id, "Answer not found.")

    @handle_exception(exception_msg="Error parsing cleaned prediction", default_return=(None, False))
    def parse_cleaned_prediction(self, cleaned_prediction: str, true_label: Any) -> Tuple[str, bool]:
        """Parse *cleaned_prediction* and report whether it matches *true_label*.

        Returns:
            Tuple[str, bool]: (cleaned_prediction, matched); matched is True
                only when parsing succeeds and every metric agrees.
        """
        if cleaned_prediction:
            pred_dict = parse_prediction(cleaned_prediction)
            if pred_dict is not None and compare_predictions(pred_dict, true_label):
                return cleaned_prediction, True
        return cleaned_prediction, False

    @handle_exception(exception_msg="Error during async reformat", default_return=(None, False))
    def async_reformat_prediction(self, id: int, result: str) -> str:
        """Reformat *result* via the LLM and extract the "Answer{{...}}" block.

        Falls back to the full reformatted text when no answer block is found.
        """
        question = self.get_question(id)["question"]
        question_format = self.get_question(id)["format"]
        # Blocking call into the async helper; eval() applies nest_asyncio so
        # this also works when an event loop is already running.
        prediction = asyncio.run(reformat(question, question_format, result))
        answer_part = prediction.split("Answer{{") if "Answer{{" in prediction else []
        if len(answer_part) > 1:
            return answer_part[1].split("}}")[0].strip()
        return prediction

    def eval(self, id: int, result: str) -> Tuple[str, bool]:
        """Score a raw DataInterpreter transcript against the true label.

        The transcript must contain a JSON plan between "Current Plan" and
        "## Current Task"; the last task's ``result`` field is the prediction.
        If direct parsing fails to match, the prediction is reformatted by the
        LLM and compared once more.

        Returns:
            Tuple[str, bool]: (final prediction, matched).
        """
        true_label = self.get_answer(id)["common_answers"]
        nest_asyncio.apply()  # allow nested asyncio.run() calls downstream
        result = json.loads(str(result).split("Current Plan")[1].split("## Current Task")[0])[-1]["result"].strip()
        cleaned_prediction = result.replace("{", "").replace("}", "").replace("'", "")
        parsed_result = self.parse_cleaned_prediction(cleaned_prediction, true_label)
        if parsed_result[1]:
            return parsed_result
        # Direct parsing failed -- ask the LLM to reformat, then retry.
        prediction = self.async_reformat_prediction(id, result)
        pred_dict = parse_prediction(prediction)
        if pred_dict is not None and compare_predictions(pred_dict, true_label):
            return prediction, True
        return prediction, False

    @handle_exception(exception_msg="Error evaluating single prediction", default_return={})
    def single_eval(self, id: int, prediction: str) -> dict:
        """Per-metric correctness of one already-formatted prediction (used by eval_all).

        Returns:
            dict: True/False for each metric in the true label.
        """
        true_label = self.get_answer(id)["common_answers"]
        prediction = prediction.replace("{", "").replace("}", "").replace("'", "")
        pred_dict = parse_prediction(prediction)
        correctness = {metric: False for metric, _ in true_label}
        for metric, true_value in true_label:
            try:
                true_value = float(true_value)
            except ValueError:
                true_value = true_value.replace(",", "")
            if metric in pred_dict:
                # Numeric answers match within a small tolerance.
                if (
                    isinstance(true_value, (int, float))
                    and isinstance(pred_dict[metric], (int, float))
                    and abs(pred_dict[metric] - true_value) < 1e-6
                ):
                    correctness[metric] = True
                # BUGFIX: the original used `!=` here (together with a dead
                # `metric not in pred_dict` clause), marking string answers
                # correct exactly when they did NOT match. Compare for
                # case-insensitive equality, mirroring compare_predictions().
                if isinstance(true_value, str) and str(pred_dict[metric]).lower() == str(true_value).lower():
                    correctness[metric] = True
        return correctness

    def eval_all(self, id_list: list, predictions: list) -> dict:
        """Score predictions for *id_list* and return the three accuracy rates.

        Args:
            id_list (list): Question identifiers.
            predictions (list): Prediction strings, aligned with id_list.

        Returns:
            dict: accuracy_by_question, accuracy_by_sub_question and the
                proportionally-weighted sub-question accuracy.
        """
        results = []
        for id, prediction in zip(id_list, predictions):
            correct = self.single_eval(id, prediction)
            results.append({"id": id, "correctness": correct})
        return {
            "accuracy_by_question": evaluate_accuracy_by_question(results),
            "accuracy_by_sub_question": evaluate_accuracy_by_sub_question(results),
            "proportional_accuracy_by_sub_question": evaluate_accuracy_proportional_by_sub_question_adjusted(results),
        }
if __name__ == "__main__":
    # Smoke-test the evaluator against a few known answers.
    bench = DABench()
    id = 0
    prediction = "@mean_fare[34.65]"
    # NOTE(review): eval() expects a full interpreter transcript containing the
    # "Current Plan" / "## Current Task" markers; passing a bare prediction
    # string like this makes the split/json parsing fail -- single_eval() looks
    # like the intended call here. Confirm before relying on this demo.
    logger.info(bench.eval(id, prediction))
    ids = [0, 5, 6]
    predictions = [
        "@mean_fare[34.89]",
        "@correlation_coefficient[0.21]",
        "@mean_fare_child[31.09], @mean_fare_teenager[31.98], @mean_fare_adult[35.17], @mean_fare_elderly[43.47]",
    ]
    logger.info(bench.eval_all(ids, predictions))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/stanford_town/run_st_game.py | examples/stanford_town/run_st_game.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : entry of Stanford Town(ST/st) game
# README see `metagpt/ext/stanford_town/README.md`
import asyncio
from typing import Optional
import fire
from metagpt.ext.stanford_town.roles.st_role import STRole
from metagpt.ext.stanford_town.stanford_town import StanfordTown
from metagpt.ext.stanford_town.utils.const import STORAGE_PATH
from metagpt.ext.stanford_town.utils.mg_ga_transform import (
get_reverie_meta,
write_curr_sim_code,
write_curr_step,
)
from metagpt.ext.stanford_town.utils.utils import copy_folder
from metagpt.logs import logger
async def startup(
    idea: str, fork_sim_code: str, sim_code: str, temp_storage_path: str, investment: float = 30.0, n_round: int = 500
):
    """Boot a Stanford Town simulation forked from a previously stored one.

    Args:
        idea: Inner-voice instruction delivered to the first agent.
        fork_sim_code: Name of the stored simulation to fork from.
        sim_code: Name under which the new simulation state is saved.
        temp_storage_path: generative_agents temp-storage dir used to exchange state.
        investment: Budget for running the agents.
        n_round: Maximum number of simulation rounds.
    """
    town = StanfordTown()
    logger.info("StanfordTown init environment")
    # Fork: copy `storage/{fork_sim_code}` to `storage/{sim_code}` so the new run
    # starts from the old simulation's state.
    copy_folder(str(STORAGE_PATH.joinpath(fork_sim_code)), str(STORAGE_PATH.joinpath(sim_code)))

    # Role names come from `storage/{fork_sim_code}/reverie/meta.json`.
    reverie_meta = get_reverie_meta(fork_sim_code)
    roles = []
    sim_path = STORAGE_PATH.joinpath(sim_code)
    sim_path.mkdir(exist_ok=True)
    for idx, role_name in enumerate(reverie_meta["persona_names"]):
        role = STRole(
            name=role_name,
            profile=role_name,
            sim_code=sim_code,
            step=reverie_meta.get("step", 0),
            start_time=reverie_meta.get("start_date"),
            curr_time=reverie_meta.get("curr_time"),
            sec_per_step=reverie_meta.get("sec_per_step"),
            # Only the first persona receives the user's idea as an inner voice.
            has_inner_voice=idx == 0,
        )
        roles.append(role)

    # Point the shared temp storage at the new simulation so the frontend can follow it.
    write_curr_sim_code({"sim_code": sim_code}, temp_storage_path)
    write_curr_step({"step": reverie_meta.get("step", 0)}, temp_storage_path)

    await town.hire(roles)
    town.invest(investment)
    town.run_project(idea)
    await town.run(n_round)
def main(
    idea: str,
    fork_sim_code: str,
    sim_code: str,
    temp_storage_path: Optional[str] = None,
    investment: float = 30.0,
    n_round: int = 500,
):
    """
    Args:
        idea: idea works as an `inner voice` to the first agent.
        fork_sim_code: old simulation name to start with, choose one inside `generative_agents/environment/frontend_server/storage/`
        sim_code: new simulation name to save simulation result
        temp_storage_path: generative_agents temp_storage path inside `environment/frontend_server` to interact.
        investment: the investment of running agents
        n_round: rounds to run agents
    """
    # NOTE(review): `temp_storage_path` defaults to None here while `startup`
    # annotates it as `str` — confirm the downstream writers accept None.
    asyncio.run(
        startup(
            idea=idea,
            fork_sim_code=fork_sim_code,
            sim_code=sim_code,
            temp_storage_path=temp_storage_path,
            investment=investment,
            n_round=n_round,
        )
    )
if __name__ == "__main__":
    # CLI entry point: expose `main` through python-fire argument parsing.
    fire.Fire(main)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/stanford_town/__init__.py | examples/stanford_town/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/exp_pool/load_exps_from_log.py | examples/exp_pool/load_exps_from_log.py | """Load and save experiences from the log file."""
import json
from pathlib import Path
from metagpt.exp_pool import get_exp_manager
from metagpt.exp_pool.schema import LOG_NEW_EXPERIENCE_PREFIX, Experience
from metagpt.logs import logger
def load_exps(log_file_path: str) -> list[Experience]:
    """Load experiences from a log file.

    Scans the log for lines containing ``LOG_NEW_EXPERIENCE_PREFIX`` and parses
    the JSON payload after the prefix into ``Experience`` objects.

    Args:
        log_file_path (str): The path to the log file.

    Returns:
        list[Experience]: The experiences found in the log file; an empty list
        if the file does not exist (matches the declared return type so callers
        can iterate safely — previously this returned None).
    """
    if not Path(log_file_path).exists():
        logger.warning(f"`load_exps` called with a non-existent log file path: {log_file_path}")
        return []

    exps = []
    with open(log_file_path, "r", encoding="utf-8") as log_file:
        for line in log_file:
            if LOG_NEW_EXPERIENCE_PREFIX in line:
                # The payload is the JSON document logged right after the prefix.
                json_str = line.split(LOG_NEW_EXPERIENCE_PREFIX, 1)[1].strip()
                exp_data = json.loads(json_str)
                exps.append(Experience(**exp_data))

    logger.info(f"Loaded {len(exps)} experiences from log file: {log_file_path}")
    return exps
def save_exps(exps: list[Experience]):
    """Persist a batch of experiences into the experience pool.

    Args:
        exps (list[Experience]): Experiences to persist; an empty/falsy value is a no-op.
    """
    if exps:
        pool = get_exp_manager()
        pool.is_writable = True
        pool.create_exps(exps)
        logger.info(f"Saved {len(exps)} experiences.")
    else:
        logger.warning("`save_exps` called with an empty list of experiences.")
def get_log_file_path() -> str:
    """Retrieves the path to the log file.

    Returns:
        str: The path to the log file.

    Raises:
        ValueError: If the log file path cannot be found.
    """
    # NOTE(review): relies on loguru private internals (`logger._core.handlers`);
    # this may break across loguru versions — confirm against the pinned version.
    handlers = logger._core.handlers
    for handler in handlers.values():
        if "log" in handler._name:
            # `_name` wraps the sink path in quotes; strip the first and last
            # character to get the bare path — presumably always quoted, verify.
            return handler._name[1:-1]
    raise ValueError("Log file not found")
def main():
    """Load experiences from the active log file and store them in the pool."""
    save_exps(load_exps(get_log_file_path()))
if __name__ == "__main__":
    # Migrate experiences recorded in the current log file into the pool.
    main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/exp_pool/scorer.py | examples/exp_pool/scorer.py | import asyncio
from metagpt.exp_pool.scorers import SimpleScorer
# Request to implement quicksort in Python
REQ = "Write a program to implement quicksort in python."
# First response: Quicksort implementation without base case
RESP1 = """
def quicksort(arr):
return quicksort([x for x in arr[1:] if x <= arr[0]]) + [arr[0]] + quicksort([x for x in arr[1:] if x > arr[0]])
"""
# Second response: Quicksort implementation with base case
RESP2 = """
def quicksort(arr):
if len(arr) <= 1:
return arr
return quicksort([x for x in arr[1:] if x <= arr[0]]) + [arr[0]] + quicksort([x for x in arr[1:] if x > arr[0]])
"""
async def simple():
    """Evaluates two quicksort implementations using SimpleScorer.

    Example:
        {
            "val": 3,
            "reason": "The response attempts to implement quicksort but contains a critical flaw: it lacks a base case to terminate the recursion, which will lead to a maximum recursion depth exceeded error for non-empty lists. Additionally, the function does not handle empty lists properly. A correct implementation should include a base case to handle lists of length 0 or 1."
        }
    """
    scorer = SimpleScorer()
    # RESP1 lacks a base case and should score low; RESP2 is the fixed version.
    await scorer.evaluate(req=REQ, resp=RESP1)
    await scorer.evaluate(req=REQ, resp=RESP2)
async def main():
    # Run the simple-scorer demo end to end.
    await simple()


if __name__ == "__main__":
    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/exp_pool/init_exp_pool.py | examples/exp_pool/init_exp_pool.py | """Init experience pool.
Put some useful experiences into the experience pool.
"""
import asyncio
import json
from pathlib import Path
from metagpt.const import EXAMPLE_DATA_PATH
from metagpt.exp_pool import get_exp_manager
from metagpt.exp_pool.schema import EntryType, Experience, Metric, Score
from metagpt.logs import logger
from metagpt.utils.common import aread
async def load_file(filepath) -> list[dict]:
    """Asynchronously loads and parses a JSON file.

    Args:
        filepath: Path to the JSON file.

    Returns:
        A list of dictionaries parsed from the JSON file.
    """
    raw_text = await aread(filepath)
    return json.loads(raw_text)
async def add_exp(req: str, resp: str, tag: str, metric: Metric = None):
    """Store a single manual experience in the experience pool.

    Args:
        req: The request string.
        resp: The response string.
        tag: A tag for categorizing the experience.
        metric: Optional metric for the experience. Defaults to a score of 10.
    """
    chosen_metric = metric or Metric(score=Score(val=10, reason="Manual"))
    new_exp = Experience(
        req=req,
        resp=resp,
        entry_type=EntryType.MANUAL,
        tag=tag,
        metric=chosen_metric,
    )
    pool = get_exp_manager()
    pool.is_writable = True
    pool.create_exp(new_exp)
    logger.info(f"New experience created for the request `{req[:10]}`.")
async def add_exps(exps: list, tag: str):
    """Store multiple experiences in the experience pool concurrently.

    Args:
        exps: A list of experience dicts with "req" and "resp" keys.
        tag: A tag for categorizing the experiences.
    """
    coros = []
    for exp in exps:
        req = exp["req"]
        if not isinstance(req, str):
            # Non-string requests are serialized to JSON before storage.
            req = json.dumps(req)
        coros.append(add_exp(req=req, resp=exp["resp"], tag=tag))
    await asyncio.gather(*coros)
async def add_exps_from_file(tag: str, filepath: Path):
    """Loads experiences from a file and adds them to the experience pool.

    Args:
        tag: A tag for categorizing the experiences.
        filepath: Path to the file containing experiences.
    """
    loaded = await load_file(filepath)
    await add_exps(loaded, tag)
def query_exps_count():
    """Queries and logs the total count of experiences in the pool."""
    total = get_exp_manager().get_exps_count()
    logger.info(f"Experiences Count: {total}")
async def main():
    """Seed the pool with curated TeamLeader and Engineer experiences, then report the count."""
    await add_exps_from_file("TeamLeader.llm_cached_aask", EXAMPLE_DATA_PATH / "exp_pool/team_leader_exps.json")
    await add_exps_from_file("Engineer2.llm_cached_aask", EXAMPLE_DATA_PATH / "exp_pool/engineer_exps.json")
    query_exps_count()


if __name__ == "__main__":
    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/exp_pool/decorator.py | examples/exp_pool/decorator.py | """
This script demonstrates how to automatically store experiences using @exp_cache and query the stored experiences.
"""
import asyncio
import uuid
from metagpt.exp_pool import exp_cache, get_exp_manager
from metagpt.logs import logger
@exp_cache()
async def produce(req=""):
    # Embeds a random hex suffix, so a fresh execution yields a new value while a
    # hit from the experience-pool cache would return a previously stored one.
    return f"{req} {uuid.uuid4().hex}"
async def main():
    req = "Water"
    # First call computes a result; `@exp_cache` records it as an experience.
    resp = await produce(req=req)
    logger.info(f"The response of `produce({req})` is: {resp}")
    # Confirm the experience was stored by querying the pool with the same request.
    exps = await get_exp_manager().query_exps(req)
    logger.info(f"Find experiences: {exps}")


if __name__ == "__main__":
    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/examples/exp_pool/manager.py | examples/exp_pool/manager.py | """
Demonstrate the creation and querying of experiences.
This script creates a new experience, logs its creation, and then queries for experiences matching the same request.
"""
import asyncio
from metagpt.exp_pool import get_exp_manager
from metagpt.exp_pool.schema import EntryType, Experience
from metagpt.logs import logger
async def main():
    # Define the simple request and response
    req = "Simple req"
    resp = "Simple resp"

    # Add the new experience (created manually, hence EntryType.MANUAL)
    exp = Experience(req=req, resp=resp, entry_type=EntryType.MANUAL)
    exp_manager = get_exp_manager()
    exp_manager.create_exp(exp)
    logger.info(f"New experience created for the request `{req}`.")

    # Query for experiences matching the request
    exps = await exp_manager.query_exps(req)
    logger.info(f"Got experiences: {exps}")


if __name__ == "__main__":
    asyncio.run(main())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/config2.py | metagpt/config2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4 01:25
@Author : alexanderwu
@File : config2.py
"""
import os
from pathlib import Path
from typing import Dict, Iterable, List, Literal, Optional
from pydantic import BaseModel, Field, model_validator
from metagpt.configs.browser_config import BrowserConfig
from metagpt.configs.embedding_config import EmbeddingConfig
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.configs.mermaid_config import MermaidConfig
from metagpt.configs.omniparse_config import OmniParseConfig
from metagpt.configs.redis_config import RedisConfig
from metagpt.configs.role_custom_config import RoleCustomConfig
from metagpt.configs.role_zero_config import RoleZeroConfig
from metagpt.configs.s3_config import S3Config
from metagpt.configs.search_config import SearchConfig
from metagpt.configs.workspace_config import WorkspaceConfig
from metagpt.const import CONFIG_ROOT, METAGPT_ROOT
from metagpt.utils.yaml_model import YamlModel
class CLIParams(BaseModel):
    """CLI parameters"""

    project_path: str = ""
    project_name: str = ""
    inc: bool = False
    reqa_file: str = ""
    max_auto_summarize_code: int = 0
    git_reinit: bool = False

    @model_validator(mode="after")
    def check_project_path(self):
        """Check project_path and project_name"""
        # A given project path implies incremental mode on an existing repo;
        # the project name defaults to the directory name.
        if self.project_path:
            self.inc = True
            self.project_name = self.project_name or Path(self.project_path).name
        return self
class Config(CLIParams, YamlModel):
    """Configurations for MetaGPT"""

    # Key Parameters
    llm: LLMConfig

    # RAG Embedding
    embedding: EmbeddingConfig = EmbeddingConfig()

    # omniparse
    omniparse: OmniParseConfig = OmniParseConfig()

    # Global Proxy. Will be used if llm.proxy is not set
    proxy: str = ""

    # Tool Parameters
    search: SearchConfig = SearchConfig()
    enable_search: bool = False
    browser: BrowserConfig = BrowserConfig()
    mermaid: MermaidConfig = MermaidConfig()

    # Storage Parameters
    s3: Optional[S3Config] = None
    redis: Optional[RedisConfig] = None

    # Misc Parameters
    repair_llm_output: bool = False
    prompt_schema: Literal["json", "markdown", "raw"] = "json"
    workspace: WorkspaceConfig = Field(default_factory=WorkspaceConfig)
    enable_longterm_memory: bool = False
    code_validate_k_times: int = 2

    # Experience Pool Parameters
    exp_pool: ExperiencePoolConfig = Field(default_factory=ExperiencePoolConfig)

    # Will be removed in the future
    metagpt_tti_url: str = ""
    language: str = "English"
    redis_key: str = "placeholder"
    iflytek_app_id: str = ""
    iflytek_api_secret: str = ""
    iflytek_api_key: str = ""
    azure_tts_subscription_key: str = ""
    azure_tts_region: str = ""
    _extra: dict = dict()  # extra config dict

    # Role's custom configuration
    roles: Optional[List[RoleCustomConfig]] = None

    # RoleZero's configuration
    role_zero: RoleZeroConfig = Field(default_factory=RoleZeroConfig)

    @classmethod
    def from_home(cls, path):
        """Load config from ~/.metagpt/config2.yaml; None if the file is absent."""
        pathname = CONFIG_ROOT / path
        if not pathname.exists():
            return None
        return Config.from_yaml_file(pathname)

    @classmethod
    def default(cls, reload: bool = False, **kwargs) -> "Config":
        """Load default config
        - Priority: env < default_config_paths
        - Inside default_config_paths, the latter one overwrites the former one
        """
        default_config_paths = (
            METAGPT_ROOT / "config/config2.yaml",
            CONFIG_ROOT / "config2.yaml",
        )
        # Cache by the tuple of paths so repeated calls reuse the parsed Config
        # unless `reload` forces a re-read.
        if reload or default_config_paths not in _CONFIG_CACHE:
            dicts = [dict(os.environ), *(Config.read_yaml(path) for path in default_config_paths), kwargs]
            final = merge_dict(dicts)
            _CONFIG_CACHE[default_config_paths] = Config(**final)
        return _CONFIG_CACHE[default_config_paths]

    @classmethod
    def from_llm_config(cls, llm_config: dict):
        """user config llm
        example:
        llm_config = {"api_type": "xxx", "api_key": "xxx", "model": "xxx"}
        gpt4 = Config.from_llm_config(llm_config)
        A = Role(name="A", profile="Democratic candidate", goal="Win the election", actions=[a1], watch=[a2], config=gpt4)
        """
        llm_config = LLMConfig.model_validate(llm_config)
        # Environment variables first, then the provided llm section on top.
        dicts = [dict(os.environ)]
        dicts += [{"llm": llm_config}]
        final = merge_dict(dicts)
        return Config(**final)

    def update_via_cli(self, project_path, project_name, inc, reqa_file, max_auto_summarize_code):
        """update config via cli"""
        # Use in the PrepareDocuments action according to Section 2.2.3.5.1 of RFC 135.
        if project_path:
            # A project path implies incremental mode; default the project name
            # to the directory name (mirrors CLIParams.check_project_path).
            inc = True
            project_name = project_name or Path(project_path).name
        self.project_path = project_path
        self.project_name = project_name
        self.inc = inc
        self.reqa_file = reqa_file
        self.max_auto_summarize_code = max_auto_summarize_code

    @property
    def extra(self):
        # Accessor for the private extra-config dict; use the setter to replace it.
        return self._extra

    @extra.setter
    def extra(self, value: dict):
        self._extra = value

    def get_openai_llm(self) -> Optional[LLMConfig]:
        """Get OpenAI LLMConfig by name. If no OpenAI, raise Exception"""
        # NOTE(review): despite the docstring, nothing is raised — None is
        # returned when the configured provider is not OpenAI.
        if self.llm.api_type == LLMType.OPENAI:
            return self.llm
        return None

    def get_azure_llm(self) -> Optional[LLMConfig]:
        """Get Azure LLMConfig by name. If no Azure, raise Exception"""
        # NOTE(review): as above, returns None instead of raising.
        if self.llm.api_type == LLMType.AZURE:
            return self.llm
        return None
def merge_dict(dicts: Iterable[Dict]) -> Dict:
    """Merge multiple dicts into one, with the latter dict overwriting the former"""
    merged: Dict = {}
    for mapping in dicts:
        for key, value in mapping.items():
            merged[key] = value
    return merged
# Process-wide cache of Config instances, keyed by the tuple of default config
# paths (see Config.default).
_CONFIG_CACHE = {}

# Shared global configuration, eagerly loaded at import time.
config = Config.default()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/subscription.py | metagpt/subscription.py | import asyncio
from typing import AsyncGenerator, Awaitable, Callable
from pydantic import BaseModel, ConfigDict, Field
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
class SubscriptionRunner(BaseModel):
    """A simple wrapper to manage subscription tasks for different roles using asyncio.

    Example:
        >>> import asyncio
        >>> from metagpt.address import SubscriptionRunner
        >>> from metagpt.roles import Searcher
        >>> from metagpt.schema import Message

        >>> async def trigger():
        ...     while True:
        ...         yield Message(content="the latest news about OpenAI")
        ...         await asyncio.sleep(3600 * 24)

        >>> async def callback(msg: Message):
        ...     print(msg.content)

        >>> async def main():
        ...     pb = SubscriptionRunner()
        ...     await pb.subscribe(Searcher(), trigger(), callback)
        ...     await pb.run()

        >>> asyncio.run(main())
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # One background asyncio.Task per subscribed role.
    tasks: dict[Role, asyncio.Task] = Field(default_factory=dict)

    async def subscribe(
        self,
        role: Role,
        trigger: AsyncGenerator[Message, None],
        callback: Callable[
            [
                Message,
            ],
            Awaitable[None],
        ],
    ):
        """Subscribes a role to a trigger and sets up a callback to be called with the role's response.

        Args:
            role: The role to subscribe.
            trigger: An asynchronous generator that yields Messages to be processed by the role.
            callback: An asynchronous function to be called with the response from the role.
        """
        loop = asyncio.get_running_loop()

        async def _start_role():
            # For every message the trigger yields, run the role and forward
            # its response to the callback.
            async for msg in trigger:
                resp = await role.run(msg)
                await callback(resp)

        self.tasks[role] = loop.create_task(_start_role(), name=f"Subscription-{role}")

    async def unsubscribe(self, role: Role):
        """Unsubscribes a role from its trigger and cancels the associated task.

        Args:
            role: The role to unsubscribe.
        """
        task = self.tasks.pop(role)
        task.cancel()

    async def run(self, raise_exception: bool = True):
        """Runs all subscribed tasks and handles their completion or exception.

        Polls the task table once per second. A finished task is removed from
        the table; if it failed, its exception is either re-raised or logged.

        Args:
            raise_exception: Whether to re-raise an exception raised inside a
                subscribed task. If False, the error is only logged. Defaults to True.

        Raises:
            Exception: The exception of a failed task, when `raise_exception` is True.
        """
        while True:
            for role, task in self.tasks.items():
                if task.done():
                    if task.exception():
                        if raise_exception:
                            raise task.exception()
                        logger.opt(exception=task.exception()).error(f"Task {task.get_name()} run error")
                    else:
                        logger.warning(
                            f"Task {task.get_name()} has completed. "
                            "If this is unexpected behavior, please check the trigger function."
                        )
                    self.tasks.pop(role)
                    # Restart the scan: the dict was mutated during iteration.
                    break
            else:
                # No task finished this pass; wait before polling again.
                await asyncio.sleep(1)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/software_company.py | metagpt/software_company.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from pathlib import Path
import typer
from metagpt.const import CONFIG_ROOT
# Single Typer application: shell completion disabled, and local variables
# hidden from pretty exception output.
app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
def generate_repo(
    idea,
    investment=3.0,
    n_round=5,
    code_review=True,
    run_tests=False,
    implement=True,
    project_name="",
    inc=False,
    project_path="",
    reqa_file="",
    max_auto_summarize_code=0,
    recover_path=None,
):
    """Run the startup logic. Can be called from CLI or other Python scripts.

    Either builds a fresh team (TeamLeader / ProductManager / Architect /
    Engineer2 / DataAnalyst) or recovers a serialized one from `recover_path`,
    invests the budget, runs it on `idea`, and returns the generated project
    path from the context (may be None).
    """
    # Heavy project imports are function-local; they only run when the pipeline
    # is actually invoked.
    from metagpt.config2 import config
    from metagpt.context import Context
    from metagpt.roles import (
        Architect,
        DataAnalyst,
        Engineer2,
        ProductManager,
        TeamLeader,
    )
    from metagpt.team import Team

    config.update_via_cli(project_path, project_name, inc, reqa_file, max_auto_summarize_code)
    ctx = Context(config=config)

    if not recover_path:
        company = Team(context=ctx)
        company.hire(
            [
                TeamLeader(),
                ProductManager(),
                Architect(),
                Engineer2(),
                # ProjectManager(),
                DataAnalyst(),
            ]
        )

        # if implement or code_review:
        #     company.hire([Engineer(n_borg=5, use_code_review=code_review)])
        #
        # if run_tests:
        #     company.hire([QaEngineer()])
        #     if n_round < 8:
        #         n_round = 8  # If `--run-tests` is enabled, at least 8 rounds are required to run all QA actions.
    else:
        # Recovery path must point at a serialized `team` directory.
        stg_path = Path(recover_path)
        if not stg_path.exists() or not str(stg_path).endswith("team"):
            raise FileNotFoundError(f"{recover_path} not exists or not endswith `team`")

        company = Team.deserialize(stg_path=stg_path, context=ctx)
        idea = company.idea

    company.invest(investment)
    asyncio.run(company.run(n_round=n_round, idea=idea))

    return ctx.kwargs.get("project_path")
@app.command("", help="Start a new project.")
def startup(
    idea: str = typer.Argument(None, help="Your innovative idea, such as 'Create a 2048 game.'"),
    investment: float = typer.Option(default=3.0, help="Dollar amount to invest in the AI company."),
    n_round: int = typer.Option(default=5, help="Number of rounds for the simulation."),
    code_review: bool = typer.Option(default=True, help="Whether to use code review."),
    run_tests: bool = typer.Option(default=False, help="Whether to enable QA for adding & running tests."),
    implement: bool = typer.Option(default=True, help="Enable or disable code implementation."),
    project_name: str = typer.Option(default="", help="Unique project name, such as 'game_2048'."),
    inc: bool = typer.Option(default=False, help="Incremental mode. Use it to coop with existing repo."),
    project_path: str = typer.Option(
        default="",
        help="Specify the directory path of the old version project to fulfill the incremental requirements.",
    ),
    reqa_file: str = typer.Option(
        default="", help="Specify the source file name for rewriting the quality assurance code."
    ),
    max_auto_summarize_code: int = typer.Option(
        default=0,
        help="The maximum number of times the 'SummarizeCode' action is automatically invoked, with -1 indicating "
        "unlimited. This parameter is used for debugging the workflow.",
    ),
    recover_path: str = typer.Option(default=None, help="recover the project from existing serialized storage"),
    init_config: bool = typer.Option(default=False, help="Initialize the configuration file for MetaGPT."),
):
    """Run a startup. Be a boss."""
    # `--init-config` short-circuits: just write the default config and exit.
    if init_config:
        copy_config_to()
        return

    # The idea is required for every other invocation.
    if idea is None:
        typer.echo("Missing argument 'IDEA'. Run 'metagpt --help' for more information.")
        raise typer.Exit()

    return generate_repo(
        idea,
        investment,
        n_round,
        code_review,
        run_tests,
        implement,
        project_name,
        inc,
        project_path,
        reqa_file,
        max_auto_summarize_code,
        recover_path,
    )
DEFAULT_CONFIG = """# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
# Config Docs: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
llm:
api_type: "openai" # or azure / ollama / groq etc.
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"
"""
def copy_config_to():
    """Initialize the configuration file for MetaGPT.

    Writes the default config template to `CONFIG_ROOT/config2.yaml`, first
    backing up any existing file to `config2.bak`.
    """
    target_path = CONFIG_ROOT / "config2.yaml"

    # Create the target directory if it does not exist yet.
    target_path.parent.mkdir(parents=True, exist_ok=True)

    # If a config already exists, keep it as a .bak backup.
    if target_path.exists():
        backup_path = target_path.with_suffix(".bak")
        # `replace` overwrites a stale backup on every platform, whereas
        # `rename` raises FileExistsError on Windows if the backup exists.
        target_path.replace(backup_path)
        print(f"Existing configuration file backed up at {backup_path}")

    # Write the default template.
    target_path.write_text(DEFAULT_CONFIG, encoding="utf-8")
    print(f"Configuration file initialized at {target_path}")
if __name__ == "__main__":
    # CLI entry point: dispatch to the Typer application.
    app()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document.py | metagpt/document.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/8 14:03
@Author : alexanderwu
@File : document.py
@Desc : Classes and Operations Related to Files in the File System.
"""
from enum import Enum
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from llama_index.core import Document, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.readers.file import PDFReader
from pydantic import BaseModel, ConfigDict, Field
from tqdm import tqdm
from metagpt.logs import logger
from metagpt.repo_parser import RepoParser
def validate_cols(content_col: str, df: pd.DataFrame):
    """Ensure the DataFrame contains the expected content column."""
    if content_col in df.columns:
        return
    raise ValueError("Content column not found in DataFrame.")
def read_data(data_path: Path) -> "Union[pd.DataFrame, list[Document]]":
    """Load a data file into either a DataFrame or llama-index documents/nodes.

    Tabular formats (.xlsx/.csv/.json) become a pandas DataFrame; document
    formats (.docx/.doc/.txt/.pdf) become a list of llama-index objects.

    Args:
        data_path: Path whose suffix selects the loader.

    Returns:
        A DataFrame for tabular formats, otherwise a list of documents/nodes.

    Raises:
        NotImplementedError: If the suffix is not a supported format.
    """
    suffix = data_path.suffix
    if ".xlsx" == suffix:
        data = pd.read_excel(data_path)
    elif ".csv" == suffix:
        data = pd.read_csv(data_path)
    elif ".json" == suffix:
        data = pd.read_json(data_path)
    elif suffix in (".docx", ".doc"):
        data = SimpleDirectoryReader(input_files=[str(data_path)]).load_data()
    elif ".txt" == suffix:
        data = SimpleDirectoryReader(input_files=[str(data_path)]).load_data()
        # Split plain text into small nodes for downstream indexing.
        node_parser = SimpleNodeParser.from_defaults(separator="\n", chunk_size=256, chunk_overlap=0)
        data = node_parser.get_nodes_from_documents(data)
    elif ".pdf" == suffix:
        # Fix: `load_data` is an instance method; calling it on the class passed
        # the path string as `self` and failed at runtime.
        data = PDFReader().load_data(file=data_path)
    else:
        raise NotImplementedError("File format not supported.")
    return data
class DocumentStatus(Enum):
    """Indicates document status, a mechanism similar to RFC/PEP"""

    DRAFT = "draft"
    UNDERREVIEW = "underreview"
    APPROVED = "approved"
    DONE = "done"
class Document(BaseModel):
    """
    Document: Handles operations related to document files.
    """

    path: Path = Field(default=None)
    name: str = Field(default="")
    content: str = Field(default="")

    # metadata? in content perhaps.
    author: str = Field(default="")
    status: DocumentStatus = Field(default=DocumentStatus.DRAFT)
    reviews: list = Field(default_factory=list)

    @classmethod
    def from_path(cls, path: Path):
        """
        Create a Document instance from a file path.

        Raises:
            FileNotFoundError: If `path` does not exist.
        """
        if not path.exists():
            raise FileNotFoundError(f"File {path} not found.")
        # Read as UTF-8 explicitly to mirror `to_path`, which always writes
        # UTF-8; the locale default may differ on some platforms.
        content = path.read_text(encoding="utf-8")
        return cls(content=content, path=path)

    @classmethod
    def from_text(cls, text: str, path: Optional[Path] = None):
        """
        Create a Document from a text string.
        """
        return cls(content=text, path=path)

    def to_path(self, path: Optional[Path] = None):
        """
        Save content to the specified file path, creating parent dirs as needed.

        Raises:
            ValueError: If no path was given and `self.path` is unset.
        """
        if path is not None:
            self.path = path

        if self.path is None:
            raise ValueError("File path is not set.")

        self.path.parent.mkdir(parents=True, exist_ok=True)
        # TODO: excel, csv, json, etc.
        self.path.write_text(self.content, encoding="utf-8")

    def persist(self):
        """
        Persist document to disk at the document's own `path`.
        """
        return self.to_path()
class IndexableDocument(Document):
    """
    Advanced document handling: For vector databases or search engines.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Parsed payload: a DataFrame for tabular files, or a list of llama-index
    # documents/nodes for text-like files (see `read_data`).
    data: Union[pd.DataFrame, list]
    content_col: Optional[str] = Field(default="")  # DataFrame column holding the text
    meta_col: Optional[str] = Field(default="")  # DataFrame column holding metadata

    @classmethod
    def from_path(cls, data_path: Path, content_col="content", meta_col="metadata"):
        """Build an IndexableDocument from a file on disk.

        Raises:
            FileNotFoundError: If `data_path` does not exist.
        """
        if not data_path.exists():
            raise FileNotFoundError(f"File {data_path} not found.")
        data = read_data(data_path)
        if isinstance(data, pd.DataFrame):
            validate_cols(content_col, data)
            return cls(data=data, content=str(data), content_col=content_col, meta_col=meta_col)
        try:
            content = data_path.read_text()
        except Exception as e:
            # Best-effort: non-decodable files fall back to empty content.
            logger.debug(f"Load {str(data_path)} error: {e}")
            content = ""
        return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)

    def _get_docs_and_metadatas_by_df(self) -> (list, list):
        """Extract parallel (docs, metadatas) lists from a DataFrame payload, row by row."""
        df = self.data
        docs = []
        metadatas = []
        for i in tqdm(range(len(df))):
            docs.append(df[self.content_col].iloc[i])
            if self.meta_col:
                metadatas.append({self.meta_col: df[self.meta_col].iloc[i]})
            else:
                metadatas.append({})
        return docs, metadatas

    def _get_docs_and_metadatas_by_llamaindex(self) -> (list, list):
        """Extract parallel (docs, metadatas) lists from a llama-index document list."""
        data = self.data
        docs = [i.text for i in data]
        metadatas = [i.metadata for i in data]
        return docs, metadatas

    def get_docs_and_metadatas(self) -> (list, list):
        """Return parallel lists of document texts and metadata dicts.

        Raises:
            NotImplementedError: If `data` is neither a DataFrame nor a list.
        """
        if isinstance(self.data, pd.DataFrame):
            return self._get_docs_and_metadatas_by_df()
        elif isinstance(self.data, list):
            return self._get_docs_and_metadatas_by_llamaindex()
        else:
            raise NotImplementedError("Data type not supported for metadata extraction.")
class RepoMetadata(BaseModel):
    """Aggregate statistics for a Repo (produced by `Repo.eda`)."""

    name: str = Field(default="")  # repo name
    n_docs: int = Field(default=0)  # total file count across docs/codes/assets
    n_chars: int = Field(default=0)  # total characters across all contents
    symbols: list = Field(default_factory=list)  # symbols extracted by RepoParser
class Repo(BaseModel):
    """An in-memory repository: files partitioned into docs, code, and assets."""

    # Name of this repo.
    name: str = Field(default="")
    # metadata: RepoMetadata = Field(default=RepoMetadata)
    docs: dict[Path, Document] = Field(default_factory=dict)
    codes: dict[Path, Document] = Field(default_factory=dict)
    assets: dict[Path, Document] = Field(default_factory=dict)
    path: Path = Field(default=None)

    def _path(self, filename):
        """Resolve `filename` against the repository root."""
        return self.path / filename

    @classmethod
    def from_path(cls, path: Path):
        """Load documents, code, and assets from a repository path."""
        path.mkdir(parents=True, exist_ok=True)
        repo = Repo(path=path, name=path.name)
        for file_path in path.rglob("*"):
            # FIXME: These judgments are difficult to support multiple programming languages and need to be more general
            if file_path.is_file() and file_path.suffix in [".json", ".txt", ".md", ".py", ".js", ".css", ".html"]:
                # Read as UTF-8 explicitly: `Document.to_path` writes UTF-8, and
                # the platform default encoding may differ.
                repo._set(file_path.read_text(encoding="utf-8"), file_path)
        return repo

    def to_path(self):
        """Persist all documents, code, and assets to the given repository path."""
        for doc in self.docs.values():
            doc.to_path()
        for code in self.codes.values():
            code.to_path()
        for asset in self.assets.values():
            asset.to_path()

    def _set(self, content: str, path: Path):
        """Add a document to the appropriate category based on its file extension."""
        suffix = path.suffix
        doc = Document(content=content, path=path, name=str(path.relative_to(self.path)))
        # FIXME: These judgments are difficult to support multiple programming languages and need to be more general
        if suffix.lower() == ".md":
            self.docs[path] = doc
        elif suffix.lower() in [".py", ".js", ".css", ".html"]:
            self.codes[path] = doc
        else:
            self.assets[path] = doc
        return doc

    def set(self, filename: str, content: str):
        """Set a document and persist it to disk."""
        path = self._path(filename)
        doc = self._set(content, path)
        doc.to_path()

    def get(self, filename: str) -> Optional[Document]:
        """Get a document by its filename, searching docs, then code, then assets."""
        path = self._path(filename)
        return self.docs.get(path) or self.codes.get(path) or self.assets.get(path)

    def get_text_documents(self) -> list[Document]:
        """Return all text-bearing documents: docs and code, excluding assets."""
        return list(self.docs.values()) + list(self.codes.values())

    def eda(self) -> RepoMetadata:
        """Compute summary statistics (counts, sizes, parsed symbols) for the repo."""
        n_docs = sum(len(i) for i in [self.docs, self.codes, self.assets])
        n_chars = sum(sum(len(j.content) for j in i.values()) for i in [self.docs, self.codes, self.assets])
        symbols = RepoParser(base_directory=self.path).generate_symbols()
        return RepoMetadata(name=self.name, n_docs=n_docs, n_chars=n_chars, symbols=symbols)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/llm.py | metagpt/llm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:45
@Author : alexanderwu
@File : llm.py
"""
from typing import Optional
from metagpt.configs.llm_config import LLMConfig
from metagpt.context import Context
from metagpt.provider.base_llm import BaseLLM
def LLM(llm_config: Optional[LLMConfig] = None, context: Context = None) -> BaseLLM:
    """Return an LLM provider; the context's default when no config is given."""
    ctx = context or Context()
    if llm_config is None:
        return ctx.llm()
    return ctx.llm_with_cost_manager_from_llm_config(llm_config)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/schema.py | metagpt/schema.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/8 22:12
@Author : alexanderwu
@File : schema.py
@Modified By: mashenquan, 2023-10-31. According to Chapter 2.2.1 of RFC 116:
Replanned the distribution of responsibilities and functional positioning of `Message` class attributes.
@Modified By: mashenquan, 2023/11/22.
1. Add `Document` and `Documents` for `FileRepository` in Section 2.2.3.4 of RFC 135.
2. Encapsulate the common key-values set to pydantic structures to standardize and unify parameter passing
between actions.
3. Add `id` to `Message` according to Section 2.2.3.1.1 of RFC 135.
"""
from __future__ import annotations
import asyncio
import json
import os.path
import time
import uuid
from abc import ABC
from asyncio import Queue, QueueEmpty, wait_for
from enum import Enum
from json import JSONDecodeError
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar, Union
from pydantic import (
BaseModel,
ConfigDict,
Field,
PrivateAttr,
create_model,
field_serializer,
field_validator,
)
from metagpt.base.base_serialization import BaseSerialization
from metagpt.const import (
AGENT,
MESSAGE_ROUTE_CAUSE_BY,
MESSAGE_ROUTE_FROM,
MESSAGE_ROUTE_TO,
MESSAGE_ROUTE_TO_ALL,
SERDESER_PATH,
SYSTEM_DESIGN_FILE_REPO,
TASK_FILE_REPO,
)
from metagpt.logs import logger
from metagpt.repo_parser import DotClassInfo
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.common import (
CodeParser,
any_to_str,
any_to_str_set,
aread,
import_class,
read_json_file,
write_json_file,
)
from metagpt.utils.exceptions import handle_exception
from metagpt.utils.report import TaskReporter
from metagpt.utils.serialize import (
actionoutout_schema_to_mapping,
actionoutput_mapping_to_str,
actionoutput_str_to_mapping,
)
class SerializationMixin(BaseSerialization):
    """Mixin adding exception-safe JSON-file (de)serialization to pydantic models."""

    @handle_exception
    def serialize(self, file_path: str = None) -> str:
        """Serializes the current instance to a JSON file.

        If an exception occurs, `handle_exception` will catch it and return `None`.

        Args:
            file_path (str, optional): The path to the JSON file where the instance will be saved. Defaults to None.

        Returns:
            str: The path to the JSON file where the instance was saved.
        """
        file_path = file_path or self.get_serialization_path()

        serialized_data = self.model_dump()

        # NOTE(review): use_fallback=True presumably tolerates values that are not
        # JSON-native — confirm against write_json_file's implementation.
        write_json_file(file_path, serialized_data, use_fallback=True)
        logger.debug(f"{self.__class__.__qualname__} serialization successful. File saved at: {file_path}")

        return file_path

    @classmethod
    @handle_exception
    def deserialize(cls, file_path: str = None) -> BaseModel:
        """Deserializes a JSON file to an instance of cls.

        If an exception occurs, `handle_exception` will catch it and return `None`.

        Args:
            file_path (str, optional): The path to the JSON file to read from. Defaults to None.

        Returns:
            An instance of the cls.
        """
        file_path = file_path or cls.get_serialization_path()

        data: dict = read_json_file(file_path)

        model = cls(**data)
        logger.debug(f"{cls.__qualname__} deserialization successful. Instance created from file: {file_path}")

        return model

    @classmethod
    def get_serialization_path(cls) -> str:
        """Get the serialization path for the class.

        This method constructs a file path for serialization based on the class name.
        The default path is constructed as './workspace/storage/ClassName.json', where 'ClassName'
        is the name of the class.

        Returns:
            str: The path to the serialization file.
        """
        return str(SERDESER_PATH / f"{cls.__qualname__}.json")
class SimpleMessage(BaseModel):
    """Minimal chat message: just the content string and the speaker's role."""

    content: str  # natural-language message body
    role: str  # speaker role, e.g. system / user / assistant
class Document(BaseModel):
    """
    Represents a document: its content plus where it lives relative to the project root.
    """

    root_path: str = ""  # directory of the file, relative to the project root
    filename: str = ""
    content: str = ""

    def get_meta(self) -> Document:
        """Get metadata of the document.

        :return: A new Document instance with the same root path and filename (no content).
        """
        return Document(root_path=self.root_path, filename=self.filename)

    @property
    def root_relative_path(self):
        """Get relative path from root of git repository.

        :return: relative path from root of git repository.
        """
        return os.path.join(self.root_path, self.filename)

    def __str__(self):
        return self.content

    def __repr__(self):
        return self.content

    @classmethod
    async def load(
        cls, filename: Union[str, Path], project_path: Optional[Union[str, Path]] = None
    ) -> Optional["Document"]:
        """
        Load a document from a file.

        Args:
            filename (Union[str, Path]): The path to the file to load.
            project_path (Optional[Union[str, Path]], optional): The path to the project. Defaults to None.

        Returns:
            Optional[Document]: The loaded document, or None if the file does not exist.
        """
        if not filename or not Path(filename).exists():
            return None
        content = await aread(filename=filename)
        doc = cls(content=content, filename=str(filename))
        if project_path and Path(filename).is_relative_to(project_path):
            # BUGFIX: cast to str. root_path is declared as str, and pydantic does
            # not validate plain attribute assignment, so the previous code leaked a
            # Path object into the field — breaking re-validation in get_meta() and
            # producing inconsistent serialization.
            doc.root_path = str(Path(filename).relative_to(project_path).parent)
            doc.filename = Path(filename).name
        return doc
class Documents(BaseModel):
    """A keyed collection of documents.

    Attributes:
        docs (Dict[str, Document]): Maps each document's filename to the Document instance.
    """

    docs: Dict[str, Document] = Field(default_factory=dict)

    @classmethod
    def from_iterable(cls, documents: Iterable[Document]) -> Documents:
        """Build a Documents collection, keyed by filename, from any iterable of Document.

        :param documents: An iterable of Document instances.
        :return: A Documents instance.
        """
        mapping = {}
        for document in documents:
            mapping[document.filename] = document
        return Documents(docs=mapping)

    def to_action_output(self) -> "ActionOutput":
        """Wrap this collection as an ActionOutput (content = JSON dump, instruct_content = self).

        :return: An ActionOutput carrying this collection.
        """
        from metagpt.actions.action_output import ActionOutput

        return ActionOutput(content=self.model_dump_json(), instruct_content=self)
class Resource(BaseModel):
    """A single resource extracted from requirement text.

    Used by `Message.parse_resources` to structure each entry of the LLM's JSON reply.
    """

    resource_type: str  # the type of resource
    value: str  # a string type of resource content
    description: str  # explanation of why this was classified as a resource
class Message(BaseModel):
    """A routable chat message: `<role>: <content>` plus structured payload and routing metadata.

    Routing fields (`cause_by`, `sent_from`, `send_to`) are normalized to strings /
    string sets by the validators below, so callers may pass classes or instances.
    """

    id: str = Field(default="", validate_default=True)  # According to Section 2.2.3.1.1 of RFC 135
    content: str  # natural language for user or agent
    instruct_content: Optional[BaseModel] = Field(default=None, validate_default=True)
    role: str = "user"  # system / user / assistant
    cause_by: str = Field(default="", validate_default=True)
    sent_from: str = Field(default="", validate_default=True)
    send_to: set[str] = Field(default={MESSAGE_ROUTE_TO_ALL}, validate_default=True)
    metadata: Dict[str, Any] = Field(default_factory=dict)  # metadata for `content` and `instruct_content`

    @field_validator("id", mode="before")
    @classmethod
    def check_id(cls, id: str) -> str:
        # Auto-assign a random hex id when the caller did not supply one.
        return id if id else uuid.uuid4().hex

    @field_validator("instruct_content", mode="before")
    @classmethod
    def check_instruct_content(cls, ic: Any) -> BaseModel:
        # Rebuild a BaseModel from the dict form produced by ser_instruct_content below.
        if ic and isinstance(ic, dict) and "class" in ic:
            if "mapping" in ic:
                # compatible with custom-defined ActionOutput
                mapping = actionoutput_str_to_mapping(ic["mapping"])
                actionnode_class = import_class("ActionNode", "metagpt.actions.action_node")  # avoid circular import
                ic_obj = actionnode_class.create_model_class(class_name=ic["class"], mapping=mapping)
            elif "module" in ic:
                # subclasses of BaseModel
                ic_obj = import_class(ic["class"], ic["module"])
            else:
                raise KeyError("missing required key to init Message.instruct_content from dict")
            ic = ic_obj(**ic["value"])
        return ic

    @field_validator("cause_by", mode="before")
    @classmethod
    def check_cause_by(cls, cause_by: Any) -> str:
        # Default cause is the generic UserRequirement action; always stored as a dotted name.
        return any_to_str(cause_by if cause_by else import_class("UserRequirement", "metagpt.actions.add_requirement"))

    @field_validator("sent_from", mode="before")
    @classmethod
    def check_sent_from(cls, sent_from: Any) -> str:
        return any_to_str(sent_from if sent_from else "")

    @field_validator("send_to", mode="before")
    @classmethod
    def check_send_to(cls, send_to: Any) -> set:
        return any_to_str_set(send_to if send_to else {MESSAGE_ROUTE_TO_ALL})

    @field_serializer("send_to", mode="plain")
    def ser_send_to(self, send_to: set) -> list:
        # Sets are not JSON-serializable; emit a list.
        return list(send_to)

    @field_serializer("instruct_content", mode="plain")
    def ser_instruct_content(self, ic: BaseModel) -> Union[dict, None]:
        # Inverse of check_instruct_content: dump the model plus enough info to rebuild it.
        ic_dict = None
        if ic:
            # compatible with custom-defined ActionOutput
            schema = ic.model_json_schema()
            ic_type = str(type(ic))
            if "<class 'metagpt.actions.action_node" in ic_type:
                # instruct_content from AutoNode.create_model_class, for now, it's single level structure.
                mapping = actionoutout_schema_to_mapping(schema)
                mapping = actionoutput_mapping_to_str(mapping)

                ic_dict = {"class": schema["title"], "mapping": mapping, "value": ic.model_dump()}
            else:
                # due to instruct_content can be assigned by subclasses of BaseModel
                ic_dict = {"class": schema["title"], "module": ic.__module__, "value": ic.model_dump()}
        return ic_dict

    def __init__(self, content: str = "", **data: Any):
        # Allow positional content: Message("hi") and Message(content="hi") are equivalent.
        data["content"] = data.get("content", content)
        super().__init__(**data)

    def __setattr__(self, key, val):
        """Override `@property.setter`, convert non-string parameters into string parameters."""
        if key == MESSAGE_ROUTE_CAUSE_BY:
            new_val = any_to_str(val)
        elif key == MESSAGE_ROUTE_FROM:
            new_val = any_to_str(val)
        elif key == MESSAGE_ROUTE_TO:
            new_val = any_to_str_set(val)
        else:
            new_val = val
        super().__setattr__(key, new_val)

    def __str__(self):
        # prefix = '-'.join([self.role, str(self.cause_by)])
        if self.instruct_content:
            return f"{self.role}: {self.instruct_content.model_dump()}"
        return f"{self.role}: {self.content}"

    def __repr__(self):
        return self.__str__()

    def rag_key(self) -> str:
        """For search"""
        return self.content

    def to_dict(self) -> dict:
        """Return a dict containing `role` and `content` for the LLM call.l"""
        return {"role": self.role, "content": self.content}

    def dump(self) -> str:
        """Convert the object to json string"""
        return self.model_dump_json(exclude_none=True, warnings=False)

    @staticmethod
    @handle_exception(exception_type=JSONDecodeError, default_return=None)
    def load(val):
        """Convert the json string to object. Returns None when parsing fails."""
        try:
            m = json.loads(val)
            # Pop id first so the validator doesn't regenerate it, then restore it.
            id = m.get("id")
            if "id" in m:
                del m["id"]
            msg = Message(**m)
            if id:
                msg.id = id
            return msg
        except JSONDecodeError as err:
            logger.error(f"parse json failed: {val}, error:{err}")
        return None

    async def parse_resources(self, llm: "BaseLLM", key_descriptions: Dict[str, str] = None) -> Dict:
        """
        `parse_resources` corresponds to the in-context adaptation capability of the input of the atomic action,
        which will be migrated to the context builder later.

        Args:
            llm (BaseLLM): The instance of the BaseLLM class.
            key_descriptions (Dict[str, str], optional): A dictionary containing descriptions for each key,
                if provided. Defaults to None.

        Returns:
            Dict: A dictionary containing parsed resources.
        """
        if not self.content:
            return {}
        content = f"## Original Requirement\n```text\n{self.content}\n```\n"
        return_format = (
            "Return a markdown JSON object with:\n"
            '- a "resources" key contain a list of objects. Each object with:\n'
            '  - a "resource_type" key explain the type of resource;\n'
            '  - a "value" key containing a string type of resource content;\n'
            '  - a "description" key explaining why;\n'
        )
        key_descriptions = key_descriptions or {}
        for k, v in key_descriptions.items():
            return_format += f'- a "{k}" key containing {v};\n'
        return_format += '- a "reason" key explaining why;\n'
        instructions = ['Lists all the resources contained in the "Original Requirement".', return_format]
        rsp = await llm.aask(msg=content, system_msgs=instructions)
        json_data = CodeParser.parse_code(text=rsp, lang="json")
        m = json.loads(json_data)
        m["resources"] = [Resource(**i) for i in m.get("resources", [])]
        return m

    def add_metadata(self, key: str, value: str):
        self.metadata[key] = value

    @staticmethod
    def create_instruct_value(kvs: Dict[str, Any], class_name: str = "") -> BaseModel:
        """
        Dynamically creates a Pydantic BaseModel subclass based on a given dictionary.

        Parameters:
        - data: A dictionary from which to create the BaseModel subclass.

        Returns:
        - A Pydantic BaseModel subclass instance populated with the given data.
        """
        if not class_name:
            class_name = "DM" + uuid.uuid4().hex[0:8]
        dynamic_class = create_model(class_name, **{key: (value.__class__, ...) for key, value in kvs.items()})
        return dynamic_class.model_validate(kvs)

    def is_user_message(self) -> bool:
        return self.role == "user"

    def is_ai_message(self) -> bool:
        return self.role == "assistant"
class UserMessage(Message):
    """A `Message` preset with role="user".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str, **kwargs):
        # Drop any caller-supplied role; this class pins role to "user".
        kwargs.pop("role", None)
        super().__init__(content=content, role="user", **kwargs)
class SystemMessage(Message):
    """A `Message` preset with role="system".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str, **kwargs):
        # Drop any caller-supplied role; this class pins role to "system".
        kwargs.pop("role", None)
        super().__init__(content=content, role="system", **kwargs)
class AIMessage(Message):
    """A `Message` preset with role="assistant".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str, **kwargs):
        # Drop any caller-supplied role; this class pins role to "assistant".
        kwargs.pop("role", None)
        super().__init__(content=content, role="assistant", **kwargs)

    def with_agent(self, name: str):
        """Tag this message with the producing agent's name; returns self for chaining."""
        self.add_metadata(key=AGENT, value=name)
        return self

    @property
    def agent(self) -> str:
        """Name of the agent that produced this message, or "" if untagged."""
        return self.metadata.get(AGENT, "")
class Task(BaseModel):
    """A single unit of work inside a Plan, tracking dependencies and completion state."""

    task_id: str = ""
    dependent_task_ids: list[str] = []  # Tasks prerequisite to this Task
    instruction: str = ""
    task_type: str = ""
    code: str = ""
    result: str = ""
    is_success: bool = False
    is_finished: bool = False
    assignee: str = ""

    def reset(self):
        """Clear execution artifacts and flags so the task can be redone."""
        self.code = ""
        self.result = ""
        self.is_success = False
        self.is_finished = False

    def update_task_result(self, task_result: TaskResult):
        """Append the latest attempt's code/result and adopt its success flag."""
        self.code = "\n".join([self.code, task_result.code])
        self.result = "\n".join([self.result, task_result.result])
        self.is_success = task_result.is_success
class TaskResult(BaseModel):
    """Result of taking a task, with result and is_success required to be filled"""

    code: str = ""  # code executed for this attempt, if any
    result: str  # textual outcome of the execution
    is_success: bool  # whether the attempt succeeded
@register_tool(
    include_functions=[
        "append_task",
        "reset_task",
        "replace_task",
        "finish_current_task",
    ]
)
class Plan(BaseModel):
    """Plan is a sequence of tasks towards a goal."""

    goal: str  # the overall objective the tasks work towards
    context: str = ""  # free-form background for the plan
    tasks: list[Task] = []  # kept in topologically sorted order
    task_map: dict[str, Task] = {}  # task_id -> Task, kept in sync with `tasks`
    current_task_id: str = ""  # id of the first unfinished task, "" when all done

    def _topological_sort(self, tasks: list[Task]):
        """Return `tasks` ordered so every task follows its dependencies (DFS).

        NOTE(review): no cycle detection — cyclic dependencies would be silently
        linearized in visit order.
        """
        task_map = {task.task_id: task for task in tasks}
        dependencies = {task.task_id: set(task.dependent_task_ids) for task in tasks}
        sorted_tasks = []
        visited = set()

        def visit(task_id):
            if task_id in visited:
                return
            visited.add(task_id)
            for dependent_id in dependencies.get(task_id, []):
                visit(dependent_id)
            sorted_tasks.append(task_map[task_id])

        for task in tasks:
            visit(task.task_id)

        return sorted_tasks

    def add_tasks(self, tasks: list[Task]):
        """
        Integrates new tasks into the existing plan, ensuring dependency order is maintained.

        This method performs two primary functions based on the current state of the task list:
        1. If there are no existing tasks, it topologically sorts the provided tasks to ensure
        correct execution order based on dependencies, and sets these as the current tasks.
        2. If there are existing tasks, it merges the new tasks with the existing ones. It maintains
        any common prefix of tasks (based on task_id and instruction) and appends the remainder
        of the new tasks. The current task is updated to the first unfinished task in this merged list.

        Args:
            tasks (list[Task]): A list of tasks (may be unordered) to add to the plan.

        Returns:
            None: The method updates the internal state of the plan but does not return anything.
        """
        if not tasks:
            return

        # Topologically sort the new tasks to ensure correct dependency order
        new_tasks = self._topological_sort(tasks)

        if not self.tasks:
            # If there are no existing tasks, set the new tasks as the current tasks
            self.tasks = new_tasks

        else:
            # Find the length of the common prefix between existing and new tasks
            prefix_length = 0
            for old_task, new_task in zip(self.tasks, new_tasks):
                if old_task.task_id != new_task.task_id or old_task.instruction != new_task.instruction:
                    break
                prefix_length += 1

            # Combine the common prefix with the remainder of the new tasks
            final_tasks = self.tasks[:prefix_length] + new_tasks[prefix_length:]
            self.tasks = final_tasks

        # Update current_task_id to the first unfinished task in the merged list
        self._update_current_task()

        # Update the task map for quick access to tasks by ID
        self.task_map = {task.task_id: task for task in self.tasks}

    def reset_task(self, task_id: str):
        """
        Reset a task based on task_id, i.e. set Task.is_finished=False and request redo. This also resets all tasks depending on it.

        Args:
            task_id (str): The ID of the task to be reset.
        """
        if task_id in self.task_map:
            task = self.task_map[task_id]
            task.reset()
            # reset all downstream tasks that are dependent on the reset task
            for dep_task in self.tasks:
                if task_id in dep_task.dependent_task_ids:
                    # FIXME: if LLM generates cyclic tasks, this will result in infinite recursion
                    self.reset_task(dep_task.task_id)

        self._update_current_task()

    def _replace_task(self, new_task: Task):
        """
        Replace an existing task with the new input task based on task_id, and reset all tasks depending on it.

        Args:
            new_task (Task): The new task that will replace an existing one.

        Returns:
            None
        """
        assert new_task.task_id in self.task_map
        # Replace the task in the task map and the task list
        self.task_map[new_task.task_id] = new_task
        for i, task in enumerate(self.tasks):
            if task.task_id == new_task.task_id:
                self.tasks[i] = new_task
                break

        # Reset dependent tasks
        for task in self.tasks:
            if new_task.task_id in task.dependent_task_ids:
                self.reset_task(task.task_id)

        self._update_current_task()

    def _append_task(self, new_task: Task):
        """
        Append a new task to the end of existing task sequences

        Args:
            new_task (Task): The new task to be appended to the existing task sequence

        Returns:
            None
        """
        # assert not self.has_task_id(new_task.task_id), "Task already in current plan, use replace_task instead"
        if self.has_task_id(new_task.task_id):
            logger.warning(
                "Task already in current plan, should use replace_task instead. Overwriting the existing task."
            )

        assert all(
            [self.has_task_id(dep_id) for dep_id in new_task.dependent_task_ids]
        ), "New task has unknown dependencies"

        # Existing tasks do not depend on the new task, it's fine to put it to the end of the sorted task sequence
        self.tasks.append(new_task)
        self.task_map[new_task.task_id] = new_task
        self._update_current_task()

    def has_task_id(self, task_id: str) -> bool:
        """Whether a task with this id is already in the plan."""
        return task_id in self.task_map

    def _update_current_task(self):
        """Re-sort tasks, rebuild the map, and point current_task_id at the first unfinished task."""
        self.tasks = self._topological_sort(self.tasks)
        # Update the task map for quick access to tasks by ID
        self.task_map = {task.task_id: task for task in self.tasks}
        current_task_id = ""
        for task in self.tasks:
            if not task.is_finished:
                current_task_id = task.task_id
                break
        self.current_task_id = current_task_id
        # Report the plan state for external progress tracking.
        TaskReporter().report({"tasks": [i.model_dump() for i in self.tasks], "current_task_id": current_task_id})

    @property
    def current_task(self) -> Task:
        """Find current task to execute

        Returns:
            Task: the current task to be executed
        """
        return self.task_map.get(self.current_task_id, None)

    def finish_current_task(self):
        """Finish current task, set Task.is_finished=True, set current task to next task"""
        if self.current_task_id:
            self.current_task.is_finished = True
            self._update_current_task()  # set to next task

    def finish_all_tasks(self):
        """Finish all tasks."""
        while self.current_task:
            self.finish_current_task()

    def is_plan_finished(self) -> bool:
        """Check if all tasks are finished"""
        return all(task.is_finished for task in self.tasks)

    def get_finished_tasks(self) -> list[Task]:
        """return all finished tasks in correct linearized order

        Returns:
            list[Task]: list of finished tasks
        """
        return [task for task in self.tasks if task.is_finished]

    def append_task(
        self, task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str, task_type: str = ""
    ):
        """
        Append a new task with task_id (number) to the end of existing task sequences.
        If dependent_task_ids is not empty, the task will depend on the tasks with the ids in the list.
        Note that the assignee should be the 'name' of the role.
        """
        new_task = Task(
            task_id=task_id,
            dependent_task_ids=dependent_task_ids,
            instruction=instruction,
            assignee=assignee,
            task_type=task_type,
        )
        return self._append_task(new_task)

    def replace_task(self, task_id: str, new_dependent_task_ids: list[str], new_instruction: str, new_assignee: str):
        """Replace an existing task (can be current task) based on task_id, and reset all tasks depending on it."""
        new_task = Task(
            task_id=task_id,
            dependent_task_ids=new_dependent_task_ids,
            instruction=new_instruction,
            assignee=new_assignee,
        )
        return self._replace_task(new_task)
class MessageQueue(BaseModel):
    """Message queue which supports asynchronous updates."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    _queue: Queue = PrivateAttr(default_factory=Queue)

    def pop(self) -> Message | None:
        """Pop one message from the queue, or return None if it is empty."""
        try:
            item = self._queue.get_nowait()
            if item:
                self._queue.task_done()
            return item
        except QueueEmpty:
            return None

    def pop_all(self) -> List[Message]:
        """Pop all messages from the queue."""
        ret = []
        while True:
            msg = self.pop()
            if not msg:
                break
            ret.append(msg)
        return ret

    def push(self, msg: Message):
        """Push a message into the queue."""
        self._queue.put_nowait(msg)

    def empty(self):
        """Return true if the queue is empty."""
        return self._queue.empty()

    async def dump(self) -> str:
        """Serialize current queue contents to a JSON array string.

        The queue is drained non-blockingly and every message is re-enqueued in its
        original order afterwards, so the queue is unchanged on return.

        BUGFIX: the previous implementation awaited `wait_for(self._queue.get(),
        timeout=1.0)` and relied on a `None` sentinel that is never enqueued, so
        every call paid a full one-second timeout just to detect the end of the
        queue. `get_nowait()` detects emptiness immediately via QueueEmpty.
        """
        if self.empty():
            return "[]"

        lst = []
        msgs = []
        try:
            while True:
                item = self._queue.get_nowait()
                msgs.append(item)
                lst.append(item.dump())
                self._queue.task_done()
        except QueueEmpty:
            pass
        finally:
            # Restore the drained messages so dump() has no observable side effect.
            for m in msgs:
                self._queue.put_nowait(m)
        return json.dumps(lst, ensure_ascii=False)

    @staticmethod
    def load(data) -> "MessageQueue":
        """Convert the json string to the `MessageQueue` object."""
        queue = MessageQueue()
        try:
            lst = json.loads(data)
            for i in lst:
                msg = Message.load(i)
                # BUGFIX: Message.load returns None for malformed entries; never
                # enqueue None, or pop() would stop early and misreport the queue
                # as empty at that point.
                if msg:
                    queue.push(msg)
        except JSONDecodeError as e:
            logger.warning(f"JSON load failed: {data}, error:{e}")
        return queue
# Generic type variable bound to BaseModel; lets BaseContext.loads return the subclass type.
T = TypeVar("T", bound="BaseModel")
class BaseContext(BaseModel, ABC):
    """Abstract base for action-context payloads, with exception-safe JSON loading."""

    @classmethod
    @handle_exception
    def loads(cls: Type[T], val: str) -> Optional[T]:
        """Parse `val` as JSON and validate it into an instance of `cls`.

        Returns None (via `handle_exception`) if parsing or validation fails.
        """
        i = json.loads(val)
        return cls(**i)
class CodingContext(BaseContext):
    """Context passed to coding actions: the target filename plus related design/task/code docs."""

    filename: str  # target source-file name
    design_doc: Optional[Document] = None
    task_doc: Optional[Document] = None
    code_doc: Optional[Document] = None
    code_plan_and_change_doc: Optional[Document] = None
class TestingContext(BaseContext):
    """Context passed to testing actions: the code under test and (optionally) its test doc."""

    filename: str  # target test-file name
    code_doc: Document  # the source document being tested
    test_doc: Optional[Document] = None  # generated/existing test document, if any
class RunCodeContext(BaseContext):
    """Context for executing code: what to run, how, and where to capture output."""

    mode: str = "script"  # execution mode; "script" runs code as a standalone script
    code: Optional[str] = None  # inline code to run, if not referencing a file
    code_filename: str = ""
    test_code: Optional[str] = None  # inline test code, if any
    test_filename: str = ""
    command: List[str] = Field(default_factory=list)  # argv-style command to execute
    working_directory: str = ""
    additional_python_paths: List[str] = Field(default_factory=list)  # extra entries for PYTHONPATH
    output_filename: Optional[str] = None  # file to write captured output to, if set
    output: Optional[str] = None  # captured output of the run
class RunCodeResult(BaseContext):
    """Outcome of a code run: a summary plus captured standard streams."""

    summary: str  # human-readable summary of the run
    stdout: str  # captured standard output
    stderr: str  # captured standard error
class CodeSummarizeContext(BaseModel):
    """Context for code summarization: the related design/task docs and code files."""

    design_filename: str = ""
    task_filename: str = ""
    codes_filenames: List[str] = Field(default_factory=list)
    reason: str = ""

    @staticmethod
    def loads(filenames: List) -> CodeSummarizeContext:
        """Classify incoming paths into design/task filenames by their repo folder."""
        ctx = CodeSummarizeContext()
        for filename in filenames:
            path = Path(filename)
            if path.is_relative_to(SYSTEM_DESIGN_FILE_REPO):
                ctx.design_filename = str(filename)
            elif path.is_relative_to(TASK_FILE_REPO):
                ctx.task_filename = str(filename)
        return ctx

    def __hash__(self):
        # Hash on the doc pair so contexts can be deduplicated in sets/dicts.
        return hash((self.design_filename, self.task_filename))
class CodePlanAndChangeContext(BaseModel):
    """Context for incremental development: the requirement/issue plus related doc filenames."""

    requirement: str = ""  # the new requirement text
    issue: str = ""  # the issue being addressed, if any
    prd_filename: str = ""
    design_filename: str = ""
    task_filename: str = ""
# mermaid class view
class UMLClassMeta(BaseModel):
    """Shared base for UML entities: a name plus its mermaid visibility marker."""

    name: str = ""
    visibility: str = ""

    @staticmethod
    def name_to_visibility(name: str) -> str:
        """Map a Python identifier to a mermaid visibility symbol.

        `__init__` and public names map to "+", dunder/private "__x" to "-",
        and protected "_x" to "#".
        """
        if name == "__init__" or not name.startswith("_"):
            return "+"
        return "-" if name.startswith("__") else "#"
class UMLClassAttribute(UMLClassMeta):
    """UML class attribute: a typed, optionally-defaulted member, renderable for mermaid."""

    value_type: str = ""
    default_value: str = ""

    def get_mermaid(self, align=1) -> str:
        """Render this attribute as one mermaid class-diagram line, indented `align` tabs."""
        parts = ["\t" * align, self.visibility]
        if self.value_type:
            parts.append(self.value_type.replace(" ", "") + " ")
        # The name may arrive as "qualifier:name"; keep only the part after the colon.
        bare_name = self.name.split(":", 1)[1] if ":" in self.name else self.name
        parts.append(bare_name)
        if self.default_value:
            parts.append("=")
            if self.value_type not in ["str", "string", "String"]:
                parts.append(self.default_value)
            else:
                # String defaults are quoted; embedded double quotes are stripped.
                parts.append('"' + self.default_value.replace('"', "") + '"')
        return "".join(parts)
class UMLClassMethod(UMLClassMeta):
    """UML class method: argument list plus return type, renderable for mermaid."""

    args: List[UMLClassAttribute] = Field(default_factory=list)
    return_type: str = ""

    def get_mermaid(self, align=1) -> str:
        """Render this method as one mermaid class-diagram line, indented `align` tabs."""
        # The name may arrive as "qualifier:name"; keep only the part after the colon.
        bare_name = self.name.split(":", 1)[1] if ":" in self.name else self.name
        arg_list = ",".join(v.get_mermaid(align=0) for v in self.args)
        rendered = "\t" * align + self.visibility + bare_name + "(" + arg_list + ")"
        if self.return_type:
            rendered += " " + self.return_type.replace(" ", "")
        return rendered
class UMLClassView(UMLClassMeta):
    """UML class view: the class's attributes and methods, renderable as a mermaid block."""

    attributes: List[UMLClassAttribute] = Field(default_factory=list)
    methods: List[UMLClassMethod] = Field(default_factory=list)

    def get_mermaid(self, align=1) -> str:
        """Render the full mermaid `class Name { ... }` block, indented `align` tabs."""
        indent = "\t" * align
        content = indent + "class " + self.name + "{\n"
        for v in self.attributes:
            content += v.get_mermaid(align=align + 1) + "\n"
        for v in self.methods:
            content += v.get_mermaid(align=align + 1) + "\n"
        content += indent + "}\n"
        return content

    @classmethod
    def load_dot_class_info(cls, dot_class_info: DotClassInfo) -> UMLClassView:
        """Build a UMLClassView from a parsed dot-format DotClassInfo."""
        visibility = UMLClassView.name_to_visibility(dot_class_info.name)
        class_view = cls(name=dot_class_info.name, visibility=visibility)
        for i in dot_class_info.attributes.values():
            visibility = UMLClassAttribute.name_to_visibility(i.name)
            attr = UMLClassAttribute(name=i.name, visibility=visibility, value_type=i.type_, default_value=i.default_)
            class_view.attributes.append(attr)
        for i in dot_class_info.methods.values():
            visibility = UMLClassMethod.name_to_visibility(i.name)
            # return_type is set once here; the old second assignment after the args
            # loop was redundant and has been removed.
            method = UMLClassMethod(name=i.name, visibility=visibility, return_type=i.return_args.type_)
            for j in i.args:
                method.args.append(UMLClassAttribute(name=j.name, value_type=j.type_, default_value=j.default_))
            class_view.methods.append(method)
        return class_view
class BaseEnum(Enum):
"""Base class for enums."""
def __new__(cls, value, desc=None):
"""
Construct an instance of the enum member.
Args:
cls: The class.
value: The value of the enum member.
desc: The description of the enum member. Defaults to None.
"""
if issubclass(cls, str):
obj = str.__new__(cls, value)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | true |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/_compat.py | metagpt/_compat.py | import platform
import sys
import warnings
# Windows-on-CPython workarounds; applied only on that platform combination.
if sys.implementation.name == "cpython" and platform.system() == "Windows":
    import asyncio

    if sys.version_info[:2] == (3, 9):
        from asyncio.proactor_events import _ProactorBasePipeTransport

        # https://github.com/python/cpython/pull/92842
        # Replace __del__ with the upstream-fixed version (typo `pacth_del` renamed
        # to `patch_del`; the bound behavior is unchanged).
        def patch_del(self, _warn=warnings.warn):
            if self._sock is not None:
                _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
                self._sock.close()

        _ProactorBasePipeTransport.__del__ = patch_del

    if sys.version_info >= (3, 9, 0):
        from semantic_kernel.orchestration import sk_function as _  # noqa: F401

        # caused by https://github.com/microsoft/semantic-kernel/pull/1416
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/repo_parser.py | metagpt/repo_parser.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build a symbols repository from source code.
This script is designed to create a symbols repository from the provided source code.
@Time : 2023/11/17 17:58
@Author : alexanderwu
@File : repo_parser.py
"""
from __future__ import annotations
import ast
import json
import re
import subprocess
from pathlib import Path
from typing import Dict, List, Optional
import pandas as pd
from pydantic import BaseModel, Field, field_validator
from metagpt.const import AGGREGATION, COMPOSITION, GENERALIZATION
from metagpt.logs import logger
from metagpt.utils.common import any_to_str, aread, remove_white_spaces
from metagpt.utils.exceptions import handle_exception
class RepoFileInfo(BaseModel):
    """
    Repository data element that represents information about a file.

    Attributes:
        file (str): The name or path of the file.
        classes (List): A list of class names present in the file.
        functions (List): A list of function names present in the file.
        globals (List): A list of global variable names present in the file.
        page_info (List): A list of page-related information associated with the file.
    """

    file: str
    # All list fields default to empty so a bare RepoFileInfo(file=...) is valid.
    classes: List = Field(default_factory=list)
    functions: List = Field(default_factory=list)
    globals: List = Field(default_factory=list)
    page_info: List = Field(default_factory=list)
class CodeBlockInfo(BaseModel):
    """
    Repository data element representing information about a code block.

    Attributes:
        lineno (int): The starting line number of the code block.
        end_lineno (int): The ending line number of the code block.
        type_name (str): The type or category of the code block.
        tokens (List): A list of tokens present in the code block.
        properties (Dict): A dictionary containing additional properties associated with the code block.
    """

    lineno: int
    end_lineno: int
    type_name: str
    # Optional payloads; both default to empty containers.
    tokens: List = Field(default_factory=list)
    properties: Dict = Field(default_factory=dict)
class DotClassAttribute(BaseModel):
    """
    Repository data element representing a class attribute in dot format.

    Attributes:
        name (str): The name of the class attribute.
        type_ (str): The type of the class attribute.
        default_ (str): The default value of the class attribute.
        description (str): A description of the class attribute.
        compositions (List[str]): A list of compositions associated with the class attribute.
    """

    name: str = ""
    type_: str = ""
    default_: str = ""
    description: str
    compositions: List[str] = Field(default_factory=list)

    @classmethod
    def parse(cls, v: str) -> "DotClassAttribute":
        """
        Parses dot format text and returns a DotClassAttribute object.

        Args:
            v (str): Dot format text to be parsed.

        Returns:
            DotClassAttribute: An instance of the DotClassAttribute class representing the parsed data.
        """
        # Normalize the input to the canonical "name:type=default" shape: insert a
        # ":" before "=" if the type separator is missing, and append missing ":"
        # or "=" at the end, so the find/rfind slicing below always succeeds.
        val = ""
        meet_colon = False
        meet_equals = False
        for c in v:
            if c == ":":
                meet_colon = True
            elif c == "=":
                meet_equals = True
                if not meet_colon:
                    val += ":"
                    meet_colon = True
            val += c
        if not meet_colon:
            val += ":"
        if not meet_equals:
            val += "="

        cix = val.find(":")
        eix = val.rfind("=")
        name = val[0:cix].strip()
        type_ = val[cix + 1 : eix]
        default_ = val[eix + 1 :].strip()

        type_ = remove_white_spaces(type_)  # remove white space
        if type_ == "NoneType":
            type_ = ""
        if "Literal[" in type_:
            # Literal contents must keep their quotes in type_, but compositions are
            # computed against a version where Literal[...] collapses to "Literal".
            pre_l, literal, post_l = cls._split_literal(type_)
            composition_val = pre_l + "Literal" + post_l  # replace Literal[...] with Literal
            type_ = pre_l + literal + post_l
        else:
            type_ = re.sub(r"['\"]+", "", type_)  # remove '"
            composition_val = type_

        if default_ == "None":
            default_ = ""
        compositions = cls.parse_compositions(composition_val)
        return cls(name=name, type_=type_, default_=default_, description=v, compositions=compositions)

    @staticmethod
    def parse_compositions(types_part) -> List[str]:
        """
        Parses the type definition code block of source code and returns a list of compositions.

        Args:
            types_part: The type definition code block to be parsed.

        Returns:
            List[str]: A list of compositions extracted from the type definition code block.
        """
        if not types_part:
            return []
        # Flatten generics/unions by treating all brackets, commas and parens as
        # separators, then drop builtin/typing names so only project types remain.
        modified_string = re.sub(r"[\[\],\(\)]", "|", types_part)
        types = modified_string.split("|")
        filters = {
            "str",
            "frozenset",
            "set",
            "int",
            "float",
            "complex",
            "bool",
            "dict",
            "list",
            "Union",
            "Dict",
            "Set",
            "Tuple",
            "NoneType",
            "None",
            "Any",
            "Optional",
            "Iterator",
            "Literal",
            "List",
        }
        result = set()
        for t in types:
            t = re.sub(r"['\"]+", "", t.strip())
            if t and t not in filters:
                result.add(t)
        return list(result)

    @staticmethod
    def _split_literal(v):
        """
        Parses the literal definition code block and returns three parts: pre-part, literal-part, and post-part.

        Args:
            v: The literal definition code block to be parsed.

        Returns:
            Tuple[str, str, str]: A tuple containing the pre-part, literal-part, and post-part of the code block.
        """
        tag = "Literal["
        bix = v.find(tag)
        eix = len(v) - 1
        counter = 1
        # Bracket-match to find the "]" closing the Literal[...] span, tolerating
        # nested brackets inside it.
        for i in range(bix + len(tag), len(v) - 1):
            c = v[i]
            if c == "[":
                counter += 1
                continue
            if c == "]":
                counter -= 1
                if counter > 0:
                    continue
                eix = i
                break
        pre_l = v[0:bix]
        post_l = v[eix + 1 :]
        pre_l = re.sub(r"['\"]", "", pre_l)  # remove '"
        # NOTE: `pos_l` is a slightly misnamed local (quote-stripped post_l); it is
        # the value actually returned below.
        pos_l = re.sub(r"['\"]", "", post_l)  # remove '"
        return pre_l, v[bix : eix + 1], pos_l

    @field_validator("compositions", mode="after")
    @classmethod
    def sort(cls, lst: List) -> List:
        """
        Auto-sorts a list attribute after making changes.

        Args:
            lst (List): The list attribute to be sorted.

        Returns:
            List: The sorted list.
        """
        lst.sort()
        return lst
class DotClassInfo(BaseModel):
    """
    Repository data element describing one class parsed from a dot file.

    Attributes:
        name (str): The class name.
        package (Optional[str]): Owning package, when known.
        attributes (Dict[str, DotClassAttribute]): Attribute name -> parsed attribute.
        methods (Dict[str, DotClassMethod]): Method name -> parsed method.
        compositions (List[str]): Type names this class is composed of.
        aggregations (List[str]): Type names this class aggregates.
    """

    name: str
    package: Optional[str] = None
    attributes: Dict[str, DotClassAttribute] = Field(default_factory=dict)
    methods: Dict[str, DotClassMethod] = Field(default_factory=dict)
    compositions: List[str] = Field(default_factory=list)
    aggregations: List[str] = Field(default_factory=list)

    @field_validator("compositions", "aggregations", mode="after")
    @classmethod
    def sort(cls, values: List) -> List:
        """Keep list fields deterministically ordered after every assignment.

        Args:
            values (List): The list field being validated.

        Returns:
            List: The same elements in sorted order.
        """
        return sorted(values)
class DotClassRelationship(BaseModel):
    """
    Repository data element representing a relationship between two classes in dot format.

    Attributes:
        src (str): The source class of the relationship.
        dest (str): The destination class of the relationship.
        relationship (str): The type or nature of the relationship.
        label (Optional[str]): An optional label associated with the relationship.
    """

    src: str = ""
    dest: str = ""
    # One of GENERALIZATION / COMPOSITION / AGGREGATION (see `_split_relationship_line`).
    relationship: str = ""
    # Only populated for non-generalization edges (see `_split_relationship_line`).
    label: Optional[str] = None
class DotReturn(BaseModel):
    """
    Repository data element representing a function or method return type in dot format.

    Attributes:
        type_ (str): The type of the return ("" when absent).
        description (str): The raw dot format text the return type was parsed from.
        compositions (List[str]): Non-builtin type names referenced by the return type.
    """

    type_: str = ""
    description: str
    compositions: List[str] = Field(default_factory=list)

    @classmethod
    def parse(cls, v: str) -> "DotReturn" | None:
        """
        Parses the return type part of dot format text and returns a DotReturn object.

        Args:
            v (str): The dot format text containing the return type part to be parsed.

        Returns:
            DotReturn | None: An instance of the DotReturn class representing the parsed return type,
                or None if parsing fails.
        """
        if not v:
            # Use `cls` (not the class name) so subclasses receive instances of
            # their own type, consistent with the other `parse` classmethods.
            return cls(description=v)
        type_ = remove_white_spaces(v)
        compositions = DotClassAttribute.parse_compositions(type_)
        return cls(type_=type_, description=v, compositions=compositions)

    @field_validator("compositions", mode="after")
    @classmethod
    def sort(cls, lst: List) -> List:
        """
        Auto-sorts a list attribute after making changes.

        Args:
            lst (List): The list attribute to be sorted.

        Returns:
            List: The sorted list.
        """
        lst.sort()
        return lst
class DotClassMethod(BaseModel):
    """
    Repository data element representing a class method in dot format.

    Attributes:
        name (str): The method name.
        args (List[DotClassAttribute]): Parsed method arguments.
        return_args (Optional[DotReturn]): Parsed return type, if any.
        description (str): The raw dot format text the method was parsed from.
        aggregations (List[str]): Non-builtin type names referenced by the
            arguments and the return type.
    """

    name: str
    args: List[DotClassAttribute] = Field(default_factory=list)
    return_args: Optional[DotReturn] = None
    description: str
    aggregations: List[str] = Field(default_factory=list)

    @classmethod
    def parse(cls, v: str) -> "DotClassMethod":
        """
        Parses a dot format method text and returns a DotClassMethod object.

        Args:
            v (str): The dot format text containing method information to be parsed,
                e.g. "name(arg: type, ...): return_type".

        Returns:
            DotClassMethod: An instance of the DotClassMethod class representing the parsed method.
        """
        bix = v.find("(")
        eix = v.rfind(")")
        rix = v.rfind(":")
        # A ":" appearing before the closing ")" belongs to an argument
        # annotation, not the return type; treat that as "no return annotation".
        if rix < 0 or rix < eix:
            rix = eix
        name_part = v[0:bix].strip()
        args_part = v[bix + 1 : eix].strip()
        return_args_part = v[rix + 1 :].strip()

        name = cls._parse_name(name_part)
        args = cls._parse_args(args_part)
        return_args = DotReturn.parse(return_args_part)
        # A method "aggregates" every non-builtin type in its signature.
        aggregations = set()
        for i in args:
            aggregations.update(set(i.compositions))
        aggregations.update(set(return_args.compositions))

        return cls(name=name, args=args, description=v, return_args=return_args, aggregations=list(aggregations))

    @staticmethod
    def _parse_name(v: str) -> str:
        """
        Parses the dot format method name part and returns the method name.

        Args:
            v (str): The dot format text containing the method name part to be parsed.

        Returns:
            str: The parsed method name, with any surrounding markup
                (e.g. "<I>name</I>" — presumably pyreverse's italics for
                abstract methods, TODO confirm) stripped.
        """
        tags = [">", "</"]
        if tags[0] in v:
            bix = v.find(tags[0]) + len(tags[0])
            eix = v.rfind(tags[1])
            return v[bix:eix].strip()
        return v.strip()

    @staticmethod
    def _parse_args(v: str) -> List[DotClassAttribute]:
        """
        Parses the dot format method arguments part and returns the parsed arguments.

        Args:
            v (str): The dot format text containing the arguments part to be parsed.

        Returns:
            List[DotClassAttribute]: The parsed method arguments.
        """
        if not v:
            return []
        parts = []
        bix = 0
        # Track "[" / "]" nesting so commas inside generics (e.g. Dict[str, int])
        # do not split an argument in two.
        counter = 0
        for i in range(0, len(v)):
            c = v[i]
            if c == "[":
                counter += 1
                continue
            elif c == "]":
                counter -= 1
                continue
            elif c == "," and counter == 0:
                parts.append(v[bix:i].strip())
                bix = i + 1
        parts.append(v[bix:].strip())

        attrs = []
        for p in parts:
            if p:
                attr = DotClassAttribute.parse(p)
                attrs.append(attr)
        return attrs
class RepoParser(BaseModel):
"""
Tool to build a symbols repository from a project directory.
Attributes:
base_directory (Path): The base directory of the project.
"""
base_directory: Path = Field(default=None)
@classmethod
@handle_exception(exception_type=Exception, default_return=[])
def _parse_file(cls, file_path: Path) -> list:
    """
    Parses a Python file in the repository.

    Args:
        file_path (Path): The path to the Python file to be parsed.

    Returns:
        list: The file's top-level AST nodes, or [] when reading/parsing fails
            (errors are swallowed by the `handle_exception` decorator).
    """
    # Read explicitly as UTF-8: Path.read_text() defaults to the locale
    # encoding, which breaks on non-ASCII sources on some platforms, and the
    # rest of this module already reads files with encoding="utf-8".
    return ast.parse(file_path.read_text(encoding="utf-8")).body
def extract_class_and_function_info(self, tree, file_path) -> RepoFileInfo:
    """
    Extracts class, function, and global variable information from the Abstract Syntax Tree (AST).

    Args:
        tree: The top-level AST nodes of the Python file (see `_parse_file`).
        file_path: The path to the Python file; stored relative to `self.base_directory`.

    Returns:
        RepoFileInfo: A RepoFileInfo object containing the extracted information.
    """
    file_info = RepoFileInfo(file=str(file_path.relative_to(self.base_directory)))
    for node in tree:
        info = RepoParser.node_to_str(node)
        if info:
            file_info.page_info.append(info)
        if isinstance(node, ast.ClassDef):
            class_methods = [m.name for m in node.body if is_func(m)]
            file_info.classes.append({"name": node.name, "methods": class_methods})
        elif is_func(node):
            file_info.functions.append(node.name)
        elif isinstance(node, (ast.Assign, ast.AnnAssign)):
            # `Assign` may have several targets; `AnnAssign` always has exactly one.
            for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
                # Tuple-unpacking targets (a, b = ...) are not `ast.Name` and are skipped.
                if isinstance(target, ast.Name):
                    file_info.globals.append(target.id)
    return file_info
def generate_symbols(self) -> List[RepoFileInfo]:
    """
    Builds a symbol repository from the '.py' files in the project directory.

    Returns:
        List[RepoFileInfo]: A list of RepoFileInfo objects containing the extracted information.
    """
    files_classes = []
    directory = self.base_directory

    matching_files = []
    # Only Python sources are scanned; extend this list to support more languages.
    extensions = ["*.py"]
    for ext in extensions:
        matching_files += directory.rglob(ext)
    for path in matching_files:
        tree = self._parse_file(path)
        file_info = self.extract_class_and_function_info(tree, path)
        files_classes.append(file_info)

    return files_classes
def generate_json_structure(self, output_path: Path):
    """
    Write the repository symbol structure to `output_path` as pretty-printed JSON.

    Args:
        output_path (Path): Destination JSON file.
    """
    symbols = [info.model_dump() for info in self.generate_symbols()]
    output_path.write_text(json.dumps(symbols, indent=4))
def generate_dataframe_structure(self, output_path: Path):
    """
    Write the repository symbol structure to `output_path` as a CSV file.

    Args:
        output_path (Path): Destination CSV file.
    """
    rows = [info.model_dump() for info in self.generate_symbols()]
    frame = pd.DataFrame(rows)
    frame.to_csv(output_path, index=False)
def generate_structure(self, output_path: str | Path = None, mode="json") -> Path:
    """
    Generates the structure of the repository in a specified format.

    Args:
        output_path (str | Path): The path to the output file. Defaults to
            "<base_directory>/<base_directory.name>-structure.<mode>".
        mode (str): The output format mode, "json" (default) or "csv".
            NOTE(review): any other value silently writes nothing but still
            returns the (nonexistent) path — confirm whether this is intended.

    Returns:
        Path: The path to the generated output file.
    """
    output_file = self.base_directory / f"{self.base_directory.name}-structure.{mode}"
    output_path = Path(output_path) if output_path else output_file
    if mode == "json":
        self.generate_json_structure(output_path)
    elif mode == "csv":
        self.generate_dataframe_structure(output_path)
    return output_path
@staticmethod
def node_to_str(node) -> CodeBlockInfo | None:
    """
    Parses and converts an Abstract Syntax Tree (AST) node to a CodeBlockInfo object.

    Args:
        node: The AST node to be converted.

    Returns:
        CodeBlockInfo | None: A CodeBlockInfo object representing the parsed AST node,
            or None when the node is a `try` block (deliberately skipped) or an
            unsupported kind (a warning is logged).
    """
    if isinstance(node, ast.Try):
        return None
    if any_to_str(node) == any_to_str(ast.Expr):
        return CodeBlockInfo(
            lineno=node.lineno,
            end_lineno=node.end_lineno,
            type_name=any_to_str(node),
            tokens=RepoParser._parse_expr(node),
        )
    # Dispatch table: AST node type name -> extractor returning a str, list or dict.
    mappings = {
        any_to_str(ast.Import): lambda x: [RepoParser._parse_name(n) for n in x.names],
        any_to_str(ast.Assign): RepoParser._parse_assign,
        any_to_str(ast.ClassDef): lambda x: x.name,
        any_to_str(ast.FunctionDef): lambda x: x.name,
        any_to_str(ast.ImportFrom): lambda x: {
            "module": x.module,
            "names": [RepoParser._parse_name(n) for n in x.names],
        },
        any_to_str(ast.If): RepoParser._parse_if,
        any_to_str(ast.AsyncFunctionDef): lambda x: x.name,
        any_to_str(ast.AnnAssign): lambda x: RepoParser._parse_variable(x.target),
    }
    func = mappings.get(any_to_str(node))
    if func:
        code_block = CodeBlockInfo(lineno=node.lineno, end_lineno=node.end_lineno, type_name=any_to_str(node))
        val = func(node)
        # Store the extracted value in the slot matching its shape.
        if isinstance(val, dict):
            code_block.properties = val
        elif isinstance(val, list):
            code_block.tokens = val
        elif isinstance(val, str):
            code_block.tokens = [val]
        else:
            # Extractors are expected to return dict/list/str only.
            raise NotImplementedError(f"Not implement:{val}")
        return code_block
    logger.warning(f"Unsupported code block:{node.lineno}, {node.end_lineno}, {any_to_str(node)}")
    return None
@staticmethod
def _parse_expr(node) -> List:
    """
    Parses an expression Abstract Syntax Tree (AST) node.

    Args:
        node: The AST node representing an expression (`ast.Expr`).

    Returns:
        List: A two-element list [type name of the inner value, parsed variable/value].

    Raises:
        NotImplementedError: If the inner expression is not a Constant, Call or Tuple.
    """
    funcs = {
        any_to_str(ast.Constant): lambda x: [any_to_str(x.value), RepoParser._parse_variable(x.value)],
        any_to_str(ast.Call): lambda x: [any_to_str(x.value), RepoParser._parse_variable(x.value.func)],
        any_to_str(ast.Tuple): lambda x: [any_to_str(x.value), RepoParser._parse_variable(x.value)],
    }
    func = funcs.get(any_to_str(node.value))
    if func:
        return func(node)
    raise NotImplementedError(f"Not implement: {node.value}")
@staticmethod
def _parse_name(n):
    """
    Render an import alias node as its source text.

    Args:
        n: An alias node carrying `name` and optional `asname`
            (the elements of `ast.Import.names` / `ast.ImportFrom.names`).

    Returns:
        str: "name as asname" when an alias is present, otherwise "name".
    """
    return f"{n.name} as {n.asname}" if n.asname else n.name
@staticmethod
def _parse_if(n):
    """
    Parses an 'if' statement Abstract Syntax Tree (AST) node and collects the
    tokens (variable names/values) referenced by its condition.

    Args:
        n: The AST node representing an 'if' statement.

    Returns:
        list: Tokens referenced by the condition; possibly partial or empty when
            an unsupported construct is encountered (a warning is logged).
    """
    tokens = []
    try:
        if isinstance(n.test, ast.BoolOp):
            for v in n.test.values:
                part = RepoParser._parse_if_compare(v)
                # BUGFIX: `_parse_if_compare` usually returns a single token
                # (often a str) rather than a list; `tokens.extend(str)` would
                # splinter it into characters, and `extend(None)` would raise.
                if isinstance(part, list):
                    tokens.extend(part)
                elif part is not None:
                    tokens.append(part)
            return tokens
        if isinstance(n.test, ast.Compare):
            v = RepoParser._parse_variable(n.test.left)
            if v:
                tokens.append(v)
        if isinstance(n.test, ast.Name):
            v = RepoParser._parse_variable(n.test)
            tokens.append(v)
        if hasattr(n.test, "comparators"):
            for item in n.test.comparators:
                v = RepoParser._parse_variable(item)
                if v:
                    tokens.append(v)
        return tokens
    except Exception as e:
        logger.warning(f"Unsupported if: {n}, err:{e}")
    return tokens
@staticmethod
def _parse_if_compare(n):
    """
    Parses an 'if' condition Abstract Syntax Tree (AST) node.

    Args:
        n: The AST node representing an 'if' condition.

    Returns:
        The parsed left-hand operand — whatever `_parse_variable` yields (often
        a single str, possibly None on failure) — or [] when the node has no
        `left` attribute. NOTE(review): callers must not assume the non-empty
        result is a list.
    """
    if hasattr(n, "left"):
        return RepoParser._parse_variable(n.left)
    else:
        return []
@staticmethod
def _parse_variable(node):
    """
    Parses a variable Abstract Syntax Tree (AST) node.

    Args:
        node: The AST node representing a variable.

    Returns:
        The extracted value: a str for names/attributes, the literal value for
        `ast.Constant`, a list of dim values for `ast.Tuple`, the callee for
        `ast.Call` — or None when the node kind is unsupported or extraction
        fails (a warning is logged instead of raising).
    """
    try:
        funcs = {
            any_to_str(ast.Constant): lambda x: x.value,
            any_to_str(ast.Name): lambda x: x.id,
            # Only a single attribute level is resolved ("a.b"); deeper chains
            # whose value has no `id` fall back to just the final attribute name.
            any_to_str(ast.Attribute): lambda x: f"{x.value.id}.{x.attr}"
            if hasattr(x.value, "id")
            else f"{x.attr}",
            any_to_str(ast.Call): lambda x: RepoParser._parse_variable(x.func),
            any_to_str(ast.Tuple): lambda x: [d.value for d in x.dims],
        }
        func = funcs.get(any_to_str(node))
        if not func:
            raise NotImplementedError(f"Not implement:{node}")
        return func(node)
    except Exception as e:
        logger.warning(f"Unsupported variable:{node}, err:{e}")
@staticmethod
def _parse_assign(node):
    """
    Parses an assignment Abstract Syntax Tree (AST) node.

    Args:
        node: The `ast.Assign` node representing an assignment.

    Returns:
        list: One parsed variable per assignment target.
    """
    targets = node.targets
    return [RepoParser._parse_variable(target) for target in targets]
async def rebuild_class_views(self, path: str | Path = None):
    """
    Executes `pyreverse` to reconstruct the dot format class view repository file,
    then parses and repairs the result.

    Args:
        path (str | Path): The path to the target package directory.
            Defaults to `self.base_directory`.

    Returns:
        Tuple of (class views, relationship views, package root),
        or None when `path` does not exist.

    Raises:
        ValueError: If the directory has no `__init__.py`.
        subprocess.CalledProcessError: If `pyreverse` exits non-zero.
    """
    if not path:
        path = self.base_directory
    path = Path(path)
    if not path.exists():
        return
    init_file = path / "__init__.py"
    if not init_file.exists():
        raise ValueError("Failed to import module __init__ with error:No module named __init__.")
    output_dir = path / "__dot__"
    output_dir.mkdir(parents=True, exist_ok=True)
    # Pass argv as a list without a shell: robust against spaces/metacharacters
    # in `path`. `check=True` already raises CalledProcessError on failure, so
    # the former manual returncode check was dead code and has been dropped.
    subprocess.run(["pyreverse", str(path), "-o", "dot"], check=True, cwd=str(output_dir))
    class_view_pathname = output_dir / "classes.dot"
    class_views = await self._parse_classes(class_view_pathname)
    relationship_views = await self._parse_class_relationships(class_view_pathname)
    packages_pathname = output_dir / "packages.dot"
    class_views, relationship_views, package_root = RepoParser._repair_namespaces(
        class_views=class_views, relationship_views=relationship_views, path=path
    )
    # Clean up the generated dot files; the parsed data is all we keep.
    class_view_pathname.unlink(missing_ok=True)
    packages_pathname.unlink(missing_ok=True)
    return class_views, relationship_views, package_root
@staticmethod
async def _parse_classes(class_view_pathname: Path) -> List[DotClassInfo]:
    """
    Parses a dot format class view repository file.

    Args:
        class_view_pathname (Path): The path to the dot format class view repository file.

    Returns:
        List[DotClassInfo]: A list of DotClassInfo objects representing the parsed
            classes; empty when the file does not exist.
    """
    class_views = []
    if not class_view_pathname.exists():
        return class_views
    data = await aread(filename=class_view_pathname, encoding="utf-8")
    lines = data.split("\n")
    for line in lines:
        package_name, info = RepoParser._split_class_line(line)
        if not package_name:
            continue
        # A class node body is "name|attributes|methods"; split on unescaped "|"
        # only, since "\|" may legitimately occur inside member text.
        class_name, members, functions = re.split(r"(?<!\\)\|", info)
        class_info = DotClassInfo(name=class_name)
        # NOTE(review): `package_name` is the dot node identifier returned by
        # `_split_class_line` — presumably the package-qualified name; confirm.
        class_info.package = package_name
        for m in members.split("\n"):
            if not m:
                continue
            attr = DotClassAttribute.parse(m)
            class_info.attributes[attr.name] = attr
            # Attribute types contribute to the class's compositions.
            for i in attr.compositions:
                if i not in class_info.compositions:
                    class_info.compositions.append(i)
        for f in functions.split("\n"):
            if not f:
                continue
            method = DotClassMethod.parse(f)
            class_info.methods[method.name] = method
            # Method signature types are aggregations, unless already counted
            # as compositions through an attribute.
            for i in method.aggregations:
                if i not in class_info.compositions and i not in class_info.aggregations:
                    class_info.aggregations.append(i)
        class_views.append(class_info)
    return class_views
@staticmethod
async def _parse_class_relationships(class_view_pathname: Path) -> List[DotClassRelationship]:
    """
    Parse class-relationship edges out of a dot format class view file.

    Args:
        class_view_pathname (Path): The path to the dot format class view repository file.

    Returns:
        List[DotClassRelationship]: One entry per relationship line found;
            empty when the file does not exist.
    """
    if not class_view_pathname.exists():
        return []
    content = await aread(filename=class_view_pathname, encoding="utf-8")
    relationships = []
    for raw_line in content.split("\n"):
        parsed = RepoParser._split_relationship_line(raw_line)
        if parsed:
            relationships.append(parsed)
    return relationships
@staticmethod
def _split_class_line(line: str) -> (str, str):
    """
    Splits a dot format class line into its class name part and class members part.

    Args:
        line (str): The dot format line containing class information.

    Returns:
        Tuple[str, str]: (class name, members text with <br> tags normalized to
            newlines), or (None, None) when the line is not a class declaration.
    """
    name_end = '" ['
    if name_end not in line:
        return None, None
    split_at = line.find(name_end)
    class_name = line[:split_at].replace('"', "")
    remainder = line[split_at:]
    label_open = "label=<{"
    label_close = "}>"
    if label_open not in remainder or label_close not in remainder:
        return None, None
    start = remainder.find(label_open) + len(label_open)
    end = remainder.rfind(label_close)
    body = remainder[start:end]
    # pyreverse emits <br ...> between members; normalize them to newlines.
    return class_name, re.sub(r"<br[^>]*>", "\n", body)
@staticmethod
def _split_relationship_line(line: str) -> DotClassRelationship:
    """
    Parses a dot format line about the relationship of two classes and returns 'Generalize', 'Composite',
    or 'Aggregate'.

    Args:
        line (str): The dot format line containing relationship information,
            e.g. '"A" -> "B" [arrowhead="empty" ...];'.

    Returns:
        DotClassRelationship: The object of relationship representing either 'Generalize', 'Composite',
            or 'Aggregate' relationship, or None when the line is not an edge declaration.
    """
    splitters = [" -> ", " [", "];"]
    idxs = []
    for tag in splitters:
        if tag not in line:
            return None
        idxs.append(line.find(tag))
    ret = DotClassRelationship()
    ret.src = line[0 : idxs[0]].strip('"')
    ret.dest = line[idxs[0] + len(splitters[0]) : idxs[1]].strip('"')
    properties = line[idxs[1] + len(splitters[1]) : idxs[2]].strip(" ")
    # The arrowhead shape encodes the UML relationship kind in pyreverse output.
    mappings = {
        'arrowhead="empty"': GENERALIZATION,
        'arrowhead="diamond"': COMPOSITION,
        'arrowhead="odiamond"': AGGREGATION,
    }
    for k, v in mappings.items():
        if k in properties:
            ret.relationship = v
            if v != GENERALIZATION:
                # Only composition/aggregation edges carry a label.
                ret.label = RepoParser._get_label(properties)
            break
    return ret
@staticmethod
def _get_label(line: str) -> str:
    """
    Extracts the value of the `label="..."` property from a dot format line.

    Args:
        line (str): The dot format line containing label information.

    Returns:
        str: The label value, or "" when the line carries no label.
    """
    marker = 'label="'
    start = line.find(marker)
    if start < 0:
        return ""
    start += len(marker)
    end = line.find('"', start)
    return line[start:end]
@staticmethod
def _create_path_mapping(path: str | Path) -> Dict[str, str]:
    """
    Creates a mapping table between source code files' paths and module names.

    Args:
        path (str | Path): The path to the source code files or directory.

    Returns:
        Dict[str, str]: A dictionary mapping dotted module names to their
            corresponding source code file paths.
    """
    # Use as_posix() so the path separator is always "/" before the "/" -> "."
    # module-name conversion; str(Path) yields "\\" on Windows, which the old
    # replace("/", ".") silently left in place.
    mappings = {
        Path(path).as_posix().replace("/", "."): str(path),
    }
    files = []
    try:
        directory_path = Path(path)
        if not directory_path.exists():
            return mappings
        for file_path in directory_path.iterdir():
            if file_path.is_file():
                files.append(str(file_path))
            else:
                # Recurse into subpackages and merge their mappings.
                subfolder_files = RepoParser._create_path_mapping(path=file_path)
                mappings.update(subfolder_files)
    except Exception as e:
        logger.error(f"Error: {e}")
    for f in files:
        # Strip the extension, then convert the path to a dotted module name.
        mappings[Path(f).with_suffix("").as_posix().replace("/", ".")] = str(f)
    return mappings
@staticmethod
def _repair_namespaces(
class_views: List[DotClassInfo], relationship_views: List[DotClassRelationship], path: str | Path
) -> (List[DotClassInfo], List[DotClassRelationship], str):
"""
Augments namespaces to the path-prefixed classes and relationships.
Args:
class_views (List[DotClassInfo]): List of DotClassInfo objects representing class views.
relationship_views (List[DotClassRelationship]): List of DotClassRelationship objects representing
relationships.
path (str | Path): The path to the source code files or directory.
Returns:
Tuple[List[DotClassInfo], List[DotClassRelationship], str]: A tuple containing the augmented class views,
relationships, and the root path of the package.
"""
if not class_views:
return [], [], ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | true |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/11 17:25
@Author : alexanderwu
@File : context_mixin.py
"""
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field, model_validator
from metagpt.config2 import Config
from metagpt.context import Context
from metagpt.provider.base_llm import BaseLLM
class ContextMixin(BaseModel):
    """Mixin class for context and config.

    Resolution order is "private first, then public": a privately-set
    context/config/llm wins over the one reachable through `self.context`.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    # Pydantic has bug on _private_attr when using inheritance, so we use private_* instead
    # - https://github.com/pydantic/pydantic/issues/7142
    # - https://github.com/pydantic/pydantic/issues/7083
    # - https://github.com/pydantic/pydantic/issues/7091

    # Env/Role/Action will use this context as private context, or use self.context as public context
    private_context: Optional[Context] = Field(default=None, exclude=True)
    # Env/Role/Action will use this config as private config, or use self.context.config as public config
    private_config: Optional[Config] = Field(default=None, exclude=True)
    # Env/Role/Action will use this llm as private llm, or use self.context._llm instance
    private_llm: Optional[BaseLLM] = Field(default=None, exclude=True)

    @model_validator(mode="after")
    def validate_context_mixin_extra(self):
        # Pull context/config/llm out of the extra kwargs once the model is built.
        self._process_context_mixin_extra()
        return self

    def _process_context_mixin_extra(self):
        """Process the extra field"""
        kwargs = self.model_extra or {}
        self.set_context(kwargs.pop("context", None))
        self.set_config(kwargs.pop("config", None))
        self.set_llm(kwargs.pop("llm", None))

    def set(self, k, v, override=False):
        """Set attribute `k` to `v`.

        Without `override`, only assigns when the current value is falsy.
        NOTE(review): a falsy-but-deliberately-set value (0, "", []) is treated
        as unset and will be overwritten — confirm this is intended.
        """
        if override or not self.__dict__.get(k):
            self.__dict__[k] = v

    def set_context(self, context: Context, override=True):
        """Set context (replaces any existing one by default)."""
        self.set("private_context", context, override)

    def set_config(self, config: Config, override=False):
        """Set config; eagerly initializes the llm when a config is provided."""
        self.set("private_config", config, override)
        if config is not None:
            _ = self.llm  # init llm

    def set_llm(self, llm: BaseLLM, override=False):
        """Set llm"""
        self.set("private_llm", llm, override)

    @property
    def config(self) -> Config:
        """Role config: role config > context config"""
        if self.private_config:
            return self.private_config
        return self.context.config

    @config.setter
    def config(self, config: Config) -> None:
        """Set config"""
        self.set_config(config)

    @property
    def context(self) -> Context:
        """Role context: role context > context.

        NOTE(review): when no private context is set, every access constructs a
        brand-new Context(), so repeated reads do not share state — confirm
        before relying on mutating `self.context`.
        """
        if self.private_context:
            return self.private_context
        return Context()

    @context.setter
    def context(self, context: Context) -> None:
        """Set context"""
        self.set_context(context)

    @property
    def llm(self) -> BaseLLM:
        """Role llm: if not existed, init from role.config"""
        # print(f"class:{self.__class__.__name__}({self.name}), llm: {self._llm}, llm_config: {self._llm_config}")
        if not self.private_llm:
            # Lazily build the llm (with cost tracking) from the resolved config.
            self.private_llm = self.context.llm_with_cost_manager_from_llm_config(self.config.llm)
        return self.private_llm

    @llm.setter
    def llm(self, llm: BaseLLM) -> None:
        """Set llm"""
        self.private_llm = llm
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from loguru import logger
import metagpt
def get_metagpt_package_root():
    """Get the root directory of the installed package."""
    # The package root is two levels up from metagpt/__init__.py.
    root = Path(metagpt.__file__).parent.parent
    logger.info(f"Package root set to {str(root)}")
    return root
def get_metagpt_root():
    """Get the project root directory."""
    # An explicit override via the environment variable always wins.
    project_root_env = os.getenv("METAGPT_PROJECT_ROOT")
    if project_root_env:
        project_root = Path(project_root_env)
        logger.info(f"PROJECT_ROOT set from environment variable to {str(project_root)}")
        return project_root
    # Otherwise accept the package root only when it looks like a repo checkout;
    # fall back to the current working directory.
    project_root = get_metagpt_package_root()
    markers = (".git", ".project_root", ".gitignore")
    if not any((project_root / marker).exists() for marker in markers):
        project_root = Path.cwd()
    return project_root
# METAGPT PROJECT ROOT AND VARS
CONFIG_ROOT = Path.home() / ".metagpt"
METAGPT_ROOT = get_metagpt_root()  # Dependent on METAGPT_PROJECT_ROOT
DEFAULT_WORKSPACE_ROOT = METAGPT_ROOT / "workspace"

EXAMPLE_PATH = METAGPT_ROOT / "examples"
EXAMPLE_DATA_PATH = EXAMPLE_PATH / "data"
DATA_PATH = METAGPT_ROOT / "data"
DABENCH_PATH = EXAMPLE_PATH / "di/InfiAgent-DABench/data"
EXAMPLE_BENCHMARK_PATH = EXAMPLE_PATH / "data/rag_bm"
TEST_DATA_PATH = METAGPT_ROOT / "tests/data"
RESEARCH_PATH = DATA_PATH / "research"
TUTORIAL_PATH = DATA_PATH / "tutorial_docx"
INVOICE_OCR_TABLE_PATH = DATA_PATH / "invoice_table"

UT_PATH = DATA_PATH / "ut"
SWAGGER_PATH = UT_PATH / "files/api/"
UT_PY_PATH = UT_PATH / "files/ut/"
API_QUESTIONS_PATH = UT_PATH / "files/question/"

SERDESER_PATH = DEFAULT_WORKSPACE_ROOT / "storage"  # TODO to store `storage` under the individual generated project

TMP = METAGPT_ROOT / "tmp"

SOURCE_ROOT = METAGPT_ROOT / "metagpt"
PROMPT_PATH = SOURCE_ROOT / "prompts"
SKILL_DIRECTORY = SOURCE_ROOT / "skills"
TOOL_SCHEMA_PATH = METAGPT_ROOT / "metagpt/tools/schemas"
TOOL_LIBS_PATH = METAGPT_ROOT / "metagpt/tools/libs"

# TEMPLATE PATH
TEMPLATE_FOLDER_PATH = METAGPT_ROOT / "template"
VUE_TEMPLATE_PATH = TEMPLATE_FOLDER_PATH / "vue_template"
REACT_TEMPLATE_PATH = TEMPLATE_FOLDER_PATH / "react_template"

# RUNTIME CONSTANTS
MEM_TTL = 24 * 30 * 3600  # memory time-to-live: 30 days, in seconds

# Message routing metadata keys and wildcard addresses
MESSAGE_ROUTE_FROM = "sent_from"
MESSAGE_ROUTE_TO = "send_to"
MESSAGE_ROUTE_CAUSE_BY = "cause_by"
MESSAGE_META_ROLE = "role"
MESSAGE_ROUTE_TO_ALL = "<all>"
MESSAGE_ROUTE_TO_NONE = "<none>"
MESSAGE_ROUTE_TO_SELF = "<self>"  # Add this tag to replace `ActionOutput`

# Well-known document filenames and repo-relative folders
REQUIREMENT_FILENAME = "requirement.txt"
BUGFIX_FILENAME = "bugfix.txt"
PACKAGE_REQUIREMENTS_FILENAME = "requirements.txt"

DOCS_FILE_REPO = "docs"
PRDS_FILE_REPO = "docs/prd"
SYSTEM_DESIGN_FILE_REPO = "docs/system_design"
TASK_FILE_REPO = "docs/task"
CODE_PLAN_AND_CHANGE_FILE_REPO = "docs/code_plan_and_change"
COMPETITIVE_ANALYSIS_FILE_REPO = "resources/competitive_analysis"
DATA_API_DESIGN_FILE_REPO = "resources/data_api_design"
SEQ_FLOW_FILE_REPO = "resources/seq_flow"
SYSTEM_DESIGN_PDF_FILE_REPO = "resources/system_design"
PRD_PDF_FILE_REPO = "resources/prd"
TASK_PDF_FILE_REPO = "resources/api_spec_and_task"
CODE_PLAN_AND_CHANGE_PDF_FILE_REPO = "resources/code_plan_and_change"
TEST_CODES_FILE_REPO = "tests"
TEST_OUTPUTS_FILE_REPO = "test_outputs"
CODE_SUMMARIES_FILE_REPO = "docs/code_summary"
CODE_SUMMARIES_PDF_FILE_REPO = "resources/code_summary"
RESOURCES_FILE_REPO = "resources"
SD_OUTPUT_FILE_REPO = DEFAULT_WORKSPACE_ROOT
GRAPH_REPO_FILE_REPO = "docs/graph_repo"
VISUAL_GRAPH_REPO_FILE_REPO = "resources/graph_db"
CLASS_VIEW_FILE_REPO = "docs/class_view"

YAPI_URL = "http://yapi.deepwisdomai.com/"
SD_URL = "http://172.31.0.51:49094"  # NOTE(review): hard-coded internal Stable Diffusion endpoint

DEFAULT_LANGUAGE = "English"
DEFAULT_MAX_TOKENS = 1500
COMMAND_TOKENS = 500
BRAIN_MEMORY = "BRAIN_MEMORY"
SKILL_PATH = "SKILL_PATH"
SERPER_API_KEY = "SERPER_API_KEY"
DEFAULT_TOKEN_SIZE = 500

# format
BASE64_FORMAT = "base64"

# REDIS
REDIS_KEY = "REDIS_KEY"

# Message id
IGNORED_MESSAGE_ID = "0"

# Class Relationship
GENERALIZATION = "Generalize"
COMPOSITION = "Composite"
AGGREGATION = "Aggregate"

# Timeout
USE_CONFIG_TIMEOUT = 0  # Using llm.timeout configuration.
LLM_API_TIMEOUT = 300  # seconds

# Assistant alias
ASSISTANT_ALIAS = "response"

# Markdown
MARKDOWN_TITLE_PREFIX = "## "

# Reporter
METAGPT_REPORTER_DEFAULT_URL = os.environ.get("METAGPT_REPORTER_URL", "")

# Metadata defines
AGENT = "agent"
IMAGES = "images"

# SWE agent
SWE_SETUP_PATH = get_metagpt_package_root() / "metagpt/tools/swe_agent_commands/setup_default.sh"

# experience pool
EXPERIENCE_MASK = "<experience>"

# TeamLeader's name
TEAMLEADER_NAME = "Mike"

DEFAULT_MIN_TOKEN_COUNT = 10000
DEFAULT_MAX_TOKEN_COUNT = 100000000
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/3/11 19:16
@Author : alexanderwu
@File : startup.py
"""
# DEPRECATED: This file is deprecated and will be removed in the future.
# The startup.py implementation has been moved to software_company.py
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/12 00:30
@Author : alexanderwu
@File : team.py
@Modified By: mashenquan, 2023/11/27. Add an archiving operation after completing the project, as specified in
Section 2.2.3.3 of RFC 135.
"""
import warnings
from pathlib import Path
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict, Field
from metagpt.const import SERDESER_PATH
from metagpt.context import Context
from metagpt.environment import Environment
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.utils.common import (
NoMoneyException,
read_json_file,
serialize_decorator,
write_json_file,
)
class Team(BaseModel):
"""
Team: Possesses one or more roles (agents), SOP (Standard Operating Procedures), and a env for instant messaging,
dedicated to env any multi-agent activity, such as collaboratively writing executable code.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
env: Optional[Environment] = None
investment: float = Field(default=10.0)
idea: str = Field(default="")
use_mgx: bool = Field(default=True)
def __init__(self, context: Context = None, **data: Any):
    """
    Initialize the team and its environment.

    Args:
        context (Context): Shared context; a fresh one is created when omitted.
        **data: Pydantic field values, plus optional "roles" (hired immediately)
            and "env_desc" (environment description).
    """
    # Python-3 style zero-argument super(); equivalent to super(Team, self).
    super().__init__(**data)
    ctx = context or Context()
    if not self.env and not self.use_mgx:
        self.env = Environment(context=ctx)
    elif not self.env and self.use_mgx:
        self.env = MGXEnv(context=ctx)
    else:
        self.env.context = ctx  # The `env` object is allocated by deserialization
    if "roles" in data:
        self.hire(data["roles"])
    if "env_desc" in data:
        self.env.desc = data["env_desc"]
def serialize(self, stg_path: Path = None):
stg_path = SERDESER_PATH.joinpath("team") if stg_path is None else stg_path
team_info_path = stg_path.joinpath("team.json")
serialized_data = self.model_dump()
serialized_data["context"] = self.env.context.serialize()
write_json_file(team_info_path, serialized_data)
@classmethod
def deserialize(cls, stg_path: Path, context: Context = None) -> "Team":
"""stg_path = ./storage/team"""
# recover team_info
team_info_path = stg_path.joinpath("team.json")
if not team_info_path.exists():
raise FileNotFoundError(
"recover storage meta file `team.json` not exist, " "not to recover and please start a new project."
)
team_info: dict = read_json_file(team_info_path)
ctx = context or Context()
ctx.deserialize(team_info.pop("context", None))
team = Team(**team_info, context=ctx)
return team
def hire(self, roles: list[Role]):
"""Hire roles to cooperate"""
self.env.add_roles(roles)
@property
def cost_manager(self):
"""Get cost manager"""
return self.env.context.cost_manager
def invest(self, investment: float):
"""Invest company. raise NoMoneyException when exceed max_budget."""
self.investment = investment
self.cost_manager.max_budget = investment
logger.info(f"Investment: ${investment}.")
def _check_balance(self):
if self.cost_manager.total_cost >= self.cost_manager.max_budget:
raise NoMoneyException(self.cost_manager.total_cost, f"Insufficient funds: {self.cost_manager.max_budget}")
def run_project(self, idea, send_to: str = ""):
"""Run a project from publishing user requirement."""
self.idea = idea
# Human requirement.
self.env.publish_message(Message(content=idea))
def start_project(self, idea, send_to: str = ""):
"""
Deprecated: This method will be removed in the future.
Please use the `run_project` method instead.
"""
warnings.warn(
"The 'start_project' method is deprecated and will be removed in the future. "
"Please use the 'run_project' method instead.",
DeprecationWarning,
stacklevel=2,
)
return self.run_project(idea=idea, send_to=send_to)
@serialize_decorator
async def run(self, n_round=3, idea="", send_to="", auto_archive=True):
"""Run company until target round or no money"""
if idea:
self.run_project(idea=idea, send_to=send_to)
while n_round > 0:
if self.env.is_idle:
logger.debug("All roles are idle.")
break
n_round -= 1
self._check_balance()
await self.env.run()
logger.debug(f"max {n_round=} left.")
self.env.archive(auto_archive)
return self.env.history
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/__init__.py | metagpt/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/4/24 22:26
# @Author : alexanderwu
# @File : __init__.py
from metagpt import _compat as _ # noqa: F401
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/context.py | metagpt/context.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4 16:32
@Author : alexanderwu
@File : context.py
"""
from __future__ import annotations
import os
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, Field
from metagpt.config2 import Config
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.llm_provider_registry import create_llm_instance
from metagpt.utils.cost_manager import (
CostManager,
FireworksCostManager,
TokenCostManager,
)
class AttrDict(BaseModel):
    """A dict-like object that allows access to keys as attributes, compatible with Pydantic."""

    model_config = ConfigDict(extra="allow")

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Mirror kwargs into __dict__ so attribute access bypasses pydantic's field machinery.
        self.__dict__.update(kwargs)

    def __getattr__(self, key):
        # Missing attributes resolve to None instead of raising AttributeError.
        return self.__dict__.get(key, None)

    def __setattr__(self, key, value):
        self.__dict__[key] = value

    def __delattr__(self, key):
        if key in self.__dict__:
            del self.__dict__[key]
        else:
            raise AttributeError(f"No such attribute: {key}")

    def set(self, key, val: Any):
        """dict-style setter."""
        self.__dict__[key] = val

    def get(self, key, default: Any = None):
        """dict-style getter with a default."""
        return self.__dict__.get(key, default)

    def remove(self, key):
        """Delete `key` if present (delegates to __delattr__)."""
        if key in self.__dict__:
            self.__delattr__(key)
class Context(BaseModel):
    """Env context for MetaGPT: shared config, ad-hoc kwargs, cost accounting and LLM factory."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    kwargs: AttrDict = AttrDict()
    config: Config = Field(default_factory=Config.default)

    cost_manager: CostManager = CostManager()

    _llm: Optional[BaseLLM] = None

    def new_environ(self):
        """Return a new os.environ object"""
        env = os.environ.copy()
        # i = self.options
        # env.update({k: v for k, v in i.items() if isinstance(v, str)})
        return env

    def _select_costmanager(self, llm_config: LLMConfig) -> CostManager:
        """Return a CostManager instance matching the provider's accounting scheme."""
        if llm_config.api_type == LLMType.FIREWORKS:
            return FireworksCostManager()
        elif llm_config.api_type == LLMType.OPEN_LLM:
            return TokenCostManager()
        else:
            return self.cost_manager

    def llm(self) -> BaseLLM:
        """Return a LLM instance, fixme: support cache"""
        # if self._llm is None:
        self._llm = create_llm_instance(self.config.llm)
        if self._llm.cost_manager is None:
            self._llm.cost_manager = self._select_costmanager(self.config.llm)
        return self._llm

    def llm_with_cost_manager_from_llm_config(self, llm_config: LLMConfig) -> BaseLLM:
        """Return a LLM instance built from an explicit config, fixme: support cache"""
        llm = create_llm_instance(llm_config)
        if llm.cost_manager is None:
            llm.cost_manager = self._select_costmanager(llm_config)
        return llm

    def serialize(self) -> Dict[str, Any]:
        """Serialize the object's attributes into a dictionary.

        Returns:
            Dict[str, Any]: A dictionary containing serialized data.
        """
        return {
            "kwargs": {k: v for k, v in self.kwargs.__dict__.items()},
            "cost_manager": self.cost_manager.model_dump_json(),
        }

    def deserialize(self, serialized_data: Dict[str, Any]):
        """Deserialize the given serialized data and update the object's attributes accordingly.

        Args:
            serialized_data (Dict[str, Any]): A dictionary containing serialized data.
        """
        if not serialized_data:
            return
        kwargs = serialized_data.get("kwargs")
        if kwargs:
            for k, v in kwargs.items():
                self.kwargs.set(k, v)

        cost_manager = serialized_data.get("cost_manager")
        if cost_manager:
            # BUG FIX: `model_validate_json` is a classmethod that returns a NEW model;
            # the previous code called it on the instance and discarded the result,
            # so the recovered cost state was silently thrown away.
            self.cost_manager = CostManager.model_validate_json(cost_manager)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/logs.py | metagpt/logs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/1 12:41
@Author : alexanderwu
@File : logs.py
"""
from __future__ import annotations
import asyncio
import inspect
import sys
from contextvars import ContextVar
from datetime import datetime
from functools import partial
from typing import Any
from loguru import logger as _logger
from pydantic import BaseModel, Field
from metagpt.const import METAGPT_ROOT
# Per-task queue used to fan out streamed LLM tokens (populated via create_llm_stream_queue).
LLM_STREAM_QUEUE: ContextVar[asyncio.Queue] = ContextVar("llm-stream")


class ToolLogItem(BaseModel):
    # Serialized under the alias "type"; names the data type of the `value` field.
    type_: str = Field(alias="type", default="str", description="Data type of `value` field.")
    name: str
    value: Any


TOOL_LOG_END_MARKER = ToolLogItem(
    type="str", name="end_marker", value="\x18\x19\x1B\x18"
)  # A special log item to suggest the end of a stream log

# Current console log level; kept in sync by define_log_level and read by _llm_stream_log.
_print_level = "INFO"
def define_log_level(print_level="INFO", logfile_level="DEBUG", name: str = None):
    """Adjust the log level to above level"""
    global _print_level
    _print_level = print_level

    # Log files are named "<prefix>_<YYYYMMDD>.txt", or just the date when no prefix is given.
    date_stamp = datetime.now().strftime("%Y%m%d")
    log_name = f"{name}_{date_stamp}" if name else date_stamp

    _logger.remove()
    _logger.add(sys.stderr, level=print_level)
    _logger.add(METAGPT_ROOT / f"logs/{log_name}.txt", level=logfile_level)
    return _logger


logger = define_log_level()
def log_llm_stream(msg):
    """
    Logs a message to the LLM stream.

    Args:
        msg: The message to be logged.

    Notes:
        If the LLM_STREAM_QUEUE has not been set (e.g., if `create_llm_stream_queue` has not been called),
        the message will not be added to the LLM stream queue.
    """
    stream_queue = get_llm_stream_queue()
    if stream_queue is not None:
        stream_queue.put_nowait(msg)
    _llm_stream_log(msg)
def log_tool_output(output: ToolLogItem | list[ToolLogItem], tool_name: str = ""):
    """interface for logging tool output, can be set to log tool output in different ways to different places with set_tool_output_logfunc"""
    _tool_output_log(output=output, tool_name=tool_name)


async def log_tool_output_async(output: ToolLogItem | list[ToolLogItem], tool_name: str = ""):
    """async interface for logging tool output, used when output contains async object"""
    await _tool_output_log_async(output=output, tool_name=tool_name)


async def get_human_input(prompt: str = ""):
    """interface for getting human input, can be set to get input from different sources with set_human_input_func"""
    # The installed input function may be sync (default: builtin input) or async; support both.
    if inspect.iscoroutinefunction(_get_human_input):
        return await _get_human_input(prompt)
    else:
        return _get_human_input(prompt)
def set_llm_stream_logfunc(func):
    """Replace the sink used by log_llm_stream (default: print without a newline)."""
    global _llm_stream_log
    _llm_stream_log = func


def set_tool_output_logfunc(func):
    """Replace the sink used by log_tool_output (default: a no-op)."""
    global _tool_output_log
    _tool_output_log = func


async def set_tool_output_logfunc_async(func):
    # async version
    # NOTE(review): this setter awaits nothing; being `async` looks unnecessary — confirm callers.
    global _tool_output_log_async
    _tool_output_log_async = func


def set_human_input_func(func):
    """Replace the input source used by get_human_input; func may be sync or async."""
    global _get_human_input
    _get_human_input = func


# Default sinks, overridable via the setters above.
_llm_stream_log = partial(print, end="")


_tool_output_log = (
    lambda *args, **kwargs: None
)  # a dummy function to avoid errors if set_tool_output_logfunc is not called


async def _tool_output_log_async(*args, **kwargs):
    # async version
    pass
def create_llm_stream_queue():
    """Creates a new LLM stream queue and sets it in the context variable.

    Returns:
        The newly created asyncio.Queue instance.
    """
    new_queue = asyncio.Queue()
    LLM_STREAM_QUEUE.set(new_queue)
    return new_queue


def get_llm_stream_queue():
    """Retrieves the current LLM stream queue from the context variable.

    Returns:
        The asyncio.Queue instance if set, otherwise None.
    """
    return LLM_STREAM_QUEUE.get(None)


_get_human_input = input  # get human input from console by default


def _llm_stream_log(msg):
    # Only echo streamed tokens to stdout when console logging is at INFO.
    if _print_level == "INFO":
        print(msg, end="")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/management/skill_manager.py | metagpt/management/skill_manager.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/5 01:44
@Author : alexanderwu
@File : skill_manager.py
@Modified By: mashenquan, 2023/8/20. Remove useless `llm`
"""
from metagpt.actions import Action
from metagpt.const import PROMPT_PATH
from metagpt.document_store.chromadb_store import ChromaStore
from metagpt.logs import logger
# A Skill is simply an Action under a domain-specific alias.
Skill = Action


class SkillManager:
    """Used to manage all skills"""

    def __init__(self):
        self._store = ChromaStore("skill_manager")
        # FIX: the annotation previously used slice syntax `dict[str:Skill]`;
        # a mapping type must be written `dict[str, Skill]`.
        self._skills: dict[str, Skill] = {}

    def add_skill(self, skill: Skill):
        """
        Add a skill, add the skill to the skill pool and searchable storage
        :param skill: Skill
        :return:
        """
        self._skills[skill.name] = skill
        self._store.add(skill.desc, {"name": skill.name, "desc": skill.desc}, skill.name)

    def del_skill(self, skill_name: str):
        """
        Delete a skill, remove the skill from the skill pool and searchable storage
        :param skill_name: Skill name
        :return:
        """
        self._skills.pop(skill_name)
        self._store.delete(skill_name)

    def get_skill(self, skill_name: str) -> Skill:
        """
        Obtain a specific skill by skill name
        :param skill_name: Skill name
        :return: Skill
        """
        return self._skills.get(skill_name)

    def retrieve_skill(self, desc: str, n_results: int = 2) -> list[Skill]:
        """
        Obtain skills through the search engine
        :param desc: Skill description
        :return: Multiple skills
        """
        return self._store.search(desc, n_results=n_results)["ids"][0]

    def retrieve_skill_scored(self, desc: str, n_results: int = 2) -> dict:
        """
        Obtain skills through the search engine
        :param desc: Skill description
        :return: Dictionary consisting of skills and scores
        """
        return self._store.search(desc, n_results=n_results)

    def generate_skill_desc(self, skill: Skill) -> str:
        """
        Generate descriptive text for each skill
        :param skill:
        :return: the prompt template text
        """
        path = PROMPT_PATH / "generate_skill.md"
        text = path.read_text()
        logger.info(text)
        # FIX: the signature promises `str` but the function previously returned None.
        return text
if __name__ == "__main__":
    # Smoke test: render the skill-description prompt for a bare Action.
    manager = SkillManager()
    manager.generate_skill_desc(Action())
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/management/__init__.py | metagpt/management/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/30 20:58
@Author : alexanderwu
@File : __init__.py
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/tool_data_type.py | metagpt/tools/tool_data_type.py | from pydantic import BaseModel
class ToolSchema(BaseModel):
    # Human/LLM-readable description of what the tool does.
    description: str
class Tool(BaseModel):
    name: str
    # Source file path where the tool's code lives.
    path: str
    # Parsed schema; kept as a loose dict because validation is best-effort (see ToolRegistry).
    schemas: dict = {}
    code: str = ""
    tags: list[str] = []
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/moderation.py | metagpt/tools/moderation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/9/26 14:27
@Author : zhanglei
@File : moderation.py
"""
from typing import Union
from metagpt.provider.base_llm import BaseLLM
class Moderation:
    """Thin wrapper over an LLM provider's content-moderation endpoint."""

    def __init__(self, llm: BaseLLM):
        self.llm = llm

    def handle_moderation_results(self, results):
        """Reduce raw moderation results to flagged status plus the names of flagged categories."""
        processed = []
        for result in results:
            flagged_names = [name for name, hit in result.categories.dict().items() if hit]
            processed.append({"flagged": result.flagged, "true_categories": flagged_names})
        return processed

    async def amoderation_with_categories(self, content: Union[str, list[str]]):
        """Moderate `content`; return one dict per item with its flagged categories."""
        outcome = []
        if content:
            raw = await self.llm.amoderation(content=content)
            outcome = self.handle_moderation_results(raw.results)
        return outcome

    async def amoderation(self, content: Union[str, list[str]]):
        """Moderate `content`; return one boolean `flagged` value per item."""
        if not content:
            return []
        raw = await self.llm.amoderation(content=content)
        return [item.flagged for item in raw.results]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/metagpt_oas3_api_svc.py | metagpt/tools/metagpt_oas3_api_svc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/17
@Author : mashenquan
@File : metagpt_oas3_api_svc.py
@Desc : MetaGPT OpenAPI Specification 3.0 REST API service
curl -X 'POST' \
'http://localhost:8080/openapi/greeting/dave' \
-H 'accept: text/plain' \
-H 'Content-Type: application/json' \
-d '{}'
"""
from pathlib import Path
import connexion
def oas_http_svc():
    """Start the OAS 3.0 OpenAPI HTTP service"""
    print("http://localhost:8080/oas3/ui/")
    # Specs live under <repo-root>/docs/.well-known relative to this file.
    spec_dir = Path(__file__).parent.parent.parent / "docs/.well-known"
    svc = connexion.AsyncApp(__name__, specification_dir=str(spec_dir))
    svc.add_api("metagpt_oas3_api.yaml")
    svc.add_api("openapi.yaml")
    svc.run(port=8080)


if __name__ == "__main__":
    oas_http_svc()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_serpapi.py | metagpt/tools/search_engine_serpapi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/23 18:27
@Author : alexanderwu
@File : search_engine_serpapi.py
"""
import warnings
from typing import Any, Dict, Optional
import aiohttp
from pydantic import BaseModel, ConfigDict, Field, model_validator
class SerpAPIWrapper(BaseModel):
    """Async client for the SerpAPI search endpoint (Google engine by default)."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    api_key: str
    # Default engine parameters; merged with the per-query params in get_params().
    params: dict = Field(
        default_factory=lambda: {
            "engine": "google",
            "google_domain": "google.com",
            "gl": "us",
            "hl": "en",
        }
    )
    url: str = "https://serpapi.com/search"
    aiosession: Optional[aiohttp.ClientSession] = None  # optional caller-managed session
    proxy: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_serpapi(cls, values: dict) -> dict:
        """Map the deprecated `serpapi_api_key` kwarg onto `api_key` and require a key."""
        if "serpapi_api_key" in values:
            values.setdefault("api_key", values["serpapi_api_key"])
            warnings.warn("`serpapi_api_key` is deprecated, use `api_key` instead", DeprecationWarning, stacklevel=2)

        if "api_key" not in values:
            raise ValueError(
                "To use serpapi search engine, make sure you provide the `api_key` when constructing an object. You can obtain"
                " an API key from https://serpapi.com/."
            )
        return values

    async def run(self, query, max_results: int = 8, as_string: bool = True, **kwargs: Any) -> str:
        """Run query through SerpAPI and parse result async.

        NOTE(review): despite the `-> str` annotation, a list of dicts is returned
        when `as_string` is False — confirm and widen the annotation upstream.
        """
        result = await self.results(query, max_results)
        return self._process_response(result, as_string=as_string)

    async def results(self, query: str, max_results: int) -> dict:
        """Use aiohttp to run query through SerpAPI and return the results async."""
        params = self.get_params(query)
        params["source"] = "python"
        params["num"] = max_results
        params["output"] = "json"
        # Use a throwaway session unless the caller supplied one via `aiosession`.
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(self.url, params=params, proxy=self.proxy) as response:
                    response.raise_for_status()
                    res = await response.json()
        else:
            async with self.aiosession.get(self.url, params=params, proxy=self.proxy) as response:
                response.raise_for_status()
                res = await response.json()

        return res

    def get_params(self, query: str) -> Dict[str, str]:
        """Get parameters for SerpAPI."""
        _params = {
            "api_key": self.api_key,
            "q": query,
        }
        params = {**self.params, **_params}
        return params

    @staticmethod
    def _process_response(res: dict, as_string: bool) -> str:
        """Process response from SerpAPI.

        Picks the best single answer (`toret`) from the richest field available
        (error > answer_box > sports > knowledge graph > first organic result),
        then builds a focused list (`toret_l`) of title/snippet/link entries.
        """
        # logger.debug(res)
        focus = ["title", "snippet", "link"]
        get_focused = lambda x: {i: j for i, j in x.items() if i in focus}

        if "error" in res.keys():
            if res["error"] == "Google hasn't returned any results for this query.":
                toret = "No good search result found"
            else:
                raise ValueError(f"Got error from SerpAPI: {res['error']}")
        elif "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
            toret = res["answer_box"]["answer"]
        elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
            toret = res["answer_box"]["snippet"]
        elif "answer_box" in res.keys() and "snippet_highlighted_words" in res["answer_box"].keys():
            toret = res["answer_box"]["snippet_highlighted_words"][0]
        elif "sports_results" in res.keys() and "game_spotlight" in res["sports_results"].keys():
            toret = res["sports_results"]["game_spotlight"]
        elif "knowledge_graph" in res.keys() and "description" in res["knowledge_graph"].keys():
            toret = res["knowledge_graph"]["description"]
        elif "snippet" in res["organic_results"][0].keys():
            # NOTE(review): this branch assumes `organic_results` is present and non-empty — confirm.
            toret = res["organic_results"][0]["snippet"]
        else:
            toret = "No good search result found"

        toret_l = []
        if "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
            toret_l += [get_focused(res["answer_box"])]
        if res.get("organic_results"):
            toret_l += [get_focused(i) for i in res.get("organic_results")]

        return str(toret) + "\n" + str(toret_l) if as_string else toret_l
if __name__ == "__main__":
    import fire

    # NOTE(review): `SerpAPIWrapper()` without `api_key` raises ValueError in the validator —
    # confirm how the API key is meant to be supplied when running this module directly.
    fire.Fire(SerpAPIWrapper().run)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_ddg.py | metagpt/tools/search_engine_ddg.py | #!/usr/bin/env python
from __future__ import annotations
import asyncio
import json
from concurrent import futures
from typing import Literal, Optional, overload
from pydantic import BaseModel, ConfigDict
try:
from duckduckgo_search import DDGS
except ImportError:
raise ImportError(
"To use this module, you should have the `duckduckgo_search` Python package installed. "
"You can install it by running the command: `pip install -e.[search-ddg]`"
)
class DDGAPIWrapper(BaseModel):
    """Async wrapper around duckduckgo_search's blocking DDGS text search."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    loop: Optional[asyncio.AbstractEventLoop] = None
    executor: Optional[futures.Executor] = None
    proxy: Optional[str] = None

    @property
    def ddgs(self):
        # A fresh DDGS session per access, honoring the configured proxy.
        return DDGS(proxies=self.proxy)

    # FIX: the overload stubs previously declared a `focus` parameter that the
    # implementation does not accept, and were plain `def` while the implementation
    # is `async def`; both are corrected so type checkers match reality.
    @overload
    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: Literal[True] = True,
    ) -> str:
        ...

    @overload
    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: Literal[False] = False,
    ) -> list[dict[str, str]]:
        ...

    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: bool = True,
    ) -> str | list[dict]:
        """Return the results of a DuckDuckGo text search.

        Args:
            query: The search query.
            max_results: The number of results to return.
            as_string: A boolean flag to determine the return type of the results. If True, the function will
                return a formatted string with the search results. If False, it will return a list of dictionaries
                containing detailed information about each search result.

        Returns:
            The results of the search.
        """
        # The DDGS client is blocking, so run it in an executor.
        loop = self.loop or asyncio.get_event_loop()
        future = loop.run_in_executor(
            self.executor,
            self._search_from_ddgs,
            query,
            max_results,
        )
        search_results = await future

        # Return the list of search result URLs
        if as_string:
            return json.dumps(search_results, ensure_ascii=False)
        return search_results

    def _search_from_ddgs(self, query: str, max_results: int):
        # Take at most `max_results` hits and normalize the key names.
        return [
            {"link": i["href"], "snippet": i["body"], "title": i["title"]}
            for (_, i) in zip(range(max_results), self.ddgs.text(query))
        ]
if __name__ == "__main__":
    import fire

    # CLI entry point: `python search_engine_ddg.py "<query>"`.
    fire.Fire(DDGAPIWrapper().run)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/metagpt_text_to_image.py | metagpt/tools/metagpt_text_to_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/18
@Author : mashenquan
@File : metagpt_text_to_image.py
@Desc : MetaGPT Text-to-Image OAS3 api, which provides text-to-image functionality.
"""
import base64
from typing import Dict, List
import aiohttp
import requests
from pydantic import BaseModel
from metagpt.logs import logger
class MetaGPTText2Image:
    """Client for a self-hosted stable-diffusion-style text-to-image REST endpoint."""

    def __init__(self, model_url):
        """
        :param model_url: Model reset api url
        """
        self.model_url = model_url

    async def text_2_image(self, text, size_type="512x512"):
        """Text to image

        :param text: The text used for image conversion.
        :param size_type: One of ['512x512', '512x768']
        :return: The decoded image bytes, or 0 on failure / empty result.
        """
        headers = {"Content-Type": "application/json"}
        dims = size_type.split("x")
        data = {
            "prompt": text,
            "negative_prompt": "(easynegative:0.8),black, dark,Low resolution",
            "override_settings": {"sd_model_checkpoint": "galaxytimemachinesGTM_photoV20"},
            "seed": -1,
            "batch_size": 1,
            "n_iter": 1,
            "steps": 20,
            "cfg_scale": 11,
            "width": int(dims[0]),
            "height": int(dims[1]),  # 768,
            "restore_faces": False,
            "tiling": False,
            "do_not_save_samples": False,
            "do_not_save_grid": False,
            "enable_hr": False,
            "hr_scale": 2,
            "hr_upscaler": "Latent",
            "hr_second_pass_steps": 0,
            "hr_resize_x": 0,
            "hr_resize_y": 0,
            "hr_upscale_to_x": 0,
            "hr_upscale_to_y": 0,
            "truncate_x": 0,
            "truncate_y": 0,
            "applied_old_hires_behavior_to": None,
            "eta": None,
            "sampler_index": "DPM++ SDE Karras",
            "alwayson_scripts": {},
        }

        class ImageResult(BaseModel):
            images: List
            parameters: Dict

        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(self.model_url, headers=headers, json=data) as response:
                    result = ImageResult(**await response.json())
                    if len(result.images) == 0:
                        return 0
                    data = base64.b64decode(result.images[0])
                    return data
        except (aiohttp.ClientError, requests.exceptions.RequestException) as e:
            # FIX: the request is made with aiohttp, which raises aiohttp.ClientError;
            # catching only requests' exceptions let every network failure propagate.
            logger.error(f"An error occurred:{e}")
        return 0
# Export
async def oas3_metagpt_text_to_image(text, size_type: str = "512x512", model_url=""):
    """Text to image

    :param text: The text used for image conversion.
    :param model_url: Model reset api
    :param size_type: One of ['512x512', '512x768']
    :return: The image data is returned in Base64 encoding.
    """
    if text:
        client = MetaGPTText2Image(model_url)
        return await client.text_2_image(text, size_type=size_type)
    return ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/openai_text_to_image.py | metagpt/tools/openai_text_to_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/17
@Author : mashenquan
@File : openai_text_to_image.py
@Desc : OpenAI Text-to-Image OAS3 api, which provides text-to-image functionality.
"""
import aiohttp
import requests
from metagpt.logs import logger
from metagpt.provider.base_llm import BaseLLM
class OpenAIText2Image:
    """Text-to-image via the OpenAI images API of a provided LLM client."""

    def __init__(self, llm: BaseLLM):
        self.llm = llm

    async def text_2_image(self, text, size_type="1024x1024"):
        """Text to image

        :param text: The text used for image conversion.
        :param size_type: One of ['256x256', '512x512', '1024x1024']
        :return: The raw image bytes, or "" on failure.
        """
        try:
            result = await self.llm.aclient.images.generate(prompt=text, n=1, size=size_type)
        except Exception as e:
            logger.error(f"An error occurred:{e}")
            return ""
        if result and len(result.data) > 0:
            return await OpenAIText2Image.get_image_data(result.data[0].url)
        return ""

    @staticmethod
    async def get_image_data(url):
        """Fetch image data from a URL

        :param url: Image url
        :return: The raw image bytes, or 0 on failure.
        """
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as response:
                    response.raise_for_status()  # raises on 4xx / 5xx responses
                    image_data = await response.read()
                    return image_data

        except (aiohttp.ClientError, requests.exceptions.RequestException) as e:
            # FIX: aiohttp raises aiohttp.ClientError (including from raise_for_status);
            # catching only requests' exceptions let those failures propagate uncaught.
            logger.error(f"An error occurred:{e}")
            return 0
# Export
async def oas3_openai_text_to_image(text, size_type: str = "1024x1024", llm: BaseLLM = None):
    """Text to image

    :param text: The text used for image conversion.
    :param size_type: One of ['256x256', '512x512', '1024x1024']
    :param llm: LLM instance
    :return: The image data is returned in Base64 encoding.
    """
    if text:
        return await OpenAIText2Image(llm).text_2_image(text, size_type=size_type)
    return ""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/tool_registry.py | metagpt/tools/tool_registry.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/01/12 17:07
@Author : garylin2099
@File : tool_registry.py
"""
from __future__ import annotations
import contextlib
import inspect
import os
from collections import defaultdict
from pathlib import Path
from pydantic import BaseModel
from metagpt.const import TOOL_SCHEMA_PATH
from metagpt.logs import logger
from metagpt.tools.tool_convert import (
convert_code_to_tool_schema,
convert_code_to_tool_schema_ast,
)
from metagpt.tools.tool_data_type import Tool, ToolSchema
class ToolRegistry(BaseModel):
    """In-memory registry of tools, indexed by name and by tag."""

    tools: dict = {}
    tools_by_tags: dict = defaultdict(dict)  # two-layer k-v, {tag: {tool_name: {...}, ...}, ...}

    def register_tool(
        self,
        tool_name: str,
        tool_path: str,
        schemas: dict = None,
        schema_path: str = "",
        tool_code: str = "",
        tags: list[str] = None,
        tool_source_object=None,  # can be any classes or functions
        include_functions: list[str] = None,
        verbose: bool = False,
    ):
        """Register a tool under `tool_name`, deriving schemas from source when not provided."""
        if self.has_tool(tool_name):
            return

        schema_path = schema_path or TOOL_SCHEMA_PATH / f"{tool_name}.yml"

        if not schemas:
            schemas = make_schema(tool_source_object, include_functions, schema_path)

        if not schemas:
            return

        schemas["tool_path"] = tool_path  # corresponding code file path of the tool
        try:
            ToolSchema(**schemas)  # validation
        except Exception:
            # Best-effort validation: a schema that does not conform is still registered.
            pass

        tags = tags or []
        tool = Tool(name=tool_name, path=tool_path, schemas=schemas, code=tool_code, tags=tags)
        self.tools[tool_name] = tool
        for tag in tags:
            self.tools_by_tags[tag].update({tool_name: tool})
        if verbose:
            logger.info(f"{tool_name} registered")
            logger.info(f"schema made at {str(schema_path)}, can be used for checking")

    def has_tool(self, key: str) -> bool:
        # FIX: previously annotated `-> Tool` while returning a membership boolean.
        return key in self.tools

    def get_tool(self, key) -> Tool:
        return self.tools.get(key)

    def get_tools_by_tag(self, key) -> dict[str, Tool]:
        return self.tools_by_tags.get(key, {})

    def get_all_tools(self) -> dict[str, Tool]:
        return self.tools

    def has_tool_tag(self, key) -> bool:
        return key in self.tools_by_tags

    def get_tool_tags(self) -> list[str]:
        return list(self.tools_by_tags.keys())
# Registry instance
# Module-level singleton shared by the @register_tool decorator and validate_tool_names.
TOOL_REGISTRY = ToolRegistry()
def register_tool(tags: list[str] = None, schema_path: str = "", **kwargs):
    """register a tool to registry"""

    def decorator(cls):
        # Get the file path where the function / class is defined and the source code
        file_path = inspect.getfile(cls)
        if "metagpt" in file_path:
            # split to handle ../metagpt/metagpt/tools/... where only metapgt/tools/... is needed
            file_path = "metagpt" + file_path.split("metagpt")[-1]
        source_code = ""
        with contextlib.suppress(OSError):
            # getsource can fail (e.g. built-ins, REPL-defined objects); keep "" then.
            source_code = inspect.getsource(cls)

        TOOL_REGISTRY.register_tool(
            tool_name=cls.__name__,
            tool_path=file_path,
            schema_path=schema_path,
            tool_code=source_code,
            tags=tags,
            tool_source_object=cls,
            **kwargs,
        )
        return cls

    return decorator
def make_schema(tool_source_object, include, path):
    """Build a tool schema from a source object; returns {} (and logs) on any failure.

    `path` is accepted for interface compatibility but not used here.
    """
    try:
        return convert_code_to_tool_schema(tool_source_object, include=include)
    except Exception as e:
        logger.error(f"Fail to make schema: {e}")
        return {}
def validate_tool_names(tools: list[str]) -> dict[str, Tool]:
    """Resolve a mixed list of tool names / tags / file-or-dir paths into Tool objects.

    - A file or directory path is registered on the fly.
    - "ClassName:method1,method2" keeps only the listed methods of a class tool.
    - A bare key may match a registered tool name or a tool tag; unknown keys are skipped.
    """
    assert isinstance(tools, list), "tools must be a list of str"
    valid_tools = {}
    for key in tools:
        # one can define either tool names OR tool tags OR tool path, take union to get the whole set
        # if tool paths are provided, they will be registered on the fly
        if os.path.isdir(key) or os.path.isfile(key):
            valid_tools.update(register_tools_from_path(key))
        elif TOOL_REGISTRY.has_tool(key.split(":")[0]):
            if ":" in key:
                # handle class tools with methods specified, such as Editor:read,write
                class_tool_name = key.split(":")[0]
                method_names = key.split(":")[1].split(",")
                class_tool = TOOL_REGISTRY.get_tool(class_tool_name)

                methods_filtered = {}
                for method_name in method_names:
                    if method_name in class_tool.schemas["methods"]:
                        methods_filtered[method_name] = class_tool.schemas["methods"][method_name]
                    else:
                        logger.warning(f"invalid method {method_name} under tool {class_tool_name}, skipped")
                # Deep-copy so filtering does not mutate the registry's stored schema.
                class_tool_filtered = class_tool.model_copy(deep=True)
                class_tool_filtered.schemas["methods"] = methods_filtered

                valid_tools.update({class_tool_name: class_tool_filtered})
            else:
                valid_tools.update({key: TOOL_REGISTRY.get_tool(key)})
        elif TOOL_REGISTRY.has_tool_tag(key):
            valid_tools.update(TOOL_REGISTRY.get_tools_by_tag(key))
        else:
            logger.warning(f"invalid tool name or tool type name: {key}, skipped")
    return valid_tools
def register_tools_from_file(file_path) -> dict[str, Tool]:
    """Parse a Python source file and register every tool defined in it.

    Non-Python files, ``setup.py`` and ``test*`` files are skipped.

    Returns:
        A mapping of tool name to the freshly registered ``Tool`` instance.
    """
    source_path = Path(file_path)
    base_name = source_path.name
    if not base_name.endswith(".py") or base_name == "setup.py" or base_name.startswith("test"):
        return {}
    source_code = source_path.read_text(encoding="utf-8")
    result = {}
    for tool_name, schemas in convert_code_to_tool_schema_ast(source_code).items():
        # The raw code travels inside the schema dict; extract it before registering.
        tool_code = schemas.pop("code", "")
        TOOL_REGISTRY.register_tool(
            tool_name=tool_name,
            tool_path=file_path,
            schemas=schemas,
            tool_code=tool_code,
        )
        result[tool_name] = TOOL_REGISTRY.get_tool(tool_name)
    return result
def register_tools_from_path(path) -> dict[str, Tool]:
    """Register tools from a single file or, recursively, a directory tree.

    A path that is neither a file nor a directory yields an empty mapping.
    """
    if os.path.isfile(path):
        return register_tools_from_file(path)
    if not os.path.isdir(path):
        return {}
    registered = {}
    for root, _, file_names in os.walk(path):
        for file_name in file_names:
            registered.update(register_tools_from_file(os.path.join(root, file_name)))
    return registered
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/openai_text_to_embedding.py | metagpt/tools/openai_text_to_embedding.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/18
@Author : mashenquan
@File : openai_text_to_embedding.py
@Desc : OpenAI Text-to-Embedding OAS3 api, which provides text-to-embedding functionality.
For more details, checkout: `https://platform.openai.com/docs/api-reference/embeddings/object`
"""
from typing import List
import aiohttp
import requests
from pydantic import BaseModel, Field
from metagpt.logs import logger
class Embedding(BaseModel):
    """A single embedding vector as returned by the OpenAI embeddings endpoint.

    Mirrors the `embedding` object documented at
    `https://platform.openai.com/docs/api-reference/embeddings/object`.
    """

    object: str  # The object type; always "embedding" for this payload.
    embedding: List[
        float
    ]  # The embedding vector; its length depends on the model used.
    index: int  # Position of this embedding in the response's `data` list.
class Usage(BaseModel):
    """Token usage accounting reported by the embeddings API for one request."""

    prompt_tokens: int = 0  # tokens consumed by the input text
    total_tokens: int = 0  # total tokens billed for the request
class ResultEmbedding(BaseModel):
    """Full response payload of the embeddings endpoint.

    The API returns a key named "object", which clashes with Python's builtin
    `object`, so it is stored on the field `object_` and mapped via an alias.
    The previous inner `Config` with an `alias` dict is not a supported pydantic
    mechanism, so the incoming "object" key was silently dropped.
    """

    # Allow construction both by field name (`object_=...`) and by the API's
    # alias (`{"object": ...}`).
    model_config = {"populate_by_name": True}

    object_: str = Field("", alias="object")
    data: List[Embedding] = []
    model: str = ""
    usage: Usage = Field(default_factory=Usage)
class OpenAIText2Embedding:
    """Async client for OpenAI's text-embedding endpoint."""

    def __init__(self, api_key: str, proxy: str):
        """
        :param api_key: OpenAI API key, For more details, checkout: `https://platform.openai.com/account/api-keys`
        :param proxy: Optional HTTP proxy URL; an empty string disables proxying.
        """
        self.api_key = api_key
        self.proxy = proxy

    async def text_2_embedding(self, text, model="text-embedding-ada-002"):
        """Text to embedding

        :param text: The text used for embedding.
        :param model: One of ['text-embedding-ada-002'], ID of the model to use. For more details, checkout: `https://api.openai.com/v1/models`.
        :return: A :class:`ResultEmbedding` instance; empty on request failure.
        """
        # aiohttp takes the proxy as a per-request keyword argument.
        proxies = {"proxy": self.proxy} if self.proxy else {}
        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"}
        payload = {"input": text, "model": model}
        url = "https://api.openai.com/v1/embeddings"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=payload, **proxies) as response:
                    data = await response.json()
                    return ResultEmbedding(**data)
        except aiohttp.ClientError as e:
            # The request is made with aiohttp, so aiohttp's exception hierarchy
            # (not requests') is what can actually be raised here.
            logger.error(f"An error occurred:{e}")
        return ResultEmbedding()
# Export
async def oas3_openai_text_to_embedding(text, openai_api_key: str, model="text-embedding-ada-002", proxy: str = ""):
    """OAS3 entry point: convert text into an embedding vector.

    :param text: The text used for embedding; an empty value short-circuits to "".
    :param openai_api_key: OpenAI API key, see `https://platform.openai.com/account/api-keys`.
    :param model: One of ['text-embedding-ada-002'], ID of the model to use. For more details, checkout: `https://api.openai.com/v1/models`.
    :param proxy: Optional HTTP proxy URL.
    :return: A :class:`ResultEmbedding` from the API call, or "" when `text` is empty.
    """
    if not text:
        return ""
    client = OpenAIText2Embedding(api_key=openai_api_key, proxy=proxy)
    return await client.text_2_embedding(text, model=model)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/web_browser_engine_playwright.py | metagpt/tools/web_browser_engine_playwright.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import asyncio
import sys
from pathlib import Path
from typing import Literal, Optional
from playwright.async_api import async_playwright
from pydantic import BaseModel, Field, PrivateAttr
from metagpt.logs import logger
from metagpt.utils.parse_html import WebPage
class PlaywrightWrapper(BaseModel):
    """Wrapper around Playwright.

    To use this module, you should have the `playwright` Python package installed and ensure that
    the required browsers are also installed. You can install playwright by running the command
    `pip install metagpt[playwright]` and download the necessary browser binaries by running the
    command `playwright install` for the first time.
    """

    browser_type: Literal["chromium", "firefox", "webkit"] = "chromium"
    launch_kwargs: dict = Field(default_factory=dict)  # forwarded to browser_type.launch()
    proxy: Optional[str] = None
    context_kwargs: dict = Field(default_factory=dict)  # forwarded to browser.new_context()
    _has_run_precheck: bool = PrivateAttr(False)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        launch_kwargs = self.launch_kwargs
        if self.proxy and "proxy" not in launch_kwargs:
            args = launch_kwargs.get("args", [])
            # Only inject the proxy when it was not already configured via CLI args.
            if not any(i.startswith("--proxy-server=") for i in args):
                launch_kwargs["proxy"] = {"server": self.proxy}
        # Context-level options may be passed flat in kwargs; route them to new_context().
        for key in ["ignore_https_errors", "java_script_enabled", "extra_http_headers", "user_agent"]:
            if key in kwargs:
                self.context_kwargs[key] = kwargs[key]

    async def run(self, url: str, *urls: str, per_page_timeout: Optional[float] = None) -> WebPage | list[WebPage]:
        """Scrape one or more URLs.

        Args:
            url: The first URL to scrape.
            *urls: Additional URLs, scraped concurrently.
            per_page_timeout: Per-page timeout in seconds (None keeps Playwright's default).

        Returns:
            A single WebPage, or a list of WebPages when extra URLs are given.
        """
        async with async_playwright() as ap:
            browser_type = getattr(ap, self.browser_type)
            await self._run_precheck(browser_type)
            browser = await browser_type.launch(**self.launch_kwargs)
            _scrape = self._scrape
            if urls:
                return await asyncio.gather(
                    _scrape(browser, url, per_page_timeout), *(_scrape(browser, i, per_page_timeout) for i in urls)
                )
            return await _scrape(browser, url, per_page_timeout)

    async def _scrape(self, browser, url, timeout: Optional[float] = None):
        """Load one page in a fresh context and capture its HTML and inner text."""
        context = await browser.new_context(**self.context_kwargs)
        if timeout is not None:
            context.set_default_timeout(timeout * 1000)  # playwright uses milliseconds.
        page = await context.new_page()
        async with page:
            try:
                await page.goto(url)
                # Scroll to the bottom to trigger lazy-loaded content.
                await page.evaluate("window.scrollTo(0, document.body.scrollHeight)")
                html = await page.content()
                inner_text = await page.evaluate("() => document.body.innerText")
            except Exception as e:
                inner_text = f"Fail to load page content for {e}"
                html = ""
            return WebPage(inner_text=inner_text, html=html, url=url)

    async def _run_precheck(self, browser_type):
        """Ensure the browser binary exists, installing or falling back when missing."""
        if self._has_run_precheck:
            return
        executable_path = Path(browser_type.executable_path)
        if not executable_path.exists() and "executable_path" not in self.launch_kwargs:
            kwargs = {}
            if self.proxy:
                # Let the installer download browsers through the same proxy.
                kwargs["env"] = {"ALL_PROXY": self.proxy}
            await _install_browsers(self.browser_type, **kwargs)
            if self._has_run_precheck:
                # Another coroutine finished the precheck while we awaited.
                return
            if not executable_path.exists():
                parts = executable_path.parts
                available_paths = list(Path(*parts[:-3]).glob(f"{self.browser_type}-*"))
                if available_paths:
                    logger.warning(
                        "It seems that your OS is not officially supported by Playwright. "
                        "Try to set executable_path to the fallback build version."
                    )
                    executable_path = available_paths[0].joinpath(*parts[-2:])
                    self.launch_kwargs["executable_path"] = str(executable_path)
        self._has_run_precheck = True
def _get_install_lock():
    """Return the module-wide lock guarding browser installs, creating it lazily."""
    global _install_lock
    lock = _install_lock
    if lock is None:
        # Built on first use so no event loop is required at import time.
        lock = asyncio.Lock()
        _install_lock = lock
    return lock
async def _install_browsers(*browsers, **kwargs) -> None:
    """Install the given Playwright browsers via `python -m playwright install`.

    Serialized by a module-level lock, and browsers already handled in this
    process are skipped via `_install_cache`. Extra ``kwargs`` are forwarded to
    the subprocess (e.g. ``env`` to proxy the download).
    """
    async with _get_install_lock():
        browsers = [i for i in browsers if i not in _install_cache]
        if not browsers:
            return
        process = await asyncio.create_subprocess_exec(
            sys.executable,
            "-m",
            "playwright",
            "install",
            *browsers,
            # "--with-deps",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        # Stream installer output into our logger while the process runs.
        await asyncio.gather(_log_stream(process.stdout, logger.info), _log_stream(process.stderr, logger.warning))
        if await process.wait() == 0:
            logger.info("Install browser for playwright successfully.")
        else:
            logger.warning("Fail to install browser for playwright.")
        # NOTE(review): browsers are cached even when the install failed, so a
        # failed install is not retried within this process — confirm intended.
        _install_cache.update(browsers)
async def _log_stream(sr, log_func):
while True:
line = await sr.readline()
if not line:
return
log_func(f"[playwright install browser]: {line.decode().strip()}")
# Guards concurrent calls to `_install_browsers`; created lazily by
# `_get_install_lock` so no event loop is needed at import time.
_install_lock: Optional[asyncio.Lock] = None
# Browser types already processed by `_install_browsers` in this process.
_install_cache = set()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/web_browser_engine_selenium.py | metagpt/tools/web_browser_engine_selenium.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import asyncio
import importlib
from concurrent import futures
from copy import deepcopy
from typing import Callable, Literal, Optional
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.core.download_manager import WDMDownloadManager
from webdriver_manager.core.http import WDMHttpClient
from metagpt.utils.parse_html import WebPage
class SeleniumWrapper(BaseModel):
    """Wrapper around Selenium.

    To use this module, you should check the following:
    1. Run the following command: pip install metagpt[selenium].
    2. Make sure you have a compatible web browser installed and the appropriate WebDriver set up
    for that browser before running. For example, if you have Mozilla Firefox installed on your
    computer, you can set the configuration SELENIUM_BROWSER_TYPE to firefox. After that, you
    can scrape web pages using the Selenium WebBrowserEngine.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    browser_type: Literal["chrome", "firefox", "edge", "ie"] = "chrome"
    launch_kwargs: dict = Field(default_factory=dict)  # rendered as --key=value CLI flags
    proxy: Optional[str] = None
    loop: Optional[asyncio.AbstractEventLoop] = None
    executor: Optional[futures.Executor] = None
    _has_run_precheck: bool = PrivateAttr(False)
    _get_driver: Optional[Callable] = PrivateAttr(None)

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Propagate the proxy into the launch options unless already configured.
        if self.proxy and "proxy-server" not in self.launch_kwargs:
            self.launch_kwargs["proxy-server"] = self.proxy

    @property
    def launch_args(self):
        """Launch kwargs rendered as `--key=value` flags (executable_path excluded)."""
        return [f"--{k}={v}" for k, v in self.launch_kwargs.items() if k != "executable_path"]

    @property
    def executable_path(self):
        # Path to the WebDriver binary; resolved via webdriver-manager when absent.
        return self.launch_kwargs.get("executable_path")

    async def run(self, url: str, *urls: str, per_page_timeout: Optional[float] = None) -> WebPage | list[WebPage]:
        """Scrape one or more URLs; returns a single WebPage or a list of them."""
        await self._run_precheck()

        def _scrape(target_url, timeout):
            # Selenium is blocking, so each page is scraped in the executor.
            return self.loop.run_in_executor(self.executor, self._scrape_website, target_url, timeout)

        if urls:
            return await asyncio.gather(_scrape(url, per_page_timeout), *(_scrape(i, per_page_timeout) for i in urls))
        return await _scrape(url, per_page_timeout)

    async def _run_precheck(self):
        """Resolve the WebDriver binary and build the driver factory once."""
        if self._has_run_precheck:
            return
        if self.loop is None:
            # `run` is a coroutine, so a running loop is guaranteed here;
            # get_running_loop avoids the deprecated get_event_loop behavior.
            self.loop = asyncio.get_running_loop()
        self._get_driver = await self.loop.run_in_executor(
            self.executor,
            lambda: _gen_get_driver_func(
                self.browser_type, *self.launch_args, executable_path=self.executable_path, proxy=self.proxy
            ),
        )
        self._has_run_precheck = True

    def _scrape_website(self, url, timeout: Optional[float] = None):
        with self._get_driver() as driver:
            try:
                driver.get(url)
                # Wait until the body is present so dynamic pages have rendered.
                WebDriverWait(driver, timeout or 30).until(EC.presence_of_element_located((By.TAG_NAME, "body")))
                inner_text = driver.execute_script("return document.body.innerText;")
                html = driver.page_source
            except Exception as e:
                inner_text = f"Fail to load page content for {e}"
                html = ""
            return WebPage(inner_text=inner_text, html=html, url=url)
# Maps browser type -> (webdriver_manager module, manager class name); used by
# `_gen_get_driver_func` to download a matching WebDriver binary on demand.
_webdriver_manager_types = {
    "chrome": ("webdriver_manager.chrome", "ChromeDriverManager"),
    "firefox": ("webdriver_manager.firefox", "GeckoDriverManager"),
    "edge": ("webdriver_manager.microsoft", "EdgeChromiumDriverManager"),
    "ie": ("webdriver_manager.microsoft", "IEDriverManager"),
}
class WDMHttpProxyClient(WDMHttpClient):
    """HTTP client for webdriver-manager downloads that honors an optional proxy."""

    def __init__(self, proxy: str = None):
        super().__init__()
        self.proxy = proxy

    def get(self, url, **kwargs):
        """Issue a GET request, injecting the configured proxy unless the caller set one."""
        needs_proxy = self.proxy and "proxies" not in kwargs
        if needs_proxy:
            kwargs["proxies"] = {"all": self.proxy}
        return super().get(url, **kwargs)
def _gen_get_driver_func(browser_type, *args, executable_path=None, proxy=None):
    """Build a zero-arg factory that creates configured Selenium WebDriver instances.

    Args:
        browser_type: One of "chrome", "firefox", "edge", "ie"; selects which
            selenium submodule is imported dynamically.
        *args: Extra CLI arguments appended to the browser options.
        executable_path: Path to the WebDriver binary; when omitted, a matching
            driver is downloaded once via webdriver-manager.
        proxy: Optional proxy URL used for the webdriver-manager download.

    Returns:
        A callable that returns a fresh headless WebDriver on each invocation.
    """
    # Resolve the browser-specific classes lazily so only the chosen browser's
    # selenium submodule needs to be importable.
    WebDriver = getattr(importlib.import_module(f"selenium.webdriver.{browser_type}.webdriver"), "WebDriver")
    Service = getattr(importlib.import_module(f"selenium.webdriver.{browser_type}.service"), "Service")
    Options = getattr(importlib.import_module(f"selenium.webdriver.{browser_type}.options"), "Options")
    if not executable_path:
        module_name, type_name = _webdriver_manager_types[browser_type]
        DriverManager = getattr(importlib.import_module(module_name), type_name)
        driver_manager = DriverManager(download_manager=WDMDownloadManager(http_client=WDMHttpProxyClient(proxy=proxy)))
        # driver_manager.driver_cache.find_driver(driver_manager.driver))
        executable_path = driver_manager.install()
    def _get_driver():
        # Each call builds a fresh options object state via deepcopy so shared
        # Options are never mutated between driver instantiations.
        options = Options()
        options.add_argument("--headless")
        options.add_argument("--enable-javascript")
        if browser_type == "chrome":
            options.add_argument("--disable-gpu")  # This flag can help avoid renderer issue
            options.add_argument("--disable-dev-shm-usage")  # Overcome limited resource problems
            options.add_argument("--no-sandbox")
        for i in args:
            options.add_argument(i)
        return WebDriver(options=deepcopy(options), service=Service(executable_path=executable_path))
    return _get_driver
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/ut_writer.py | metagpt/tools/ut_writer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from pathlib import Path
from metagpt.config2 import config
from metagpt.provider.openai_api import OpenAILLM as GPTAPI
from metagpt.utils.common import awrite
# In-context-learning sample: one interface definition plus the unit test the
# LLM is expected to mimic, followed by role-playing instructions.
ICL_SAMPLE = """Interface definition:
```text
Interface Name: Element Tagging
Interface Path: /projects/{project_key}/node-tags
Method: POST
Request parameters:
Path parameters:
project_key
Body parameters:
Name Type Required Default Value Remarks
nodes array Yes Nodes
node_key string No Node key
tags array No Original node tag list
node_type string No Node type DATASET / RECIPE
operations array Yes
tags array No Operation tag list
mode string No Operation type ADD / DELETE
Return data:
Name Type Required Default Value Remarks
code integer Yes Status code
msg string Yes Prompt message
data object Yes Returned data
list array No Node list true / false
node_type string No Node type DATASET / RECIPE
node_key string No Node key
```
Unit test:
```python
@pytest.mark.parametrize(
"project_key, nodes, operations, expected_msg",
[
("project_key", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "success"),
("project_key", [{"node_key": "dataset_002", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["tag1"], "mode": "DELETE"}], "success"),
("", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Missing the required parameter project_key"),
(123, [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Incorrect parameter type"),
("project_key", [{"node_key": "a"*201, "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "Request parameter exceeds field boundary")
]
)
def test_node_tags(project_key, nodes, operations, expected_msg):
pass
# The above is an interface definition and a unit test example.
# Next, please play the role of an expert test manager with 20 years of experience at Google. When I give the interface definition,
# reply to me with a unit test. There are several requirements:
# 1. Only output one `@pytest.mark.parametrize` and the corresponding test_<interface name> function (inside pass, do not implement).
# -- The function parameter contains expected_msg for result verification.
# 2. The generated test cases use shorter text or numbers and are as compact as possible.
# 3. If comments are needed, use Chinese.
# If you understand, please wait for me to give the interface definition and just answer "Understood" to save tokens.
"""
# Prompt prefix for functional test-case generation (10 cases per parametrize).
ACT_PROMPT_PREFIX = """Refer to the test types: such as missing request parameters, field boundary verification, incorrect field type.
Please output 10 test cases within one `@pytest.mark.parametrize` scope.
```text
"""
# Prompt prefix for security-oriented test-case generation.
YFT_PROMPT_PREFIX = """Refer to the test types: such as SQL injection, cross-site scripting (XSS), unauthorized access and privilege escalation,
authentication and authorization, parameter verification, exception handling, file upload and download.
Please output 10 test cases within one `@pytest.mark.parametrize` scope.
```text
"""
# Sample API documentation block, used for demos/tests of the generator.
OCR_API_DOC = """```text
Interface Name: OCR recognition
Interface Path: /api/v1/contract/treaty/task/ocr
Method: POST
Request Parameters:
Path Parameters:
Body Parameters:
Name Type Required Default Value Remarks
file_id string Yes
box array Yes
contract_id number Yes Contract id
start_time string No yyyy-mm-dd
end_time string No yyyy-mm-dd
extract_type number No Recognition type 1- During import 2- After import Default 1
Response Data:
Name Type Required Default Value Remarks
code integer Yes
message string Yes
data object Yes
```
"""
class UTGenerator:
    """UT Generator: Construct UT through API documentation"""

    def __init__(
        self,
        swagger_file: str,
        ut_py_path: str,
        questions_path: str,
        chatgpt_method: str = "API",
        template_prefix=YFT_PROMPT_PREFIX,
    ) -> None:
        """Initialize UT Generator

        Args:
            swagger_file: path to the swagger file
            ut_py_path: path to store test cases
            questions_path: path to store the template, facilitating subsequent checks
            chatgpt_method: API method
            template_prefix: use the template, default is YFT_UT_PROMPT
        """
        self.swagger_file = swagger_file
        self.ut_py_path = ut_py_path
        self.questions_path = questions_path
        assert chatgpt_method in ["API"], "Invalid chatgpt_method"
        self.chatgpt_method = chatgpt_method
        # ICL: In-Context Learning, provide an example here for GPT to mimic
        self.icl_sample = ICL_SAMPLE
        self.template_prefix = template_prefix

    def get_swagger_json(self) -> dict:
        """Load Swagger JSON from a local file"""
        with open(self.swagger_file, "r", encoding="utf-8") as file:
            swagger_json = json.load(file)
        return swagger_json

    def __para_to_str(self, prop, required, name=""):
        # Render one parameter row: name / type / required / title / description.
        name = name or prop["name"]
        # .get tolerates schema entries that omit "type" instead of raising KeyError.
        ptype = prop.get("type", "")
        title = prop.get("title", "")
        desc = prop.get("description", "")
        return f'{name}\t{ptype}\t{"Yes" if required else "No"}\t{title}\t{desc}'

    def _para_to_str(self, prop):
        # Row for a parameter that carries its own "required" flag.
        required = prop.get("required", False)
        return self.__para_to_str(prop, required)

    def para_to_str(self, name, prop, prop_object_required):
        # Row for an object property whose requiredness comes from the parent schema.
        required = name in prop_object_required
        return self.__para_to_str(prop, required, name)

    def build_object_properties(self, node, prop_object_required, level: int = 0) -> str:
        """Recursively output properties of object and array[object] types

        Args:
            node (_type_): value of the child item
            prop_object_required (_type_): whether it's a required field
            level: current recursion depth
        """
        doc = ""

        def dive_into_object(node):
            """If it's an object type, recursively output its properties"""
            if node.get("type") == "object":
                sub_properties = node.get("properties", {})
                return self.build_object_properties(sub_properties, prop_object_required, level=level + 1)
            return ""

        if node.get("in", "") in ["query", "header", "formData"]:
            doc += f'{" " * level}{self._para_to_str(node)}\n'
            doc += dive_into_object(node)
            return doc

        for name, prop in node.items():
            if not isinstance(prop, dict):
                doc += f'{" " * level}{self._para_to_str(node)}\n'
                break
            doc += f'{" " * level}{self.para_to_str(name, prop, prop_object_required)}\n'
            doc += dive_into_object(prop)
            # .get: properties without an explicit "type" are simply not arrays
            # (the previous prop["type"] lookup crashed on such entries).
            if prop.get("type") == "array":
                items = prop.get("items", {})
                doc += dive_into_object(items)
        return doc

    def get_tags_mapping(self) -> dict:
        """Process tag and path mappings

        Returns:
            Dict: mapping of tag to path
        """
        swagger_data = self.get_swagger_json()
        paths = swagger_data["paths"]
        tags = {}
        for path, path_obj in paths.items():
            for method, method_obj in path_obj.items():
                for tag in method_obj["tags"]:
                    if tag not in tags:
                        tags[tag] = {}
                    if path not in tags[tag]:
                        tags[tag][path] = {}
                    tags[tag][path][method] = method_obj
        return tags

    async def generate_ut(self, include_tags) -> bool:
        """Generate test case files

        Args:
            include_tags: tags to generate for; None means every tag.
        """
        tags = self.get_tags_mapping()
        for tag, paths in tags.items():
            if include_tags is None or tag in include_tags:
                await self._generate_ut(tag, paths)
        return True

    def build_api_doc(self, node: dict, path: str, method: str) -> str:
        """Render one endpoint's swagger node as a plain-text API documentation block."""
        summary = node["summary"]
        doc = f"API Name: {summary}\nAPI Path: {path}\nMethod: {method.upper()}\n"
        doc += "\nRequest Parameters:\n"
        if "parameters" in node:
            parameters = node["parameters"]
            doc += "Path Parameters:\n"
            # param["in"]: path / formData / body / query / header
            for param in parameters:
                if param["in"] == "path":
                    doc += f'{param["name"]} \n'
            doc += "\nBody Parameters:\n"
            doc += "Name\tType\tRequired\tDefault Value\tRemarks\n"
            for param in parameters:
                if param["in"] == "body":
                    schema = param.get("schema", {})
                    prop_properties = schema.get("properties", {})
                    prop_required = schema.get("required", [])
                    doc += self.build_object_properties(prop_properties, prop_required)
                else:
                    doc += self.build_object_properties(param, [])
        # Display response data information
        doc += "\nResponse Data:\n"
        doc += "Name\tType\tRequired\tDefault Value\tRemarks\n"
        responses = node["responses"]
        response = responses.get("200", {})
        schema = response.get("schema", {})
        properties = schema.get("properties", {})
        required = schema.get("required", {})
        doc += self.build_object_properties(properties, required)
        doc += "\n"
        doc += "```"
        return doc

    async def ask_gpt_and_save(self, question: str, tag: str, fname: str):
        """Generate questions and store both questions and answers"""
        messages = [self.icl_sample, question]
        result = await self.gpt_msgs_to_code(messages=messages)
        await awrite(Path(self.questions_path) / tag / f"{fname}.txt", question)
        data = result.get("code", "") if result else ""
        await awrite(Path(self.ut_py_path) / tag / f"{fname}.py", data)

    async def _generate_ut(self, tag, paths):
        """Process the structure under a data path

        Args:
            tag (_type_): module name
            paths (_type_): Path Object
        """
        for path, path_obj in paths.items():
            for method, node in path_obj.items():
                summary = node["summary"]
                question = self.template_prefix
                question += self.build_api_doc(node, path, method)
                await self.ask_gpt_and_save(question, tag, summary)

    async def gpt_msgs_to_code(self, messages: list) -> str:
        """Choose based on different calling methods"""
        # NOTE(review): aask_code appears to return a dict (ask_gpt_and_save calls
        # result.get("code")), so the `-> str` annotation looks inaccurate — confirm.
        result = ""
        if self.chatgpt_method == "API":
            result = await GPTAPI(config.get_openai_llm()).aask_code(messages=messages)
        return result
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine.py | metagpt/tools/search_engine.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/6 20:15
@Author : alexanderwu
@File : search_engine.py
"""
import importlib
from typing import Annotated, Callable, Coroutine, Literal, Optional, Union, overload
from pydantic import BaseModel, ConfigDict, Field, model_validator
from metagpt.configs.search_config import SearchConfig
from metagpt.logs import logger
from metagpt.tools import SearchEngineType
class SearchEngine(BaseModel):
    """A model for configuring and executing searches with different search engines.

    Attributes:
        model_config: Configuration for the model allowing arbitrary types.
        engine: The type of search engine to use.
        run_func: An optional callable for running the search. If not provided, it will be determined based on the engine.
        api_key: An optional API key for the search engine.
        proxy: An optional proxy for the search engine requests.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    engine: SearchEngineType = SearchEngineType.SERPER_GOOGLE
    run_func: Annotated[
        Optional[Callable[[str, int, bool], Coroutine[None, None, Union[str, list[str]]]]], Field(exclude=True)
    ] = None
    api_key: Optional[str] = None
    proxy: Optional[str] = None

    @model_validator(mode="after")
    def validate_extra(self):
        """Validates extra fields provided to the model and updates the run function accordingly."""
        data = self.model_dump(exclude={"engine"}, exclude_none=True, exclude_defaults=True)
        if self.model_extra:
            data.update(self.model_extra)
        self._process_extra(**data)
        return self

    def _process_extra(
        self,
        run_func: Optional[Callable[[str, int, bool], Coroutine[None, None, Union[str, list[str]]]]] = None,
        **kwargs,
    ):
        """Processes extra configuration and updates the run function based on the search engine type.

        Args:
            run_func: An optional callable for running the search. If not provided, it will be determined based on the engine.
        """
        # Import each engine wrapper lazily so optional dependencies are only
        # required for the engine actually selected.
        if self.engine == SearchEngineType.SERPAPI_GOOGLE:
            module = "metagpt.tools.search_engine_serpapi"
            run_func = importlib.import_module(module).SerpAPIWrapper(**kwargs).run
        elif self.engine == SearchEngineType.SERPER_GOOGLE:
            module = "metagpt.tools.search_engine_serper"
            run_func = importlib.import_module(module).SerperWrapper(**kwargs).run
        elif self.engine == SearchEngineType.DIRECT_GOOGLE:
            module = "metagpt.tools.search_engine_googleapi"
            run_func = importlib.import_module(module).GoogleAPIWrapper(**kwargs).run
        elif self.engine == SearchEngineType.DUCK_DUCK_GO:
            module = "metagpt.tools.search_engine_ddg"
            run_func = importlib.import_module(module).DDGAPIWrapper(**kwargs).run
        elif self.engine == SearchEngineType.CUSTOM_ENGINE:
            run_func = self.run_func
        elif self.engine == SearchEngineType.BING:
            module = "metagpt.tools.search_engine_bing"
            run_func = importlib.import_module(module).BingAPIWrapper(**kwargs).run
        else:
            raise NotImplementedError
        self.run_func = run_func

    @classmethod
    def from_search_config(cls, config: SearchConfig, **kwargs):
        """Creates a SearchEngine instance from a SearchConfig.

        Args:
            config: The search configuration to use for creating the SearchEngine instance.
        """
        data = config.model_dump(exclude={"api_type", "search_func"})
        if config.search_func is not None:
            data["run_func"] = config.search_func
        return cls(engine=config.api_type, **data, **kwargs)

    @classmethod
    def from_search_func(
        cls, search_func: Callable[[str, int, bool], Coroutine[None, None, Union[str, list[str]]]], **kwargs
    ):
        """Creates a SearchEngine instance from a custom search function.

        Args:
            search_func: A callable that executes the search.
        """
        return cls(engine=SearchEngineType.CUSTOM_ENGINE, run_func=search_func, **kwargs)

    @overload
    def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: Literal[True] = True,
    ) -> str:
        ...

    @overload
    def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: Literal[False] = False,
    ) -> list[dict[str, str]]:
        ...

    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: bool = True,
        ignore_errors: bool = False,
    ) -> Union[str, list[dict[str, str]]]:
        """Run a search query.

        Args:
            query: The search query.
            max_results: The maximum number of results to return. Defaults to 8.
            as_string: Whether to return the results as a string or a list of dictionaries. Defaults to True.
            ignore_errors: Whether to ignore errors during the search. Defaults to False.

        Returns:
            The search results as a string or a list of dictionaries.
        """
        try:
            return await self.run_func(query, max_results=max_results, as_string=as_string)
        except Exception as e:
            # Handle errors in the API call
            logger.exception(f"fail to search {query} for {e}")
            if not ignore_errors:
                # Bare `raise` re-raises with the original traceback, instead of
                # appending an extra frame as `raise e` would.
                raise
            return "" if as_string else []
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_meilisearch.py | metagpt/tools/search_engine_meilisearch.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/22 21:33
@Author : alexanderwu
@File : search_engine_meilisearch.py
"""
from typing import List
import meilisearch
from meilisearch.index import Index
from metagpt.utils.exceptions import handle_exception
class DataSource:
    """A named search corpus identified by its URL."""

    def __init__(self, name: str, url: str):
        self.name, self.url = name, url
class MeilisearchEngine:
    """Thin wrapper over a Meilisearch client bound to one active index."""

    def __init__(self, url, token):
        self.client = meilisearch.Client(url, token)
        self._index: Index = None  # set after the first add_documents() call

    def set_index(self, index):
        # Remember the index that subsequent search() calls will query.
        self._index = index

    def add_documents(self, data_source: DataSource, documents: List[dict]):
        """Index `documents` under "<data_source.name>_index", creating it if absent."""
        index_name = f"{data_source.name}_index"
        # NOTE(review): `get_indexes()` appears to return index objects/metadata
        # rather than plain name strings, so this membership test may always be
        # True (re-issuing create_index) — confirm against the meilisearch
        # client version in use.
        if index_name not in self.client.get_indexes():
            self.client.create_index(uid=index_name, options={"primaryKey": "id"})
        index = self.client.get_index(index_name)
        index.add_documents(documents)
        self.set_index(index)

    @handle_exception(exception_type=Exception, default_return=[])
    def search(self, query):
        """Search the active index; returns matching hits, or [] on any error."""
        search_results = self._index.search(query)
        return search_results["hits"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/prompt_writer.py | metagpt/tools/prompt_writer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 16:03
@Author : alexanderwu
@File : prompt_writer.py
"""
from typing import Union
class GPTPromptGenerator:
    """Given a sample LLM output, build prompts that ask an LLM to reconstruct the
    input which could have produced it (instruction, chatbot, and query styles)."""

    def __init__(self):
        # Dispatch table: style name -> prompt-builder method.
        self._generators = {
            "instruction": self.gen_instruction_style,
            "chatbot": self.gen_chatbot_style,
            "query": self.gen_query_style,
        }

    def gen_instruction_style(self, example):
        """Instruction style: Given an output, request LLM to provide input"""
        return f"""Instruction: X
Output: {example}
What kind of instruction might this output come from?
X:"""

    def gen_chatbot_style(self, example):
        """Chatbot style: Given an output, request LLM to provide input"""
        return f"""You are a chatbot. A user sent you an informal message, and you replied as follows.
Message: X
Reply: {example}
What could the informal message X be?
X:"""

    def gen_query_style(self, example):
        """Query style: Given an output, request LLM to provide input"""
        return f"""You are a search engine. Someone made a detailed query, and the most relevant document to this query is as follows.
Query: X
Document: {example} What is the detailed query X?
X:"""

    def gen(self, example: str, style: str = "all") -> Union[list[str], str]:
        """
        Generate one or multiple prompts from the expected output sample.

        :param example: Expected LLM output sample
        :param style: (all|instruction|chatbot|query)
        :return: One prompt string for a specific style, or a list of all prompts
        """
        if style == "all":
            return [builder(example) for builder in self._generators.values()]
        return self._generators[style](example)
class WikiHowTemplate:
    """Prompt templates asking for step-by-step instructions in many phrasings."""

    def __init__(self):
        # One template per line; each uses the {question} and {step} placeholders.
        self._prompts = """Give me {step} steps to {question}.
How to {question}?
Do you know how can I {question}?
List {step} instructions to {question}.
What are some tips to {question}?
What are some steps to {question}?
Can you provide {step} clear and concise instructions on how to {question}?
I'm interested in learning how to {question}. Could you break it down into {step} easy-to-follow steps?
For someone who is new to {question}, what would be {step} key steps to get started?
What is the most efficient way to {question}? Could you provide a list of {step} steps?
Do you have any advice on how to {question} successfully? Maybe a step-by-step guide with {step} steps?
I'm trying to accomplish {question}. Could you walk me through the process with {step} detailed instructions?
What are the essential {step} steps to {question}?
I need to {question}, but I'm not sure where to start. Can you give me {step} actionable steps?
As a beginner in {question}, what are the {step} basic steps I should take?
I'm looking for a comprehensive guide on how to {question}. Can you provide {step} detailed steps?
Could you outline {step} practical steps to achieve {question}?
What are the {step} fundamental steps to consider when attempting to {question}?"""

    def gen(self, question: str, step: str) -> list[str]:
        """Render every template with the given question and step count."""
        filled = self._prompts.format(question=question, step=step)
        return filled.splitlines()
class EnronTemplate:
    """Generate paraphrased email-writing prompts for a given subject line."""

    def __init__(self):
        # Template bank: one prompt variant per line; {subj} is filled in by `gen`.
        self._prompts = """Write an email with the subject "{subj}".
Can you craft an email with the subject {subj}?
Would you be able to compose an email and use {subj} as the subject?
Create an email about {subj}.
Draft an email and include the subject "{subj}".
Generate an email about {subj}.
Hey, can you shoot me an email about {subj}?
Do you mind crafting an email for me with {subj} as the subject?
Can you whip up an email with the subject of "{subj}"?
Hey, can you write an email and use "{subj}" as the subject?
Can you send me an email about {subj}?"""

    def gen(self, subj):
        # Fill the subject placeholder and return one prompt per line.
        return self._prompts.format(subj=subj).splitlines()
class BEAGECTemplate:
    """Provide a fixed bank of document proofreading/editing prompts."""

    def __init__(self):
        # Template bank: one editing instruction per line; no placeholders.
        self._prompts = """Edit and revise this document to improve its grammar, vocabulary, spelling, and style.
Revise this document to correct all the errors related to grammar, spelling, and style.
Refine this document by eliminating all grammatical, lexical, and orthographic errors and improving its writing style.
Polish this document by rectifying all errors related to grammar, vocabulary, and writing style.
Enhance this document by correcting all the grammar errors and style issues, and improving its overall quality.
Rewrite this document by fixing all grammatical, lexical and orthographic errors.
Fix all grammar errors and style issues and rewrite this document.
Take a stab at fixing all the mistakes in this document and make it sound better.
Give this document a once-over and clean up any grammar or spelling errors.
Tweak this document to make it read smoother and fix any mistakes you see.
Make this document sound better by fixing all the grammar, spelling, and style issues.
Proofread this document and fix any errors that make it sound weird or confusing."""

    def gen(self):
        # Return the prompt bank as a list, one instruction per element.
        return self._prompts.splitlines()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_googleapi.py | metagpt/tools/search_engine_googleapi.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import asyncio
import json
import warnings
from concurrent import futures
from typing import Optional
from urllib.parse import urlparse
import httplib2
from pydantic import BaseModel, ConfigDict, model_validator
try:
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
"To use this module, you should have the `google-api-python-client` Python package installed. "
"You can install it by running the command: `pip install -e.[search-google]`"
)
class GoogleAPIWrapper(BaseModel):
    """Async wrapper around the Google Custom Search JSON API ("customsearch" v1)."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # API key from https://console.cloud.google.com/apis/credentials.
    api_key: str
    # Custom Search Engine id from https://programmablesearchengine.google.com/.
    cse_id: str
    # Override for the discovery document URL; None uses the client default.
    discovery_service_url: Optional[str] = None
    # Event loop used to schedule the blocking API call; None -> current loop.
    loop: Optional[asyncio.AbstractEventLoop] = None
    # Executor the blocking call runs in; None -> the loop's default executor.
    executor: Optional[futures.Executor] = None
    # Optional proxy URL, e.g. "http://host:port" or "socks5://host:port".
    proxy: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_google(cls, values: dict) -> dict:
        """Accept legacy field names and fail fast when credentials are missing."""
        if "google_api_key" in values:
            values.setdefault("api_key", values["google_api_key"])
            warnings.warn("`google_api_key` is deprecated, use `api_key` instead", DeprecationWarning, stacklevel=2)
        if "api_key" not in values:
            raise ValueError(
                "To use google search engine, make sure you provide the `api_key` when constructing an object. You can obtain "
                "an API key from https://console.cloud.google.com/apis/credentials."
            )
        if "google_cse_id" in values:
            values.setdefault("cse_id", values["google_cse_id"])
            warnings.warn("`google_cse_id` is deprecated, use `cse_id` instead", DeprecationWarning, stacklevel=2)
        if "cse_id" not in values:
            raise ValueError(
                "To use google search engine, make sure you provide the `cse_id` when constructing an object. You can obtain "
                "the cse_id from https://programmablesearchengine.google.com/controlpanel/create."
            )
        return values

    @property
    def google_api_client(self):
        # NOTE(review): the API client is rebuilt on every property access; this
        # looks intentional (cheap relative to the HTTP call) but worth confirming.
        build_kwargs = {"developerKey": self.api_key, "discoveryServiceUrl": self.discovery_service_url}
        if self.proxy:
            parse_result = urlparse(self.proxy)
            proxy_type = parse_result.scheme
            # httplib2 has no PROXY_TYPE_HTTPS constant; https proxies use the HTTP type.
            if proxy_type == "https":
                proxy_type = "http"
            build_kwargs["http"] = httplib2.Http(
                proxy_info=httplib2.ProxyInfo(
                    getattr(httplib2.socks, f"PROXY_TYPE_{proxy_type.upper()}"),
                    parse_result.hostname,
                    parse_result.port,
                ),
            )
        service = build("customsearch", "v1", **build_kwargs)
        return service.cse()

    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: bool = True,
        focus: list[str] | None = None,
    ) -> str | list[dict]:
        """Return the results of a Google search using the official Google API.

        Args:
            query: The search query.
            max_results: The number of results to return.
            as_string: A boolean flag to determine the return type of the results. If True, the function will
                return a formatted string with the search results. If False, it will return a list of dictionaries
                containing detailed information about each search result.
            focus: Specific information to be focused on from each search result.

        Returns:
            The results of the search.
        """
        loop = self.loop or asyncio.get_event_loop()
        # The googleapiclient call is blocking; run it in an executor so the
        # event loop stays responsive.
        future = loop.run_in_executor(
            self.executor, self.google_api_client.list(q=query, num=max_results, cx=self.cse_id).execute
        )
        result = await future
        # Extract the search result items from the response
        search_results = result.get("items", [])

        focus = focus or ["snippet", "link", "title"]
        # Keep only the requested fields from each raw result item.
        details = [{i: j for i, j in item_dict.items() if i in focus} for item_dict in search_results]
        # Return the list of search result URLs
        if as_string:
            return safe_google_results(details)
        return details
def safe_google_results(results: str | list) -> str:
    """Normalize google search results into a safe string.

    Args:
        results: Either a list of result dicts or an already-formatted string.

    Returns:
        A JSON dump when given a list; otherwise the string re-encoded with
        invalid UTF-8 sequences dropped.
    """
    if not isinstance(results, list):
        return results.encode("utf-8", "ignore").decode("utf-8")
    return json.dumps(list(results))
if __name__ == "__main__":
    import fire

    # CLI entry point: expose `run` through python-fire.
    # NOTE(review): GoogleAPIWrapper() is constructed without `api_key`/`cse_id`,
    # which the model validator rejects — confirm the intended CLI invocation.
    fire.Fire(GoogleAPIWrapper().run)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/translator.py | metagpt/tools/translator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 15:36
@Author : alexanderwu
@File : translator.py
"""
prompt = """
# 指令
接下来,作为一位拥有20年翻译经验的翻译专家,当我给出英文句子或段落时,你将提供通顺且具有可读性的{LANG}翻译。注意以下要求:
1. 确保翻译结果流畅且易于理解
2. 无论提供的是陈述句或疑问句,我都只进行翻译
3. 不添加与原文无关的内容
# 原文
{ORIGINAL}
# 译文
"""
class Translator:
    """Builds translation prompts from the module-level `prompt` template."""

    @classmethod
    def translate_prompt(cls, original, lang="中文"):
        """Return a prompt asking for a fluent translation of *original* into *lang*."""
        return prompt.format(ORIGINAL=original, LANG=lang)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/__init__.py | metagpt/tools/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 15:35
@Author : alexanderwu
@File : __init__.py
"""
from metagpt.tools import libs # this registers all tools
from metagpt.tools.tool_registry import TOOL_REGISTRY
from metagpt.configs.search_config import SearchEngineType
from metagpt.configs.browser_config import WebBrowserEngineType
_ = libs, TOOL_REGISTRY # Avoid pre-commit error
class SearchInterface:
    # Minimal duck-typed interface: anything exposing an async `asearch` qualifies.
    async def asearch(self, *args, **kwargs):
        """Asynchronous search stub; concrete engines provide the implementation."""
        ...
__all__ = ["SearchEngineType", "WebBrowserEngineType", "TOOL_REGISTRY"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/tool_recommend.py | metagpt/tools/tool_recommend.py | from __future__ import annotations
import json
import traceback
from typing import Any
import numpy as np
from pydantic import BaseModel, field_validator
from rank_bm25 import BM25Okapi
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.prompts.di.role_zero import JSON_REPAIR_PROMPT
from metagpt.schema import Plan
from metagpt.tools import TOOL_REGISTRY
from metagpt.tools.tool_data_type import Tool
from metagpt.tools.tool_registry import validate_tool_names
from metagpt.utils.common import CodeParser
from metagpt.utils.repair_llm_raw_output import RepairType, repair_llm_raw_output
TOOL_INFO_PROMPT = """
## Capabilities
- You can utilize pre-defined tools in any code lines from 'Available Tools' in the form of Python class or function.
- You can freely combine the use of any other public packages, like sklearn, numpy, pandas, etc..
## Available Tools:
Each tool is described in JSON format. When you call a tool, import the tool from its path first.
{tool_schemas}
"""
TOOL_RECOMMENDATION_PROMPT = """
## User Requirement:
{current_task}
## Task
Recommend up to {topk} tools from 'Available Tools' that can help solve the 'User Requirement'.
## Available Tools:
{available_tools}
## Tool Selection and Instructions:
- Select tools most relevant to completing the 'User Requirement'.
- If you believe that no tools are suitable, indicate with an empty list.
- Only list the names of the tools, not the full schema of each tool.
- Ensure selected tools are listed in 'Available Tools'.
- Output a json list of tool names:
```json
["tool_name1", "tool_name2", ...]
```
"""
class ToolRecommender(BaseModel):
    """
    The default ToolRecommender:
    1. Recall: To be implemented in subclasses. Recall tools based on the given context and plan.
    2. Rank: Use LLM to select final candidates from recalled set.
    """

    # Candidate tool pool, keyed by tool name; validated from a list of names.
    tools: dict[str, Tool] = {}
    force: bool = False  # whether to forcedly recommend the specified tools

    @field_validator("tools", mode="before")
    @classmethod
    def validate_tools(cls, v: list[str]) -> dict[str, Tool]:
        """Normalize the user-provided tool list into a {name: Tool} mapping."""
        # If `v` is already a dictionary (e.g., during deserialization), return it as is.
        if isinstance(v, dict):
            return v
        # One can use special symbol ["<all>"] to indicate use of all registered tools
        if v == ["<all>"]:
            return TOOL_REGISTRY.get_all_tools()
        return validate_tool_names(v)

    async def recommend_tools(
        self, context: str = "", plan: Plan = None, recall_topk: int = 20, topk: int = 5
    ) -> list[Tool]:
        """
        Recommends a list of tools based on the given context and plan. The recommendation process includes two stages: recall from a large pool and rank the recalled tools to select the final set.

        Args:
            context (str): The context for tool recommendation.
            plan (Plan): The plan for tool recommendation.
            recall_topk (int): The number of tools to recall in the initial step.
            topk (int): The number of tools to return after rank as final recommendations.

        Returns:
            list[Tool]: A list of recommended tools.
        """
        if not self.tools:
            return []

        if self.force or (not context and not plan):
            # directly use what users have specified as result for forced recommendation;
            # directly use the whole set if there is no useful information
            return list(self.tools.values())

        recalled_tools = await self.recall_tools(context=context, plan=plan, topk=recall_topk)
        if not recalled_tools:
            return []

        ranked_tools = await self.rank_tools(recalled_tools=recalled_tools, context=context, plan=plan, topk=topk)

        logger.info(f"Recommended tools: \n{[tool.name for tool in ranked_tools]}")

        return ranked_tools

    async def get_recommended_tool_info(self, fixed: list[str] = None, **kwargs) -> str:
        """
        Wrap recommended tools with their info in a string, which can be used directly in a prompt.
        """
        recommended_tools = await self.recommend_tools(**kwargs)
        if fixed:
            # Always include the user-pinned tools (when they exist in the pool).
            recommended_tools.extend([self.tools[tool_name] for tool_name in fixed if tool_name in self.tools])
        if not recommended_tools:
            return ""
        tool_schemas = {tool.name: tool.schemas for tool in recommended_tools}
        return TOOL_INFO_PROMPT.format(tool_schemas=tool_schemas)

    async def recall_tools(self, context: str = "", plan: Plan = None, topk: int = 20) -> list[Tool]:
        """
        Retrieves a list of relevant tools from a large pool, based on the given context and plan.
        """
        raise NotImplementedError

    async def rank_tools(
        self, recalled_tools: list[Tool], context: str = "", plan: Plan = None, topk: int = 5
    ) -> list[Tool]:
        """
        Default rank methods for a ToolRecommender. Use LLM to rank the recalled tools based on the given context, plan, and topk value.
        """
        current_task = plan.current_task.instruction if plan else context

        available_tools = {tool.name: tool.schemas["description"] for tool in recalled_tools}
        prompt = TOOL_RECOMMENDATION_PROMPT.format(
            current_task=current_task,
            available_tools=available_tools,
            topk=topk,
        )
        rsp = await LLM().aask(prompt, stream=False)

        # Interim parsing logic; replace this section once the role-zero version
        # is done (translated from the original Chinese note).
        # -------------begin---------------
        # Bind the name up front: previously, if CodeParser.parse_code raised a
        # non-JSON exception, the generic `except` below left `ranked_tools`
        # unbound and the isinstance check crashed with NameError.
        ranked_tools = None
        try:
            ranked_tools = CodeParser.parse_code(block=None, lang="json", text=rsp)
            ranked_tools = json.loads(
                repair_llm_raw_output(output=ranked_tools, req_keys=[None], repair_type=RepairType.JSON)
            )
        except json.JSONDecodeError:
            # Ask the LLM itself to repair malformed JSON, then parse again.
            ranked_tools = await LLM().aask(msg=JSON_REPAIR_PROMPT.format(json_data=rsp))
            ranked_tools = json.loads(CodeParser.parse_code(block=None, lang="json", text=ranked_tools))
        except Exception:
            tb = traceback.format_exc()
            print(tb)
        # Tolerate LLMs that wrap the list in a dict, e.g. {"tools": [...]};
        # an empty dict now falls through to the recalled-tools fallback instead
        # of raising IndexError.
        if isinstance(ranked_tools, dict):
            ranked_tools = next(iter(ranked_tools.values()), [])
        # -------------end---------------

        if not isinstance(ranked_tools, list):
            logger.warning(f"Invalid rank result: {ranked_tools}, will use the recalled tools instead.")
            ranked_tools = list(available_tools.keys())

        valid_tools = validate_tool_names(ranked_tools)

        return list(valid_tools.values())[:topk]
class TypeMatchToolRecommender(ToolRecommender):
    """
    A legacy ToolRecommender using task type matching at the recall stage:
    1. Recall: Find tools based on exact match between task type and tool tag;
    2. Rank: LLM rank, the same as the default ToolRecommender.
    """

    async def recall_tools(self, context: str = "", plan: Plan = None, topk: int = 20) -> list[Tool]:
        # Without a plan there is no task type to match on; fall back to the
        # first `topk` user-specified tools.
        if not plan:
            return list(self.tools.values())[:topk]

        # find tools based on exact match between task type and tool tag
        task_type = plan.current_task.task_type
        candidate_tools = TOOL_REGISTRY.get_tools_by_tag(task_type)
        # Intersect with the user-specified pool so only permitted tools are recalled.
        candidate_tool_names = set(self.tools.keys()) & candidate_tools.keys()
        recalled_tools = [candidate_tools[tool_name] for tool_name in candidate_tool_names][:topk]
        logger.info(f"Recalled tools: \n{[tool.name for tool in recalled_tools]}")

        return recalled_tools
class BM25ToolRecommender(ToolRecommender):
    """
    A ToolRecommender using BM25 at the recall stage:
    1. Recall: Querying tool descriptions with task instruction if plan exists. Otherwise, return all user-specified tools;
    2. Rank: LLM rank, the same as the default ToolRecommender.
    """

    # BM25Okapi index over the tool corpus; built once at construction.
    bm25: Any = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._init_corpus()

    def _init_corpus(self):
        # One document per tool: "<name> <tags>: <description>". The corpus order
        # follows self.tools.values(), which recall_tools relies on for indexing.
        corpus = [f"{tool.name} {tool.tags}: {tool.schemas['description']}" for tool in self.tools.values()]
        tokenized_corpus = [self._tokenize(doc) for doc in corpus]
        self.bm25 = BM25Okapi(tokenized_corpus)

    def _tokenize(self, text):
        return text.split()  # FIXME: needs more sophisticated tokenization

    async def recall_tools(self, context: str = "", plan: Plan = None, topk: int = 20) -> list[Tool]:
        # Score every tool document against the current task instruction (or the
        # raw context when no plan exists) and keep the `topk` best matches.
        query = plan.current_task.instruction if plan else context

        query_tokens = self._tokenize(query)
        doc_scores = self.bm25.get_scores(query_tokens)
        top_indexes = np.argsort(doc_scores)[::-1][:topk]
        # Positions in self.tools.values() match the corpus built in _init_corpus.
        recalled_tools = [list(self.tools.values())[index] for index in top_indexes]
        logger.info(
            f"Recalled tools: \n{[tool.name for tool in recalled_tools]}; Scores: {[np.round(doc_scores[index], 4) for index in top_indexes]}"
        )

        return recalled_tools
class EmbeddingToolRecommender(ToolRecommender):
    """
    NOTE: To be implemented.
    A ToolRecommender using embeddings at the recall stage:
    1. Recall: Use embeddings to calculate the similarity between query and tool info;
    2. Rank: LLM rank, the same as the default ToolRecommender.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def recall_tools(self, context: str = "", plan: Plan = None, topk: int = 20) -> list[Tool]:
        # Placeholder: embedding-based recall is not implemented yet; returns None.
        pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_bing.py | metagpt/tools/search_engine_bing.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
import warnings
from typing import Optional
import aiohttp
from pydantic import BaseModel, ConfigDict, model_validator
class BingAPIWrapper(BaseModel):
    """Async wrapper around the Bing Web Search v7 API."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Subscription key for the Bing Search resource (Azure portal > Keys and Endpoint).
    api_key: str
    bing_url: str = "https://api.bing.microsoft.com/v7.0/search"
    # Optional shared session; when None a throwaway session is created per request.
    aiosession: Optional[aiohttp.ClientSession] = None
    proxy: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_api_key(cls, values: dict) -> dict:
        """Accept the legacy `bing_api_key` field name, mirroring the other search wrappers.

        The previous implementation compared `api_key` against itself, so it
        warned ("`api_key` is deprecated, use `api_key` instead") on every valid
        construction and never honored a legacy key name.
        """
        if "bing_api_key" in values:
            values.setdefault("api_key", values["bing_api_key"])
            warnings.warn("`bing_api_key` is deprecated, use `api_key` instead", DeprecationWarning, stacklevel=2)
        return values

    @property
    def header(self):
        # Bing authenticates through this subscription-key header.
        return {"Ocp-Apim-Subscription-Key": self.api_key}

    async def run(
        self,
        query: str,
        max_results: int = 8,
        as_string: bool = True,
        focus: list[str] | None = None,
    ) -> str | list[dict]:
        """Return the results of a Bing search using the official Bing API.

        Args:
            query: The search query.
            max_results: The number of results to return.
            as_string: A boolean flag to determine the return type of the results. If True, the function will
                return a formatted string with the search results. If False, it will return a list of dictionaries
                containing detailed information about each search result.
            focus: Specific information to be focused on from each search result.

        Returns:
            The results of the search.
        """
        params = {
            "q": query,
            "count": max_results,
            "textFormat": "HTML",
        }
        result = await self.results(params)
        search_results = result["webPages"]["value"]
        focus = focus or ["snippet", "link", "title"]
        # Normalize Bing's url/name fields to the shared link/title schema used
        # by the other search-engine wrappers.
        for item_dict in search_results:
            item_dict["link"] = item_dict["url"]
            item_dict["title"] = item_dict["name"]
        details = [{i: j for i, j in item_dict.items() if i in focus} for item_dict in search_results]
        if as_string:
            return safe_results(details)
        return details

    async def results(self, params: dict) -> dict:
        """Use aiohttp to run the query against Bing and return the parsed JSON response."""
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(self.bing_url, params=params, headers=self.header, proxy=self.proxy) as response:
                    response.raise_for_status()
                    res = await response.json()
        else:
            async with self.aiosession.get(
                self.bing_url, params=params, headers=self.header, proxy=self.proxy
            ) as response:
                response.raise_for_status()
                res = await response.json()

        return res
def safe_results(results: str | list) -> str:
    """Normalize bing search results into a safe string.

    Args:
        results: Either a list of result dicts or an already-formatted string.

    Returns:
        A JSON dump when given a list; otherwise the string re-encoded with
        invalid UTF-8 sequences dropped.
    """
    if not isinstance(results, list):
        return results.encode("utf-8", "ignore").decode("utf-8")
    return json.dumps(list(results))
if __name__ == "__main__":
    import fire

    # CLI entry point: expose `run` through python-fire.
    # NOTE(review): BingAPIWrapper() is constructed without the required
    # `api_key` field — confirm the intended CLI invocation.
    fire.Fire(BingAPIWrapper().run)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/openapi_v3_hello.py | metagpt/tools/openapi_v3_hello.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/2 16:03
@Author : mashenquan
@File : openapi_v3_hello.py
@Desc : Implement the OpenAPI Specification 3.0 demo and use the following command to test the HTTP service:
curl -X 'POST' \
'http://localhost:8082/openapi/greeting/dave' \
-H 'accept: text/plain' \
-H 'Content-Type: application/json' \
-d '{}'
"""
from pathlib import Path
import connexion
# openapi implement
async def post_greeting(name: str) -> str:
    """Connexion handler for the `post_greeting` operation: greet *name* in plain text."""
    return "Hello {}\n".format(name)
if __name__ == "__main__":
    # Serve the OpenAPI spec from docs/.well-known (three levels above this file)
    # with connexion's async app on port 8082.
    specification_dir = Path(__file__).parent.parent.parent / "docs/.well-known"
    app = connexion.AsyncApp(__name__, specification_dir=str(specification_dir))
    app.add_api("openapi.yaml", arguments={"title": "Hello World Example"})
    app.run(port=8082)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/search_engine_serper.py | metagpt/tools/search_engine_serper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/23 18:27
@Author : alexanderwu
@File : search_engine_serpapi.py
"""
import json
import warnings
from typing import Any, Dict, Optional
import aiohttp
from pydantic import BaseModel, ConfigDict, Field, model_validator
class SerperWrapper(BaseModel):
    """Async wrapper around the serper.dev Google-search API."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # API key from https://serper.dev/.
    api_key: str
    url: str = "https://google.serper.dev/search"
    # Base payload merged into every query; page/num control result paging.
    payload: dict = Field(default_factory=lambda: {"page": 1, "num": 10})
    # Optional shared session; when None a throwaway session is created per call.
    aiosession: Optional[aiohttp.ClientSession] = None
    proxy: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_serper(cls, values: dict) -> dict:
        """Accept the legacy `serper_api_key` field and fail fast when no key is given."""
        if "serper_api_key" in values:
            values.setdefault("api_key", values["serper_api_key"])
            warnings.warn("`serper_api_key` is deprecated, use `api_key` instead", DeprecationWarning, stacklevel=2)
        if "api_key" not in values:
            raise ValueError(
                "To use serper search engine, make sure you provide the `api_key` when constructing an object. You can obtain "
                "an API key from https://serper.dev/."
            )
        return values

    async def run(self, query: str, max_results: int = 8, as_string: bool = True, **kwargs: Any) -> str:
        """Run query through Serper and parse result async."""
        # A single string becomes a one-element batch; a list of queries is sent
        # as one batched request and each response is processed separately.
        if isinstance(query, str):
            return self._process_response((await self.results([query], max_results))[0], as_string=as_string)
        else:
            results = [self._process_response(res, as_string) for res in await self.results(query, max_results)]
        return "\n".join(results) if as_string else results

    async def results(self, queries: list[str], max_results: int = 8) -> dict:
        """Use aiohttp to run query through Serper and return the results async."""
        payloads = self.get_payloads(queries, max_results)
        headers = self.get_headers()

        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(self.url, data=payloads, headers=headers, proxy=self.proxy) as response:
                    response.raise_for_status()
                    res = await response.json()
        else:
            async with self.aiosession.post(self.url, data=payloads, headers=headers, proxy=self.proxy) as response:
                response.raise_for_status()
                res = await response.json()

        return res

    def get_payloads(self, queries: list[str], max_results: int) -> Dict[str, str]:
        """Get payloads for Serper."""
        # Each query gets the base payload plus its own q/num; the batch is
        # serialized as one JSON array string for the POST body.
        payloads = []
        for query in queries:
            _payload = {
                "q": query,
                "num": max_results,
            }
            payloads.append({**self.payload, **_payload})
        return json.dumps(payloads, sort_keys=True)

    def get_headers(self) -> Dict[str, str]:
        # Serper authenticates through the X-API-KEY header.
        headers = {"X-API-KEY": self.api_key, "Content-Type": "application/json"}
        return headers

    @staticmethod
    def _process_response(res: dict, as_string: bool = False) -> str:
        """Process response from SerpAPI."""
        # logger.debug(res)
        focus = ["title", "snippet", "link"]

        # Project a raw result item down to the focused fields only.
        def get_focused(x):
            return {i: j for i, j in x.items() if i in focus}

        if "error" in res.keys():
            raise ValueError(f"Got error from SerpAPI: {res['error']}")
        # Prefer direct answers (answer box, sports spotlight, knowledge graph)
        # before falling back to the first organic snippet.
        if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
            toret = res["answer_box"]["answer"]
        elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
            toret = res["answer_box"]["snippet"]
        elif "answer_box" in res.keys() and "snippet_highlighted_words" in res["answer_box"].keys():
            toret = res["answer_box"]["snippet_highlighted_words"][0]
        elif "sports_results" in res.keys() and "game_spotlight" in res["sports_results"].keys():
            toret = res["sports_results"]["game_spotlight"]
        elif "knowledge_graph" in res.keys() and "description" in res["knowledge_graph"].keys():
            toret = res["knowledge_graph"]["description"]
        elif "snippet" in res["organic"][0].keys():
            # NOTE(review): this branch assumes `organic` is present and non-empty;
            # a response without organic results would raise here — confirm upstream.
            toret = res["organic"][0]["snippet"]
        else:
            toret = "No good search result found"

        toret_l = []
        if "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
            toret_l += [get_focused(res["answer_box"])]
        if res.get("organic"):
            toret_l += [get_focused(i) for i in res.get("organic")]

        return str(toret) + "\n" + str(toret_l) if as_string else toret_l
if __name__ == "__main__":
    import fire

    # CLI entry point: expose `run` through python-fire.
    # NOTE(review): SerperWrapper() is constructed without the required
    # `api_key` field — confirm the intended CLI invocation.
    fire.Fire(SerperWrapper().run)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/tool_convert.py | metagpt/tools/tool_convert.py | import ast
import inspect
from metagpt.utils.parse_docstring import GoogleDocstringParser, remove_spaces
PARSER = GoogleDocstringParser
def convert_code_to_tool_schema(obj, include: list[str] = None) -> dict:
    """Converts an object (function or class) to a tool schema by inspecting the object.

    Args:
        obj: A class or function object to inspect.
        include: Optional whitelist of method names; when given, only these
            methods of a class are included in the schema.

    Returns:
        A schema dict. For a class: {"type": "class", "description": ..., "methods": {...}};
        for a function: the dict produced by `function_docstring_to_schema`.

    Raises:
        TypeError: If `obj` is neither a class nor a function. (Previously this
            path fell through and raised UnboundLocalError on return.)
    """
    docstring = inspect.getdoc(obj)
    # assert docstring, "no docstring found for the objects, skip registering"

    if inspect.isclass(obj):
        schema = {"type": "class", "description": remove_spaces(docstring), "methods": {}}
        for name, method in inspect.getmembers(obj, inspect.isfunction):
            if name.startswith("_") and name != "__init__":  # skip private methods
                continue
            if include and name not in include:
                continue
            # Search the MRO so inherited docstrings are picked up too.
            method_doc = get_class_method_docstring(obj, name)
            schema["methods"][name] = function_docstring_to_schema(method, method_doc)
    elif inspect.isfunction(obj):
        schema = function_docstring_to_schema(obj, docstring)
    else:
        raise TypeError(f"Expected a class or function, got {type(obj).__name__}")

    return schema
def convert_code_to_tool_schema_ast(code: str) -> dict:
    """Converts a code string to tool schemas by parsing the code with AST.

    Args:
        code: Source code containing top-level function/class definitions.

    Returns:
        A dict mapping definition names to their schema dicts, as collected by
        `CodeVisitor.get_tool_schemas`. (The previous `list[dict]` return
        annotation was wrong: `get_tool_schemas` returns a dict.)
    """
    visitor = CodeVisitor(code)
    visitor.visit(ast.parse(code))
    return visitor.get_tool_schemas()
def function_docstring_to_schema(fn_obj, docstring="") -> dict:
    """
    Build a schema dict for a function from its docstring.

    Args:
        fn_obj: The function object.
        docstring: The raw docstring text to parse.

    Returns:
        A dictionary with the keys:
        - 'type': "function" or "async_function", detected from the object.
        - 'description': the overall description parsed from the docstring,
          provided to LLMs for both recommending and using the function.
        - 'signature': the call signature as a string, so LLMs know how to call it.
        - 'parameters': the args/returns sections parsed from the docstring.
    """
    overall_desc, param_desc = PARSER.parse(remove_spaces(docstring))
    fn_type = "async_function" if inspect.iscoroutinefunction(fn_obj) else "function"
    return {
        "type": fn_type,
        "description": overall_desc,
        "signature": str(inspect.signature(fn_obj)),
        "parameters": param_desc,
    }
def get_class_method_docstring(cls, method_name):
    """Return the docstring for *method_name*, searching the class MRO.

    Undocumented subclass overrides fall back to the nearest documented
    ancestor; returns None when no class in the hierarchy documents the method.
    """
    candidates = (klass.__dict__[method_name] for klass in cls.__mro__ if method_name in klass.__dict__)
    for candidate in candidates:
        if candidate.__doc__:
            return candidate.__doc__
    return None  # no docstring anywhere in the hierarchy
class CodeVisitor(ast.NodeVisitor):
    """Visit and convert the AST nodes within a code file to tool schemas"""

    def __init__(self, source_code: str):
        self.tool_schemas = {}  # {tool_name: tool_schema}
        # Full source text, kept so exact code segments can be attached to schemas.
        self.source_code = source_code

    def visit_ClassDef(self, node):
        # Build a class schema from its docstring and its public methods
        # (private names are skipped, except __init__).
        class_schemas = {"type": "class", "description": remove_spaces(ast.get_docstring(node)), "methods": {}}
        for body_node in node.body:
            if isinstance(body_node, (ast.FunctionDef, ast.AsyncFunctionDef)) and (
                not body_node.name.startswith("_") or body_node.name == "__init__"
            ):
                func_schemas = self._get_function_schemas(body_node)
                class_schemas["methods"].update({body_node.name: func_schemas})
        # Attach the exact source text of the class definition.
        class_schemas["code"] = ast.get_source_segment(self.source_code, node)
        self.tool_schemas[node.name] = class_schemas

    def visit_FunctionDef(self, node):
        self._visit_function(node)

    def visit_AsyncFunctionDef(self, node):
        self._visit_function(node)

    def _visit_function(self, node):
        # Private helpers are not exposed as tools.
        if node.name.startswith("_"):
            return
        function_schemas = self._get_function_schemas(node)
        function_schemas["code"] = ast.get_source_segment(self.source_code, node)
        self.tool_schemas[node.name] = function_schemas

    def _get_function_schemas(self, node):
        # Parse the docstring into an overall description plus parameter details.
        docstring = remove_spaces(ast.get_docstring(node))
        overall_desc, param_desc = PARSER.parse(docstring)
        return {
            "type": "async_function" if isinstance(node, ast.AsyncFunctionDef) else "function",
            "description": overall_desc,
            "signature": self._get_function_signature(node),
            "parameters": param_desc,
        }

    def _get_function_signature(self, node):
        # Render "(arg: annotation = default, ...) -> return_annotation" from the AST.
        # NOTE: only plain positional parameters (node.args.args) are rendered;
        # *args/**kwargs and keyword-only arguments are omitted.
        args = []
        # Defaults align with the trailing positional args, so zip from the end.
        defaults = dict(zip([arg.arg for arg in node.args.args][-len(node.args.defaults) :], node.args.defaults))
        for arg in node.args.args:
            arg_str = arg.arg
            if arg.annotation:
                annotation = ast.unparse(arg.annotation)
                arg_str += f": {annotation}"
            if arg.arg in defaults:
                default_value = ast.unparse(defaults[arg.arg])
                arg_str += f" = {default_value}"
            args.append(arg_str)

        return_annotation = ""
        if node.returns:
            return_annotation = f" -> {ast.unparse(node.returns)}"

        return f"({', '.join(args)}){return_annotation}"

    def get_tool_schemas(self):
        # Return all collected schemas, keyed by definition name.
        return self.tool_schemas
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/azure_tts.py | metagpt/tools/azure_tts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/9 22:22
@Author : Leo Xiao
@File : azure_tts.py
@Modified by: mashenquan, 2023/8/17. Azure TTS OAS3 api, which provides text-to-speech functionality
"""
import base64
from pathlib import Path
from uuid import uuid4
import aiofiles
from azure.cognitiveservices.speech import AudioConfig, SpeechConfig, SpeechSynthesizer
from metagpt.logs import logger
class AzureTTS:
    """Azure Text-to-Speech"""

    def __init__(self, subscription_key, region):
        """
        :param subscription_key: key is used to access your Azure AI service API, see: `https://portal.azure.com/` > `Resource Management` > `Keys and Endpoint`
        :param region: This is the location (or region) of your resource. You may need to use this field when making calls to this API.
        """
        self.subscription_key = subscription_key
        self.region = region

    # Parameter reference: https://learn.microsoft.com/zh-cn/azure/cognitive-services/speech-service/language-support?tabs=tts#voice-styles-and-roles
    async def synthesize_speech(self, lang, voice, text, output_file):
        """Synthesize `text` with the given language/voice into `output_file` via SSML.

        NOTE(review): despite being declared async, the final `.get()` below
        blocks until synthesis completes — consider an executor; confirm with callers.
        """
        speech_config = SpeechConfig(subscription=self.subscription_key, region=self.region)
        speech_config.speech_synthesis_voice_name = voice
        audio_config = AudioConfig(filename=output_file)
        synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)

        # More detail: https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice
        ssml_string = (
            "<speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' "
            f"xml:lang='{lang}' xmlns:mstts='http://www.w3.org/2001/mstts'>"
            f"<voice name='{voice}'>{text}</voice></speak>"
        )

        return synthesizer.speak_ssml_async(ssml_string).get()

    @staticmethod
    def role_style_text(role, style, text):
        # Wrap `text` so it is spoken in the given role and speaking style.
        return f'<mstts:express-as role="{role}" style="{style}">{text}</mstts:express-as>'

    @staticmethod
    def role_text(role, text):
        # Wrap `text` so it is spoken in the given role.
        return f'<mstts:express-as role="{role}">{text}</mstts:express-as>'

    @staticmethod
    def style_text(style, text):
        # Wrap `text` so it is spoken in the given speaking style.
        return f'<mstts:express-as style="{style}">{text}</mstts:express-as>'
# Export
async def oas3_azsure_tts(text, lang="", voice="", style="", role="", subscription_key="", region=""):
    """Text to speech via Azure.

    For more details, check out: `https://learn.microsoft.com/en-us/azure/ai-services/speech-service/language-support?tabs=tts`

    :param text: The text used for voice conversion.
    :param lang: Language code such as ``en`` or a locale such as ``en-US``; defaults to ``zh-CN``.
    :param voice: Voice name (see the Azure voice gallery); defaults to ``zh-CN-XiaomoNeural``.
    :param style: Speaking style (cheerfulness, empathy, calm, ...); defaults to ``affectionate``.
    :param role: Role the voice acts as (age/gender); defaults to ``Girl``.
    :param subscription_key: Key used to access your Azure AI service API, see: `https://portal.azure.com/` > `Resource Management` > `Keys and Endpoint`
    :param region: The location (or region) of your resource.
    :return: Base64-encoded .wav file data on success, otherwise an empty string.
    """
    if not text:
        return ""
    # Empty arguments fall back to fixed Chinese-voice defaults.
    lang = lang or "zh-CN"
    voice = voice or "zh-CN-XiaomoNeural"
    role = role or "Girl"
    style = style or "affectionate"
    ssml_fragment = AzureTTS.role_style_text(role=role, style=style, text=text)
    tts = AzureTTS(subscription_key=subscription_key, region=region)
    # Synthesize into a unique temp .wav next to this module, then base64 it.
    wav_path = Path(__file__).resolve().parent / (str(uuid4()).replace("-", "") + ".wav")
    try:
        await tts.synthesize_speech(lang=lang, voice=voice, text=ssml_fragment, output_file=str(wav_path))
        async with aiofiles.open(wav_path, mode="rb") as reader:
            raw = await reader.read()
        encoded = base64.b64encode(raw).decode("utf-8")
    except Exception as e:
        logger.error(f"text:{text}, error:{e}")
        return ""
    finally:
        # Always remove the temp file, even on failure.
        wav_path.unlink(missing_ok=True)
    return encoded
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/web_browser_engine.py | metagpt/tools/web_browser_engine.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import importlib
from typing import Annotated, Any, Callable, Coroutine, Optional, Union, overload
from pydantic import BaseModel, ConfigDict, Field, model_validator
from metagpt.configs.browser_config import BrowserConfig
from metagpt.tools import WebBrowserEngineType
from metagpt.utils.parse_html import WebPage
class WebBrowserEngine(BaseModel):
    """Defines a web browser engine configuration for automated browsing and data extraction.
    This class encapsulates the configuration and operational logic for different web browser engines,
    such as Playwright, Selenium, or custom implementations. It provides a unified interface to run
    browser automation tasks.
    Attributes:
        model_config: Configuration dictionary allowing arbitrary types and extra fields.
        engine: The type of web browser engine to use.
        run_func: An optional coroutine function to run the browser engine.
        proxy: An optional proxy server URL to use with the browser engine.
    """
    # extra="allow" lets arbitrary engine-specific options ride along on the model;
    # they are forwarded to the concrete wrapper in _process_extra.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
    engine: WebBrowserEngineType = WebBrowserEngineType.PLAYWRIGHT
    # run_func is excluded from serialization; it holds the concrete wrapper's coroutine.
    run_func: Annotated[
        Optional[Callable[..., Coroutine[Any, Any, Union[WebPage, list[WebPage]]]]],
        Field(exclude=True),
    ] = None
    proxy: Optional[str] = None
    @model_validator(mode="after")
    def validate_extra(self):
        """Validates and processes extra configuration data after model initialization.
        This method is automatically called by Pydantic to validate and process any extra configuration
        data provided to the model. It ensures that the extra data is properly integrated into the model's
        configuration and operational logic.
        Returns:
            The instance itself after processing the extra data.
        """
        # Collect explicitly-set fields (minus the engine selector) plus any extra kwargs,
        # then hand everything to the engine-specific wrapper.
        data = self.model_dump(exclude={"engine"}, exclude_none=True, exclude_defaults=True)
        if self.model_extra:
            data.update(self.model_extra)
        self._process_extra(**data)
        return self
    def _process_extra(self, **kwargs):
        """Processes extra configuration data to set up the browser engine run function.
        Depending on the specified engine type, this method dynamically imports and configures
        the appropriate browser engine wrapper and its run function.
        Args:
            **kwargs: Arbitrary keyword arguments representing extra configuration data.
        Raises:
            NotImplementedError: If the engine type is not supported.
        """
        # Wrappers are imported lazily so that only the selected engine's
        # dependencies (playwright / selenium) need to be installed.
        if self.engine is WebBrowserEngineType.PLAYWRIGHT:
            module = "metagpt.tools.web_browser_engine_playwright"
            run_func = importlib.import_module(module).PlaywrightWrapper(**kwargs).run
        elif self.engine is WebBrowserEngineType.SELENIUM:
            module = "metagpt.tools.web_browser_engine_selenium"
            run_func = importlib.import_module(module).SeleniumWrapper(**kwargs).run
        elif self.engine is WebBrowserEngineType.CUSTOM:
            # CUSTOM relies on the caller having supplied run_func directly.
            run_func = self.run_func
        else:
            raise NotImplementedError
        self.run_func = run_func
    @classmethod
    def from_browser_config(cls, config: BrowserConfig, **kwargs):
        """Creates a WebBrowserEngine instance from a BrowserConfig object and additional keyword arguments.
        This class method facilitates the creation of a WebBrowserEngine instance by extracting
        configuration data from a BrowserConfig object and optionally merging it with additional
        keyword arguments.
        Args:
            config: A BrowserConfig object containing base configuration data.
            **kwargs: Optional additional keyword arguments to override or extend the configuration.
        Returns:
            A new instance of WebBrowserEngine configured according to the provided arguments.
        """
        data = config.model_dump()
        return cls(**data, **kwargs)
    @overload
    async def run(self, url: str, per_page_timeout: float = None) -> WebPage:
        ...
    @overload
    async def run(self, url: str, *urls: str, per_page_timeout: float = None) -> list[WebPage]:
        ...
    async def run(self, url: str, *urls: str, per_page_timeout: float = None) -> WebPage | list[WebPage]:
        """Runs the browser engine to load one or more web pages.
        This method is the implementation of the overloaded run signatures. It delegates the task
        of loading web pages to the configured run function, handling either a single URL or multiple URLs.
        Args:
            url: The URL of the first web page to load.
            *urls: Additional URLs of web pages to load, if any.
            per_page_timeout: The maximum time for fetching a single page in seconds.
        Returns:
            A WebPage object if a single URL is provided, or a list of WebPage objects if multiple URLs are provided.
        """
        return await self.run_func(url, *urls, per_page_timeout=per_page_timeout)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/iflytek_tts.py | metagpt/tools/iflytek_tts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/17
@Author : mashenquan
@File : iflytek_tts.py
@Desc : iFLYTEK TTS OAS3 api, which provides text-to-speech functionality
"""
import base64
import hashlib
import hmac
import json
import uuid
from datetime import datetime
from enum import Enum
from pathlib import Path
from time import mktime
from typing import Optional
from urllib.parse import urlencode
from wsgiref.handlers import format_date_time
import aiofiles
import websockets as websockets
from pydantic import BaseModel
from metagpt.logs import logger
class IFlyTekTTSStatus(Enum):
    # Frame status codes used by the iFlyTek TTS websocket protocol; the
    # receive loop stops once a STATUS_LAST_FRAME response arrives.
    STATUS_FIRST_FRAME = 0  # The first frame
    STATUS_CONTINUE_FRAME = 1  # The intermediate frame
    STATUS_LAST_FRAME = 2  # The last frame
class AudioData(BaseModel):
    # One audio frame from the TTS websocket response.
    audio: str  # base64-encoded audio chunk
    status: int  # frame status; compare with IFlyTekTTSStatus values
    ced: str  # progress cursor reported by the service
class IFlyTekTTSResponse(BaseModel):
    # Top-level envelope of an iFlyTek TTS websocket message.
    code: int  # 0 means success (per the iFlyTek API convention)
    message: str
    data: Optional[AudioData] = None  # absent on pure status/error messages
    sid: str  # session id assigned by the service
DEFAULT_IFLYTEK_VOICE = "xiaoyan"
class IFlyTekTTS(object):
    """Client for the iFlyTek online TTS websocket API (signed-URL authentication)."""
    def __init__(self, app_id: str, api_key: str, api_secret: str):
        """
        :param app_id: Application ID is used to access your iFlyTek service API, see: `https://console.xfyun.cn/services/tts`
        :param api_key: WebAPI argument, see: `https://console.xfyun.cn/services/tts`
        :param api_secret: WebAPI argument, see: `https://console.xfyun.cn/services/tts`
        """
        self.app_id = app_id
        self.api_key = api_key
        self.api_secret = api_secret
    async def synthesize_speech(self, text, output_file: str, voice=DEFAULT_IFLYTEK_VOICE):
        # Build one complete request ("status": 2 = final/only chunk); the text
        # payload must be base64-encoded per the API spec ("tte": "utf8").
        url = self._create_url()
        data = {
            "common": {"app_id": self.app_id},
            "business": {"aue": "lame", "sfl": 1, "auf": "audio/L16;rate=16000", "vcn": voice, "tte": "utf8"},
            "data": {"status": 2, "text": str(base64.b64encode(text.encode("utf-8")), "UTF8")},
        }
        req = json.dumps(data)
        async with websockets.connect(url) as websocket:
            # send request
            await websocket.send(req)
            # receive frames
            async with aiofiles.open(str(output_file), "wb") as writer:
                while True:
                    v = await websocket.recv()
                    rsp = IFlyTekTTSResponse(**json.loads(v))
                    if rsp.data:
                        # Append each decoded audio frame to the output file;
                        # keep looping until the last frame arrives.
                        binary_data = base64.b64decode(rsp.data.audio)
                        await writer.write(binary_data)
                        if rsp.data.status != IFlyTekTTSStatus.STATUS_LAST_FRAME.value:
                            continue
                    # Reached here: last frame received, or a data-less
                    # (status/error) message — stop either way.
                    break
    def _create_url(self):
        """Create request url"""
        url = "wss://tts-api.xfyun.cn/v2/tts"
        # Generate a timestamp in RFC1123 format
        now = datetime.now()
        date = format_date_time(mktime(now.timetuple()))
        # NOTE(review): the signature string uses host "ws-api.xfyun.cn" while the
        # websocket URL host is "tts-api.xfyun.cn" — looks inconsistent; confirm
        # against the iFlyTek authentication docs before changing.
        signature_origin = "host: " + "ws-api.xfyun.cn" + "\n"
        signature_origin += "date: " + date + "\n"
        signature_origin += "GET " + "/v2/tts " + "HTTP/1.1"
        # Perform HMAC-SHA256 encryption
        signature_sha = hmac.new(
            self.api_secret.encode("utf-8"), signature_origin.encode("utf-8"), digestmod=hashlib.sha256
        ).digest()
        signature_sha = base64.b64encode(signature_sha).decode(encoding="utf-8")
        authorization_origin = 'api_key="%s", algorithm="%s", headers="%s", signature="%s"' % (
            self.api_key,
            "hmac-sha256",
            "host date request-line",
            signature_sha,
        )
        authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8")
        # Combine the authentication parameters of the request into a dictionary.
        v = {"authorization": authorization, "date": date, "host": "ws-api.xfyun.cn"}
        # Concatenate the authentication parameters to generate the URL.
        url = url + "?" + urlencode(v)
        return url
# Export
async def oas3_iflytek_tts(text: str, voice: str = "", app_id: str = "", api_key: str = "", api_secret: str = ""):
    """Text to speech via the iFlyTek online TTS API.

    For more details, check out: `https://www.xfyun.cn/doc/tts/online_tts/API.html`

    :param text: The text used for voice conversion.
    :param voice: Voice name; falls back to ``xiaoyan`` when empty. For more details, checkout: `https://www.xfyun.cn/doc/tts/online_tts/API.html#%E6%8E%A5%E5%8F%A3%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B`
    :param app_id: Application ID is used to access your iFlyTek service API, see: `https://console.xfyun.cn/services/tts`
    :param api_key: WebAPI argument, see: `https://console.xfyun.cn/services/tts`
    :param api_secret: WebAPI argument, see: `https://console.xfyun.cn/services/tts`
    :return: Returns the Base64-encoded .mp3 file data if successful, otherwise an empty string.
    """
    # Bug fix: the empty-string default used to be forwarded verbatim, so the
    # documented "xiaoyan" default of synthesize_speech() never took effect
    # (the request was sent with vcn=""). Apply the documented default here.
    if not voice:
        voice = DEFAULT_IFLYTEK_VOICE
    filename = Path(__file__).parent / (uuid.uuid4().hex + ".mp3")
    try:
        tts = IFlyTekTTS(app_id=app_id, api_key=api_key, api_secret=api_secret)
        await tts.synthesize_speech(text=text, output_file=str(filename), voice=voice)
        async with aiofiles.open(str(filename), mode="rb") as reader:
            data = await reader.read()
            base64_string = base64.b64encode(data).decode("utf-8")
    except Exception as e:
        logger.error(f"text:{text}, error:{e}")
        base64_string = ""
    finally:
        # Always remove the temp file, even on failure.
        filename.unlink(missing_ok=True)
    return base64_string
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/swe_agent_commands/_split_string.py | metagpt/tools/swe_agent_commands/_split_string.py | #!/usr/bin/env python3
from __future__ import annotations
import sys
def print_flake8_output(input_string, show_line_numbers=False):
    """Pretty-print raw flake8 output as markdown-style bullet lines.

    Each flake8 line looks like ``path:line:col: CODE message``. With
    ``show_line_numbers`` the ``line:col:`` part (everything after the path)
    is kept in the bullet; otherwise only the message fields are printed.
    """
    for raw_line in input_string.split("\n"):
        fields = raw_line.split()
        message = " ".join(fields[1:])
        if show_line_numbers:
            # fields[0] is "path:line:col:"; drop the path, keep the rest.
            location = ":".join(fields[0].split(":")[1:])
            print(f"- {location} {message}")
        else:
            print(f"- {message}")
if __name__ == "__main__":
    # CLI entry point: argv[1] carries the raw flake8 output to reformat.
    lint_output = sys.argv[1]
    print_flake8_output(lint_output)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/swe_agent_commands/swe_agent_utils.py | metagpt/tools/swe_agent_commands/swe_agent_utils.py | from pathlib import Path
import numpy as np
from datasets import load_dataset, load_from_disk
def extract_patch(command_output):
    """Return the unified-diff portion of *command_output*.

    Everything from the first ``diff --git`` line to the end is kept;
    returns an empty string when no diff marker is present.
    """
    collected = []
    capturing = False
    for text_line in command_output.split("\n"):
        capturing = capturing or text_line.startswith("diff --git")
        if capturing:
            collected.append(text_line)
    return "\n".join(collected)
def load_hf_dataset(dataset_name_or_path: str, cache_dir, split: str = "test", existing_ids: list = None):
    """Load a HuggingFace dataset split, caching it on disk and filtering out existing ids.

    Args:
        dataset_name_or_path: HF hub name or local path of the dataset.
        cache_dir: Directory (Path-like) under which the dataset is cached.
        split: Split to return; must exist in the dataset.
        existing_ids: ``instance_id`` values to drop from the split
            (e.g. instances that were already processed). Defaults to none.

    Returns:
        The requested (optionally filtered) dataset split.

    Raises:
        ValueError: If *split* is not present in the dataset.
    """
    # Bug fix: the old signature used a mutable default (existing_ids=[]),
    # which is shared across calls; use None as the sentinel instead.
    existing_ids = existing_ids or []
    data_dir = cache_dir / dataset_name_or_path
    if Path(data_dir).exists():
        dataset = load_from_disk(data_dir)
    else:
        # First use: download from the hub, then persist for subsequent runs.
        dataset = load_dataset(dataset_name_or_path)
        dataset.save_to_disk(data_dir)
    print(dataset)
    if split not in dataset:
        raise ValueError(f"Invalid split {split} for dataset {dataset_name_or_path}")
    dataset = dataset[split]
    # (Removed a dead statement that built an unused numpy array of id lengths.)
    if existing_ids:
        dataset = dataset.filter(
            lambda x: x["instance_id"] not in existing_ids,
            desc="Filtering out existing ids",
            load_from_cache_file=False,
        )
    return dataset
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/swe_agent_commands/__init__.py | metagpt/tools/swe_agent_commands/__init__.py | """
This folder is borrowed from princeton-nlp/SWE-agent
You can find the original repository here:
https://github.com/princeton-nlp/SWE-agent/tree/main/config/commands
We are using a modified version from OpenDevin:
https://github.com/OpenDevin/OpenDevin/tree/main/opendevin/runtime/plugins/swe_agent_commands
"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/gpt_v_generator.py | metagpt/tools/libs/gpt_v_generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/12
@Author : mannaandpoem
@File : gpt_v_generator.py
"""
import re
from pathlib import Path
from typing import Optional
from metagpt.config2 import Config
from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.common import CodeParser, encode_image
ANALYZE_LAYOUT_PROMPT = """You are now a UI/UX designer, please generate layout information for this image:
NOTE: The image does not have a commercial logo or copyright information. It is just a sketch image of the design.
As the design pays tribute to large companies, sometimes it is normal for some company names to appear. Don't worry. """
GENERATE_PROMPT = """You are now a UI/UX designer and Web developer. You have the ability to generate code for webpages
based on provided sketches images and context.
Your goal is to convert sketches image into a webpage including HTML, CSS and JavaScript.
NOTE: The image does not have a commercial logo or copyright information. It is just a sketch image of the design.
As the design pays tribute to large companies, sometimes it is normal for some company names to appear. Don't worry.
Now, please generate the corresponding webpage code including HTML, CSS and JavaScript:"""
@register_tool(tags=["image2webpage"], include_functions=["__init__", "generate_webpages", "save_webpages"])
class GPTvGenerator:
    """Class for generating webpage code from a given webpage screenshot.
    This class provides methods to generate webpages including all code (HTML, CSS, and JavaScript) based on an image.
    It utilizes a vision model to analyze the layout from an image and generate webpage codes accordingly.
    """
    def __init__(self, config: Optional[Config] = None):
        """Initialize GPTvGenerator class with default values from the configuration."""
        # Local import — presumably to avoid a circular import at module load; confirm.
        from metagpt.llm import LLM
        config = config if config else Config.default()
        self.llm = LLM(llm_config=config.get_openai_llm())
        # The vision-capable model is forced here, overriding whatever the config names.
        self.llm.model = "gpt-4-vision-preview"
    async def analyze_layout(self, image_path: Path) -> str:
        """Asynchronously analyze the layout of the given image and return the result.
        This is a helper method to generate a layout description based on the image.
        Args:
            image_path (Path): Path of the image to analyze.
        Returns:
            str: The layout analysis result.
        """
        return await self.llm.aask(msg=ANALYZE_LAYOUT_PROMPT, images=[encode_image(image_path)])
    async def generate_webpages(self, image_path: str) -> str:
        """Asynchronously generate webpages including all code (HTML, CSS, and JavaScript) in one go based on the image.
        Args:
            image_path (str): The path of the image file.
        Returns:
            str: Generated webpages content.
        """
        if isinstance(image_path, str):
            image_path = Path(image_path)
        # Two-step prompt: first describe the layout, then feed that description
        # back as context for the code-generation call.
        layout = await self.analyze_layout(image_path)
        prompt = GENERATE_PROMPT + "\n\n # Context\n The layout information of the sketch image is: \n" + layout
        return await self.llm.aask(msg=prompt, images=[encode_image(image_path)])
    @staticmethod
    def save_webpages(webpages: str, save_folder_name: str = "example") -> Path:
        """Save webpages including all code (HTML, CSS, and JavaScript) at once.
        Args:
            webpages (str): The generated webpages content.
            save_folder_name (str, optional): The name of the folder to save the webpages. Defaults to 'example'.
        Returns:
            Path: The path of the saved webpages.
        """
        # Create a folder called webpages in the workspace directory to store HTML, CSS, and JavaScript files
        webpages_path = Config.default().workspace.path / "webpages" / save_folder_name
        logger.info(f"code will be saved at {webpages_path}")
        webpages_path.mkdir(parents=True, exist_ok=True)
        index_path = webpages_path / "index.html"
        index_path.write_text(CodeParser.parse_code(text=webpages, lang="html"))
        # The patterns are regexes matching the filename mentioned in the output
        # (e.g. "styles.css" or "style.css"); the matched name becomes the file name.
        extract_and_save_code(folder=webpages_path, text=webpages, pattern="styles?.css", language="css")
        extract_and_save_code(folder=webpages_path, text=webpages, pattern="scripts?.js", language="javascript")
        return webpages_path
def extract_and_save_code(folder, text, pattern, language):
    """Search *text* for a filename matching *pattern* and, if found, write the
    parsed *language* code block from *text* to that filename under *folder*."""
    match = re.search(pattern, text)
    if not match:
        return
    target = folder / match.group(0)
    target.write_text(CodeParser.parse_code(text=text, lang=language), encoding="utf-8")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/web_scraping.py | metagpt/tools/libs/web_scraping.py | import contextlib
from uuid import uuid4
from metagpt.tools.libs.browser import Browser
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.file import MemoryFileSystem
from metagpt.utils.parse_html import simplify_html
@register_tool(tags=["web scraping"])
async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bool = False) -> str:
    """view the HTML content of current page to understand the structure.
    Args:
        url (str): The URL of the web page to scrape.
        requirement (str): Providing a clear and detailed requirement helps in focusing the inspection on the desired elements.
        keep_links (bool): Whether to keep the hyperlinks in the HTML content. Set to True if links are required
    Returns:
        str: The HTML content of the page.
    """
    # Load the page in a headless browser and simplify its HTML (optionally keeping links).
    async with Browser() as browser:
        await browser.goto(url)
        page = browser.page
        html = await page.content()
        html = simplify_html(html, url=page.url, keep_links=keep_links)
    # Stage the simplified HTML in an in-memory filesystem so the RAG engine can index it.
    mem_fs = MemoryFileSystem()
    filename = f"{uuid4().hex}.html"
    with mem_fs.open(filename, "w") as f:
        f.write(html)
    # Since RAG is an optional optimization, if it fails, the simplified HTML can be used as a fallback.
    with contextlib.suppress(Exception):
        from metagpt.rag.engines import SimpleEngine  # avoid circular import
        # TODO make `from_docs` asynchronous
        engine = SimpleEngine.from_docs(input_files=[filename], fs=mem_fs)
        nodes = await engine.aretrieve(requirement)
        # Replace the full page with only the chunks relevant to the requirement.
        html = "\n".join(i.text for i in nodes)
    mem_fs.rm_file(filename)
    return html
# async def get_elements_outerhtml(self, element_ids: list[int]):
# """Inspect the outer HTML of the elements in Current Browser Viewer.
# """
# page = self.page
# data = []
# for element_id in element_ids:
# html = await get_element_outer_html(page, get_backend_node_id(element_id, self.accessibility_tree))
# data.append(html)
# return "\n".join(f"[{element_id}]. {html}" for element_id, html in zip(element_ids, data))
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/shell.py | metagpt/tools/libs/shell.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import subprocess
from pathlib import Path
from typing import Dict, List, Tuple, Union
async def shell_execute(
    command: Union[List[str], str], cwd: str | Path = None, env: Dict = None, timeout: int = 600
) -> Tuple[str, str, int]:
    """
    Execute a command and return its standard output, standard error and return code.

    Args:
        command (Union[List[str], str]): The command to execute and its arguments. A list of strings is
            executed directly; a single string is executed through the shell.
        cwd (str | Path, optional): The current working directory for the command. Defaults to None.
        env (Dict, optional): Environment variables to set for the command. Defaults to None.
        timeout (int, optional): Timeout for the command execution in seconds. Defaults to 600.

    Returns:
        Tuple[str, str, int]: A tuple containing the string type standard output and string type standard
        error of the executed command and int type return code.

    Raises:
        subprocess.TimeoutExpired: If the command does not finish within *timeout* seconds.
            (Doc fix: the previous docstring claimed ``ValueError``, but ``subprocess.run``
            raises ``TimeoutExpired``.)

    Example:
        >>> # command is a list
        >>> stdout, stderr, returncode = await shell_execute(command=["ls", "-l"], cwd="/home/user", env={"PATH": "/usr/bin"})
        >>> # command is a string of shell script
        >>> stdout, stderr, returncode = await shell_execute(command="ls -l", cwd="/home/user", env={"PATH": "/usr/bin"})

    References:
        This function uses `subprocess.run` for executing shell commands.
    """
    cwd = str(cwd) if cwd else None
    # A string command goes through the shell; a list is executed directly.
    use_shell = isinstance(command, str)
    result = subprocess.run(command, cwd=cwd, capture_output=True, text=True, env=env, timeout=timeout, shell=use_shell)
    return result.stdout, result.stderr, result.returncode
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/sd_engine.py | metagpt/tools/libs/sd_engine.py | # -*- coding: utf-8 -*-
# @Date : 2023/7/19 16:28
# @Author : stellahong (stellahong@deepwisdom.ai)
# @Desc :
from __future__ import annotations
import base64
import hashlib
import io
import json
from os.path import join
import requests
from aiohttp import ClientSession
from PIL import Image, PngImagePlugin
from metagpt.const import SD_OUTPUT_FILE_REPO, SD_URL, SOURCE_ROOT
from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
# Default request body for the Automatic1111-style /sdapi/v1/txt2img endpoint.
# SDEngine.construct_payload overwrites prompt/negative_prompt/size/model in place.
payload = {
    "prompt": "",
    "negative_prompt": "(easynegative:0.8),black, dark,Low resolution",
    "override_settings": {"sd_model_checkpoint": "galaxytimemachinesGTM_photoV20"},
    "seed": -1,  # -1 lets the server pick a random seed
    "batch_size": 1,
    "n_iter": 1,
    "steps": 20,
    "cfg_scale": 7,
    "width": 512,
    "height": 768,
    "restore_faces": False,
    "tiling": False,
    "do_not_save_samples": False,
    "do_not_save_grid": False,
    "enable_hr": False,  # high-res fix settings below only apply when this is True
    "hr_scale": 2,
    "hr_upscaler": "Latent",
    "hr_second_pass_steps": 0,
    "hr_resize_x": 0,
    "hr_resize_y": 0,
    "hr_upscale_to_x": 0,
    "hr_upscale_to_y": 0,
    "truncate_x": 0,
    "truncate_y": 0,
    "applied_old_hires_behavior_to": None,
    "eta": None,
    "sampler_index": "DPM++ SDE Karras",
    "alwayson_scripts": {},
}
# Mirrors payload["negative_prompt"]; used as the construct_payload default.
default_negative_prompt = "(easynegative:0.8),black, dark,Low resolution"
@register_tool(
    tags=["text2image", "multimodal"],
    include_functions=["__init__", "simple_run_t2i", "run_t2i", "construct_payload", "save"],
)
class SDEngine:
    """Generate image using stable diffusion model.
    This class provides methods to interact with a stable diffusion service to generate images based on text inputs.
    """
    def __init__(self, sd_url=""):
        """Initialize the SDEngine instance with configuration.
        Args:
            sd_url (str, optional): URL of the stable diffusion service. Defaults to "".
        """
        self.sd_url = SD_URL if not sd_url else sd_url
        self.sd_t2i_url = f"{self.sd_url}/sdapi/v1/txt2img"
        # Define default payload settings for SD API
        # NOTE(review): this aliases the module-level `payload` dict rather than
        # copying it, so all SDEngine instances share (and mutate) the same
        # payload — confirm whether that sharing is intended.
        self.payload = payload
        logger.info(self.sd_t2i_url)
    def construct_payload(
        self,
        prompt: object,
        negtive_prompt: object = default_negative_prompt,
        width: object = 512,
        height: object = 512,
        sd_model: object = "galaxytimemachinesGTM_photoV20",
    ) -> object:
        """Modify and set the API parameters for image generation.
        Args:
            prompt (str): Text input for image generation.
            negtive_prompt (str, optional): Text input for negative prompts. Defaults to the module-level default prompt.
            width (int, optional): Width of the generated image in pixels. Defaults to 512.
            height (int, optional): Height of the generated image in pixels. Defaults to 512.
            sd_model (str, optional): The model to use for image generation. Defaults to "galaxytimemachinesGTM_photoV20".
        Returns:
            dict: Updated parameters for the stable diffusion API.
        """
        # Mutates self.payload in place and returns it.
        self.payload["prompt"] = prompt
        self.payload["negative_prompt"] = negtive_prompt
        self.payload["width"] = width
        self.payload["height"] = height
        self.payload["override_settings"]["sd_model_checkpoint"] = sd_model
        logger.info(f"call sd payload is {self.payload}")
        return self.payload
    def save(self, imgs, save_name=""):
        """Save generated images to the output directory.
        Args:
            imgs (str): Generated images.
            save_name (str, optional): Output image name. Default is empty.
        """
        save_dir = SOURCE_ROOT / SD_OUTPUT_FILE_REPO
        if not save_dir.exists():
            save_dir.mkdir(parents=True, exist_ok=True)
        batch_decode_base64_to_image(imgs, str(save_dir), save_name=save_name)
    def simple_run_t2i(self, payload: dict, auto_save: bool = True):
        """Run the stable diffusion API for multiple prompts, calling the stable diffusion API to generate images.
        Args:
            payload (dict): Dictionary of input parameters for the stable diffusion API.
            auto_save (bool, optional): Save generated images automatically. Defaults to True.
        Returns:
            list: The generated images as a result of the API call.
        """
        with requests.Session() as session:
            logger.debug(self.sd_t2i_url)
            rsp = session.post(self.sd_t2i_url, json=payload, timeout=600)
        results = rsp.json()["images"]
        if auto_save:
            # Derive a short, stable filename token from the prompt's first 10 chars.
            save_name = hashlib.sha256(payload["prompt"][:10].encode()).hexdigest()[:6]
            self.save(results, save_name=f"output_{save_name}")
        return results
    async def run_t2i(self, payloads: list):
        """Run the stable diffusion API for multiple prompts asynchronously.
        Args:
            payloads (list): list of payload, each payload is a dictionary of input parameters for the stable diffusion API.
        """
        # Payloads are posted sequentially over one shared HTTP session.
        session = ClientSession()
        for payload_idx, payload in enumerate(payloads):
            results = await self.run(url=self.sd_t2i_url, payload=payload, session=session)
            self.save(results, save_name=f"output_{payload_idx}")
        await session.close()
    async def run(self, url, payload, session):
        """Perform the HTTP POST request to the SD API.
        Args:
            url (str): The API URL.
            payload (dict): The payload for the request.
            session (ClientSession): The session for making HTTP requests.
        Returns:
            list: Images generated by the stable diffusion API.
        """
        async with session.post(url, json=payload, timeout=600) as rsp:
            data = await rsp.read()
        rsp_json = json.loads(data)
        imgs = rsp_json["images"]
        logger.info(f"callback rsp json is {rsp_json.keys()}")
        return imgs
def decode_base64_to_image(img, save_name):
    """Decode one base64-encoded image and save it as ``<save_name>.png``.

    Accepts either a raw base64 payload or a full data URL
    (``data:image/png;base64,<payload>``).

    Args:
        img: Base64 string (raw payload or data URL).
        save_name: Output path without the ``.png`` extension.

    Returns:
        tuple: ``(pnginfo, image)`` — the (empty) PNG metadata object and the PIL image.
    """
    # Bug fix: taking [0] of the comma split returned the "data:...;base64" header
    # (not the payload) for data URLs. [-1] yields the payload for data URLs and
    # the whole string for raw base64 (which never contains a comma).
    image = Image.open(io.BytesIO(base64.b64decode(img.split(",", 1)[-1])))
    pnginfo = PngImagePlugin.PngInfo()
    logger.info(save_name)
    image.save(f"{save_name}.png", pnginfo=pnginfo)
    return pnginfo, image
def batch_decode_base64_to_image(imgs, save_dir="", save_name=""):
    """Decode a batch of base64-encoded images into ``save_dir``.

    Args:
        imgs: Iterable of base64 image strings.
        save_dir: Directory to write into.
        save_name: Base filename (without extension); images after the first
            get an ``_<idx>`` suffix so they are not overwritten.
    """
    # Bug fix: the old code re-joined save_dir onto the accumulating path on
    # every iteration (compounding the directory) and ignored idx, so every
    # image overwrote the same file. Join once and suffix subsequent images;
    # the first image keeps the original name for backward compatibility.
    base_path = join(save_dir, save_name)
    for idx, _img in enumerate(imgs):
        target = base_path if idx == 0 else f"{base_path}_{idx}"
        decode_base64_to_image(_img, save_name=target)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/linter.py | metagpt/tools/libs/linter.py | """
This file is borrowed from OpenDevin
You can find the original repository here:
https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/plugins/agent_skills/utils/aider/linter.py
"""
import os
import subprocess
import sys
import traceback
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
from grep_ast import TreeContext, filename_to_lang
from tree_sitter_languages import get_parser # noqa: E402
# tree_sitter is throwing a FutureWarning
warnings.simplefilter("ignore", category=FutureWarning)
@dataclass
class LintResult:
    # Result of one lint run: the raw diagnostic text and the line numbers it refers to.
    text: str
    lines: list
class Linter:
    """Dispatches files to per-language lint commands and normalizes their results."""
    def __init__(self, encoding="utf-8", root=None):
        # encoding: used when reading the file to lint; root: base dir for relative paths.
        self.encoding = encoding
        self.root = root
        # Per-language linter dispatch; values are either callables or shell command strings.
        self.languages = dict(
            python=self.py_lint,
            sql=self.fake_lint,  # base_lint lacks support for full SQL syntax. Use fake_lint to bypass the validation.
            css=self.fake_lint,  # base_lint lacks support for css syntax. Use fake_lint to bypass the validation.
            js=self.fake_lint,  # base_lint lacks support for javascipt syntax. Use fake_lint to bypass the validation.
            javascript=self.fake_lint,
        )
        # When set, this command overrides the per-language table for every file.
        self.all_lint_cmd = None
    def set_linter(self, lang, cmd):
        # Register a linter for one language, or (lang falsy) for all languages.
        if lang:
            self.languages[lang] = cmd
            return
        self.all_lint_cmd = cmd
    def get_rel_fname(self, fname):
        # Path relative to self.root when a root is configured; otherwise unchanged.
        if self.root:
            return os.path.relpath(fname, self.root)
        else:
            return fname
    def run_cmd(self, cmd, rel_fname, code):
        """Run an external lint command on *rel_fname*; return a LintResult on failure, None on success."""
        # NOTE(review): the command line is split on whitespace, so filenames
        # containing spaces will break — confirm whether inputs can contain them.
        cmd += " " + rel_fname
        cmd = cmd.split()
        process = subprocess.Popen(cmd, cwd=self.root, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        stdout, _ = process.communicate()
        errors = stdout.decode().strip()
        # Side effect: the last run's exit status is kept on the instance.
        self.returncode = process.returncode
        if self.returncode == 0:
            return  # zero exit status
        cmd = " ".join(cmd)
        res = ""
        res += errors
        # extract_error_line_from is defined elsewhere in this module.
        line_num = extract_error_line_from(res)
        return LintResult(text=res, lines=[line_num])
    def get_abs_fname(self, fname):
        # Best-effort absolute path; temp files that do not exist fall back to the relative name.
        if os.path.isabs(fname):
            return fname
        elif os.path.isfile(fname):
            rel_fname = self.get_rel_fname(fname)
            return os.path.abspath(rel_fname)
        else:  # if a temp file
            return self.get_rel_fname(fname)
    def lint(self, fname, cmd=None) -> Optional[LintResult]:
        """Lint *fname*, choosing the linter by explicit *cmd*, global override,
        language table, or the basic_lint fallback. Returns None when clean."""
        code = Path(fname).read_text(self.encoding)
        absolute_fname = self.get_abs_fname(fname)
        if cmd:
            cmd = cmd.strip()
        if not cmd:
            lang = filename_to_lang(fname)
            if not lang:
                # Unknown language: nothing to lint.
                return None
            if self.all_lint_cmd:
                cmd = self.all_lint_cmd
            else:
                cmd = self.languages.get(lang)
        if callable(cmd):
            linkres = cmd(fname, absolute_fname, code)
        elif cmd:
            linkres = self.run_cmd(cmd, absolute_fname, code)
        else:
            # basic_lint is the tree-sitter based fallback defined elsewhere in this module.
            linkres = basic_lint(absolute_fname, code)
        return linkres
    def flake_lint(self, rel_fname, code):
        # Only fatal-class flake8 codes are checked; missing flake8 is tolerated.
        fatal = "F821,F822,F831,E112,E113,E999,E902"
        flake8 = f"flake8 --select={fatal} --isolated"
        try:
            flake_res = self.run_cmd(flake8, rel_fname, code)
        except FileNotFoundError:
            flake_res = None
        return flake_res
    def py_lint(self, fname, rel_fname, code):
        # Cascade: flake8 first, then a compile() check, then the basic tree-sitter lint.
        error = self.flake_lint(rel_fname, code)
        if not error:
            error = lint_python_compile(fname, code)
        if not error:
            error = basic_lint(rel_fname, code)
        return error
    def fake_lint(self, fname, rel_fname, code):
        # No-op linter for languages the basic lint cannot parse.
        return None
def lint_python_compile(fname, code):
    """Byte-compile `code` and report failures as a LintResult.

    Returns None when the code compiles cleanly. The original caught only
    IndentationError, so a plain SyntaxError escaped and crashed the caller
    (e.g. `Linter.py_lint` when flake8 is unavailable); SyntaxError — of which
    IndentationError is a subclass — is now handled the same way.
    """
    try:
        compile(code, fname, "exec")  # USE TRACEBACK BELOW HERE
        return
    except SyntaxError as err:  # also covers IndentationError (subclass)
        end_lineno = getattr(err, "end_lineno", err.lineno)
        if isinstance(end_lineno, int):
            line_numbers = list(range(end_lineno - 1, end_lineno))
        else:
            line_numbers = []

        tb_lines = traceback.format_exception(type(err), err, err.__traceback__)
        # Trim traceback frames above this helper so the report starts at the
        # user's code; the sentinel comment on the compile() line marks the cut.
        last_file_i = 0
        target = "# USE TRACEBACK"
        target += " BELOW HERE"
        for i in range(len(tb_lines)):
            if target in tb_lines[i]:
                last_file_i = i
                break
        tb_lines = tb_lines[:1] + tb_lines[last_file_i + 1 :]

        res = "".join(tb_lines)
        return LintResult(text=res, lines=line_numbers)
def basic_lint(fname, code):
    """
    Use tree-sitter to look for syntax errors, display them with tree context.
    """
    lang = filename_to_lang(fname)
    if not lang:
        return None

    syntax_tree = get_parser(lang).parse(bytes(code, "utf-8"))
    error_lines = traverse_tree(syntax_tree.root_node)
    if not error_lines:
        return None

    return LintResult(text=f"{fname}:{error_lines[0]}", lines=error_lines)
def extract_error_line_from(lint_error):
    """Return the first line number parsed from a linter error message.

    Expected message format: ``<filename>:<line>:<column>: <code> <message>``.
    Returns None when no line number can be parsed — the original left
    `first_error_line` unbound in that case and raised UnboundLocalError.
    (Moved from openhands.agentskills#_lint_file.)
    """
    first_error_line = None
    for line in lint_error.splitlines(True):
        if not line.strip():
            continue
        parts = line.split(":")
        if len(parts) < 2:
            continue
        try:
            first_error_line = int(parts[1])
            break
        except ValueError:
            # Second field was not an integer; keep scanning.
            continue
    return first_error_line
def tree_context(fname, code, line_nums):
    """Render `code` with syntactic context around the given lines of interest."""
    ctx = TreeContext(
        fname,
        code,
        color=False,
        line_number=True,
        child_context=False,
        last_line=False,
        margin=0,
        mark_lois=True,
        loi_pad=3,
        # header_max=30,
        show_top_of_file_parent_scope=False,
    )
    ctx.add_lines_of_interest(set(line_nums))
    ctx.add_context()
    return ctx.format()
# Traverse the tree to find errors
def traverse_tree(node):
    """Collect 1-based line numbers of ERROR/missing nodes, in preorder."""
    found = []
    stack = [node]
    while stack:
        current = stack.pop()
        if current.type == "ERROR" or current.is_missing:
            found.append(current.start_point[0] + 1)
        # Push children reversed so they are visited left-to-right (preorder).
        stack.extend(reversed(current.children))
    return found
def main():
    """
    Main function to parse files provided as command line arguments.
    """
    if len(sys.argv) < 2:
        print("Usage: python linter.py <file1> <file2> ...")
        sys.exit(1)

    file_linter = Linter(root=os.getcwd())
    for path in sys.argv[1:]:
        findings = file_linter.lint(path)
        if findings:
            print(findings)
# Allow running this module directly: `python linter.py <file1> <file2> ...`.
if __name__ == "__main__":
    main()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/index_repo.py | metagpt/tools/libs/index_repo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import json
import re
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union
import tiktoken
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.schema import NodeWithScore
from pydantic import BaseModel, Field, model_validator
from metagpt.config2 import config
from metagpt.context import Context
from metagpt.logs import logger
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.factories.embedding import RAGEmbeddingFactory
from metagpt.rag.schema import FAISSIndexConfig, FAISSRetrieverConfig, LLMRankerConfig
from metagpt.utils.common import aread, awrite, generate_fingerprint, list_files
from metagpt.utils.file import File
from metagpt.utils.report import EditorReporter
# Where vector indexes over user-uploaded files are persisted.
UPLOADS_INDEX_ROOT = "/data/.index/uploads"
DEFAULT_INDEX_ROOT = UPLOADS_INDEX_ROOT
# Where the uploaded files themselves live on disk.
UPLOAD_ROOT = "/data/uploads"
DEFAULT_ROOT = UPLOAD_ROOT
# Per-chat index store and per-chat file roots.
CHATS_INDEX_ROOT = "/data/.index/chats"
CHATS_ROOT = "/data/chats/"
# Cluster key for files that belong to neither uploads nor chats.
OTHER_TYPE = "other"
# Token-count bounds: files outside [min, max] are not added to an index.
DEFAULT_MIN_TOKEN_COUNT = 10000
DEFAULT_MAX_TOKEN_COUNT = 100000000
class IndexRepoMeta(BaseModel):
    """Build-time settings of an index repo, persisted alongside it in `meta.json`."""

    min_token_count: int  # lower token-count bound used when the index was built
    max_token_count: int  # upper token-count bound used when the index was built
class TextScore(BaseModel):
    """A non-indexed search result: a file's full text, optionally with a score."""

    filename: str  # path of the file the text came from
    text: str  # raw file content returned in place of an index hit
    score: Optional[float] = None  # similarity score; None when never ranked
class IndexRepo(BaseModel):
    """A FAISS-backed index over a directory of files.

    Tracks per-file content fingerprints so only new or changed files are
    (re)indexed, and persists the token-count bounds used at build time in a
    meta file next to the index.
    """

    persist_path: str = DEFAULT_INDEX_ROOT  # The persist path of the index repo, `/data/.index/uploads/` or `/data/.index/chats/{chat_id}/`
    root_path: str = (
        DEFAULT_ROOT  # `/data/uploads` or r`/data/chats/[a-z0-9]+`, the root path of files indexed by the index repo.
    )
    fingerprint_filename: str = "fingerprint.json"  # per-file content fingerprints, stored inside persist_path
    meta_filename: str = "meta.json"  # build-time settings (token-count bounds)
    model: Optional[str] = None  # embedding model name; global config is used when unset
    min_token_count: int = DEFAULT_MIN_TOKEN_COUNT
    max_token_count: int = DEFAULT_MAX_TOKEN_COUNT
    recall_count: int = 5  # maximum number of merged results returned by `merge`
    embedding: Optional[BaseEmbedding] = Field(default=None, exclude=True)
    fingerprints: Dict[str, str] = Field(default_factory=dict)

    @model_validator(mode="after")
    def _update_fingerprints(self) -> "IndexRepo":
        """Load fingerprints from the fingerprint file if not already loaded.

        Returns:
            IndexRepo: The updated IndexRepo instance.
        """
        if not self.fingerprints:
            filename = Path(self.persist_path) / self.fingerprint_filename
            if not filename.exists():
                return self
            with open(str(filename), "r") as reader:
                self.fingerprints = json.load(reader)
        return self

    async def search(
        self, query: str, filenames: Optional[List[Path]] = None
    ) -> Optional[List[Union[NodeWithScore, TextScore]]]:
        """Search for documents related to the given query.

        Files too small/large for the index are returned whole as TextScore;
        new or changed files are indexed on the fly before searching.

        Args:
            query (str): The search query.
            filenames (Optional[List[Path]]): A list of filenames to filter the search.

        Returns:
            Optional[List[Union[NodeWithScore, TextScore]]]: A list of search results containing NodeWithScore or TextScore.
        """
        encoding = tiktoken.get_encoding("cl100k_base")
        result: List[Union[NodeWithScore, TextScore]] = []
        filenames, excludes = await self._filter(filenames)
        if not filenames:
            raise ValueError(f"Unsupported file types: {[str(i) for i in excludes]}")
        resource = EditorReporter()
        for i in filenames:
            await resource.async_report(str(i), "path")
        filter_filenames = set()
        meta = await self._read_meta()
        new_files = {}
        for i in filenames:
            # Binary-ish document formats can't be fingerprinted from text;
            # index them once and thereafter search them via the index only.
            if Path(i).suffix.lower() in {".pdf", ".doc", ".docx"}:
                if str(i) not in self.fingerprints:
                    new_files[i] = ""
                    logger.warning(f'file: "{i}" not indexed')
                filter_filenames.add(str(i))
                continue
            content = await File.read_text_file(i)
            token_count = len(encoding.encode(content))
            # Use the bounds the index was BUILT with (meta), not current settings.
            if not self._is_buildable(
                token_count, min_token_count=meta.min_token_count, max_token_count=meta.max_token_count
            ):
                result.append(TextScore(filename=str(i), text=content))
                continue
            file_fingerprint = generate_fingerprint(content)
            if str(i) not in self.fingerprints or (self.fingerprints.get(str(i)) != file_fingerprint):
                new_files[i] = content
                logger.warning(f'file: "{i}" changed but not indexed')
                continue
            filter_filenames.add(str(i))
        if new_files:
            # Index changed/new files now; unbuildable ones fall back to full text.
            added, others = await self.add(paths=list(new_files.keys()), file_datas=new_files)
            filter_filenames.update([str(i) for i in added])
            for i in others:
                result.append(TextScore(filename=str(i), text=new_files.get(i)))
                filter_filenames.discard(str(i))
        nodes = await self._search(query=query, filters=filter_filenames)
        return result + nodes

    async def merge(
        self, query: str, indices_list: List[List[Union[NodeWithScore, TextScore]]]
    ) -> List[Union[NodeWithScore, TextScore]]:
        """Merge results from multiple indices based on the query.

        Args:
            query (str): The search query.
            indices_list (List[List[Union[NodeWithScore, TextScore]]]): A list of result lists from different indices.

        Returns:
            List[Union[NodeWithScore, TextScore]]: A list of merged results sorted by similarity.
        """
        flat_nodes = [node for indices in indices_list if indices for node in indices if node]
        if len(flat_nodes) <= self.recall_count:
            return flat_nodes
        if not self.embedding:
            if self.model:
                config.embedding.model = self.model
            factory = RAGEmbeddingFactory(config)
            self.embedding = factory.get_rag_embedding()

        scores = []
        query_embedding = await self.embedding.aget_text_embedding(query)
        for i in flat_nodes:
            try:
                text_embedding = await self.embedding.aget_text_embedding(i.text)
            except Exception as e:  # content exceeds the embedding model's max length
                # Score two overlapping windows (first 60% and last 60% of the
                # text) and keep the better similarity of the two.
                tenth = int(len(i.text) / 10)  # DEFAULT_MIN_TOKEN_COUNT = 10000
                logger.warning(
                    f"{e}, tenth len={tenth}, pre_part_len={len(i.text[: tenth * 6])}, post_part_len={len(i.text[tenth * 4:])}"
                )
                pre_win_part = await self.embedding.aget_text_embedding(i.text[: tenth * 6])
                post_win_part = await self.embedding.aget_text_embedding(i.text[tenth * 4 :])
                similarity = max(
                    self.embedding.similarity(query_embedding, pre_win_part),
                    self.embedding.similarity(query_embedding, post_win_part),
                )
                scores.append((similarity, i))
                continue
            similarity = self.embedding.similarity(query_embedding, text_embedding)
            scores.append((similarity, i))
        scores.sort(key=lambda x: x[0], reverse=True)
        return [i[1] for i in scores][: self.recall_count]

    async def add(
        self, paths: List[Path], file_datas: Dict[Union[str, Path], str] = None
    ) -> Tuple[List[str], List[str]]:
        """Add new documents to the index.

        Args:
            paths (List[Path]): A list of paths to the documents to be added.
            file_datas (Dict[Union[str, Path], str]): A list of file content.

        Returns:
            Tuple[List[str], List[str]]: A tuple containing two lists:
                1. The list of filenames that were successfully added to the index.
                2. The list of filenames that were not added to the index because they were not buildable.
        """
        encoding = tiktoken.get_encoding("cl100k_base")
        filenames, _ = await self._filter(paths)
        filter_filenames = []
        delete_filenames = []
        file_datas = file_datas or {}
        for i in filenames:
            content = file_datas.get(i) or await File.read_text_file(i)
            file_datas[i] = content
            if not self._is_fingerprint_changed(filename=i, content=content):
                # Unchanged content: the existing index entry is still valid.
                continue
            token_count = len(encoding.encode(content))
            if self._is_buildable(token_count):
                filter_filenames.append(i)
                logger.debug(f"{i} is_buildable: {token_count}, {self.min_token_count}~{self.max_token_count}")
            else:
                delete_filenames.append(i)
                logger.debug(f"{i} not is_buildable: {token_count}, {self.min_token_count}~{self.max_token_count}")
        await self._add_batch(filenames=filter_filenames, delete_filenames=delete_filenames, file_datas=file_datas)
        return filter_filenames, delete_filenames

    async def _add_batch(
        self,
        filenames: List[Union[str, Path]],
        delete_filenames: List[Union[str, Path]],
        file_datas: Dict[Union[str, Path], str],
    ):
        """Add and remove documents in a batch operation.

        Args:
            filenames (List[Union[str, Path]]): List of filenames to add.
            delete_filenames (List[Union[str, Path]]): List of filenames to delete.
        """
        if not filenames:
            return
        logger.info(f"update index repo, add {filenames}, remove {delete_filenames}")
        engine = None
        Context()
        if Path(self.persist_path).exists():
            logger.debug(f"load index from {self.persist_path}")
            engine = SimpleEngine.from_index(
                index_config=FAISSIndexConfig(persist_path=self.persist_path),
                retriever_configs=[FAISSRetrieverConfig()],
            )
            try:
                engine.delete_docs(filenames + delete_filenames)
                logger.info(f"delete docs {filenames + delete_filenames}")
                engine.add_docs(input_files=filenames)
                logger.info(f"add docs {filenames}")
            except NotImplementedError as e:
                logger.debug(f"{e}")
                # Incremental update unsupported: rebuild from every file the
                # repo has ever fingerprinted, plus the new ones.
                filenames = list(set([str(i) for i in filenames] + list(self.fingerprints.keys())))
                engine = None
                logger.info(f"{e}. Rebuild all.")
        if not engine:
            engine = SimpleEngine.from_docs(
                input_files=[str(i) for i in filenames],
                retriever_configs=[FAISSRetrieverConfig()],
                ranker_configs=[LLMRankerConfig()],
            )
            logger.info(f"add docs {filenames}")
        engine.persist(persist_dir=self.persist_path)
        for i in filenames:
            content = file_datas.get(i) or await File.read_text_file(i)
            fp = generate_fingerprint(content)
            self.fingerprints[str(i)] = fp
        await awrite(filename=Path(self.persist_path) / self.fingerprint_filename, data=json.dumps(self.fingerprints))
        await self._save_meta()

    def __str__(self):
        """Return a string representation of the IndexRepo.

        Returns:
            str: The filename of the index repository.
        """
        return f"{self.persist_path}"

    def _is_buildable(self, token_count: int, min_token_count: int = -1, max_token_count=-1) -> bool:
        """Check if the token count is within the buildable range.

        Negative bound arguments fall back to the instance's configured bounds.

        Args:
            token_count (int): The number of tokens in the content.

        Returns:
            bool: True if buildable, False otherwise.
        """
        min_token_count = min_token_count if min_token_count >= 0 else self.min_token_count
        max_token_count = max_token_count if max_token_count >= 0 else self.max_token_count
        if token_count < min_token_count or token_count > max_token_count:
            return False
        return True

    async def _filter(self, filenames: Optional[List[Union[str, Path]]] = None) -> Tuple[List[Path], List[Path]]:
        """Filter the provided filenames to only include valid text files.

        Args:
            filenames (Optional[List[Union[str, Path]]]): List of filenames to filter.

        Returns:
            Tuple[List[Path], List[Path]]: A tuple containing a list of valid pathnames and a list of excluded paths.
        """
        root_path = Path(self.root_path).absolute()
        if not filenames:
            filenames = [root_path]
        pathnames = []
        excludes = []
        for i in filenames:
            path = Path(i).absolute()
            # Only files under this repo's root belong to this index.
            if not path.is_relative_to(root_path):
                excludes.append(path)
                logger.debug(f"{path} not is_relative_to {root_path})")
                continue
            if not path.is_dir():
                is_text = await File.is_textual_file(path)
                if is_text:
                    pathnames.append(path)
                continue
            # Directories are expanded recursively to their textual files.
            subfiles = list_files(path)
            for j in subfiles:
                is_text = await File.is_textual_file(j)
                if is_text:
                    pathnames.append(j)
        logger.debug(f"{pathnames}, excludes:{excludes})")
        return pathnames, excludes

    async def _search(self, query: str, filters: Set[str]) -> List[NodeWithScore]:
        """Perform a search for the given query using the index.

        Args:
            query (str): The search query.
            filters (Set[str]): A set of filenames to filter the search results.

        Returns:
            List[NodeWithScore]: A list of nodes with scores matching the query.
        """
        if not filters:
            return []
        if not Path(self.persist_path).exists():
            raise ValueError(f"IndexRepo {Path(self.persist_path).name} not exists.")
        Context()
        engine = SimpleEngine.from_index(
            index_config=FAISSIndexConfig(persist_path=self.persist_path),
            retriever_configs=[FAISSRetrieverConfig()],
        )
        rsp = await engine.aretrieve(query)
        # Keep only hits originating from the requested files.
        return [i for i in rsp if i.metadata.get("file_path") in filters]

    def _is_fingerprint_changed(self, filename: Union[str, Path], content: str) -> bool:
        """Check if the fingerprint of the given document content has changed.

        Args:
            filename (Union[str, Path]): The filename of the document.
            content (str): The content of the document.

        Returns:
            bool: True if the fingerprint has changed, False otherwise.
        """
        old_fp = self.fingerprints.get(str(filename))
        if not old_fp:
            return True
        fp = generate_fingerprint(content)
        return old_fp != fp

    @staticmethod
    def find_index_repo_path(files: List[Union[str, Path]]) -> Tuple[Dict[str, Set[Path]], Dict[str, str]]:
        """Map the file path to the corresponding index repo.

        Args:
            files (List[Union[str, Path]]): A list of file paths or Path objects to be classified.

        Returns:
            Tuple[Dict[str, Set[Path]], Dict[str, str]]:
                - A dictionary mapping the index repo path to the files.
                - A dictionary mapping the index repo path to their corresponding root directories.
        """
        mappings = {
            UPLOADS_INDEX_ROOT: re.compile(r"^/data/uploads($|/.*)"),
            CHATS_INDEX_ROOT: re.compile(r"^/data/chats/[a-z0-9]+($|/.*)"),
        }
        clusters = {}
        roots = {}
        for i in files:
            path = Path(i).absolute()
            path_type = OTHER_TYPE
            for type_, pattern in mappings.items():
                if re.match(pattern, str(i)):
                    path_type = type_
                    break
            if path_type == CHATS_INDEX_ROOT:
                # parts of "/data/chats/<chat_id>/..." = ('/', 'data', 'chats', '<chat_id>', ...)
                chat_id = path.parts[3]
                path_type = str(Path(path_type) / chat_id)
                roots[path_type] = str(Path(CHATS_ROOT) / chat_id)
            elif path_type == UPLOADS_INDEX_ROOT:
                roots[path_type] = UPLOAD_ROOT
            if path_type in clusters:
                clusters[path_type].add(path)
            else:
                clusters[path_type] = {path}
        return clusters, roots

    async def _save_meta(self):
        """Persist the current token-count bounds next to the index."""
        meta = IndexRepoMeta(min_token_count=self.min_token_count, max_token_count=self.max_token_count)
        await awrite(filename=Path(self.persist_path) / self.meta_filename, data=meta.model_dump_json())

    async def _read_meta(self) -> IndexRepoMeta:
        """Read the persisted build settings; fall back to current instance settings."""
        default_meta = IndexRepoMeta(min_token_count=self.min_token_count, max_token_count=self.max_token_count)

        filename = Path(self.persist_path) / self.meta_filename
        if not filename.exists():
            return default_meta
        meta_data = await aread(filename=filename)

        try:
            meta = IndexRepoMeta.model_validate_json(meta_data)
            return meta
        except Exception as e:
            logger.warning(f"Load meta error: {e}")
        return default_meta

    @staticmethod
    async def cross_repo_search(query: str, file_or_path: Union[str, Path]) -> List[str]:
        """Search for a query across multiple repositories.

        This asynchronous function searches for the specified query in files
        located at the given path or file.

        Args:
            query (str): The search term to look for in the files.
            file_or_path (Union[str, Path]): The path to the file or directory
                where the search should be conducted. This can be a string path
                or a Path object.

        Returns:
            List[str]: A list of strings containing the paths of files that
            contain the query results.

        Raises:
            ValueError: If the query string is empty.
        """
        if not file_or_path or not Path(file_or_path).exists():
            raise ValueError(f'"{str(file_or_path)}" not exists')
        files = [file_or_path] if not Path(file_or_path).is_dir() else list_files(file_or_path)
        clusters, roots = IndexRepo.find_index_repo_path(files)
        futures = []
        others = set()
        for persist_path, filenames in clusters.items():
            if persist_path == OTHER_TYPE:
                # Files outside any known index root are read directly.
                others.update(filenames)
                continue
            root = roots[persist_path]
            repo = IndexRepo(persist_path=persist_path, root_path=root)
            futures.append(repo.search(query=query, filenames=list(filenames)))
        for i in others:
            futures.append(File.read_text_file(i))
        futures_results = []
        if futures:
            futures_results = await asyncio.gather(*futures)

        result = []
        v_result = []
        for i in futures_results:
            if not i:
                continue
            if isinstance(i, str):
                result.append(i)
            else:
                v_result.append(i)
        # A default-configured repo is used only for its `merge` (re-ranking);
        # it does not touch any index on disk here.
        repo = IndexRepo()
        merged = await repo.merge(query=query, indices_list=v_result)
        return [i.text for i in merged] + result
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/git.py | metagpt/tools/libs/git.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations

import urllib
import urllib.parse
from pathlib import Path
from typing import Optional

from github.Issue import Issue
from github.PullRequest import PullRequest

from metagpt.tools.tool_registry import register_tool
@register_tool(tags=["software development", "git", "create a git pull request or merge request"])
async def git_create_pull(
    base: str,
    head: str,
    app_name: str,
    base_repo_name: str,
    head_repo_name: str = None,
    title: Optional[str] = None,
    body: Optional[str] = None,
    issue: Optional[Issue] = None,
) -> PullRequest:
    """
    Creates a pull request on a Git repository. Use this tool in priority over Browser to create a pull request.

    Args:
        base (str): The name of the base branch where the pull request will be merged.
        head (str): The name of the branch that contains the changes for the pull request.
        app_name (str): The name of the platform hosting the repository (e.g., "github", "gitlab", "bitbucket").
        base_repo_name (str): The full name of the target repository (in the format "user/repo") where the pull request will be created.
        head_repo_name (Optional[str]): The full name of the source repository (in the format "user/repo") from which the changes will be pulled.
        title (Optional[str]): The title of the pull request. Defaults to None.
        body (Optional[str]): The description or body content of the pull request. Defaults to None.
        issue (Optional[Issue]): An optional issue related to the pull request. Defaults to None.

    Example:
        >>> # create pull request
        >>> base_repo_name = "geekan/MetaGPT"
        >>> head_repo_name = "ioris/MetaGPT"
        >>> base = "master"
        >>> head = "feature/http"
        >>> title = "feat: modify http lib",
        >>> body = "Change HTTP library used to send requests"
        >>> app_name = "github"
        >>> pr = await git_create_pull(
        >>>   base_repo_name=base_repo_name,
        >>>   head_repo_name=head_repo_name,
        >>>   base=base,
        >>>   head=head,
        >>>   title=title,
        >>>   body=body,
        >>>   app_name=app_name,
        >>> )
        >>> if isinstance(pr, PullRequest):
        >>>     print(pr)
        PullRequest("feat: modify http lib")
        >>> if isinstance(pr, str):
        >>>     print(f"Visit this url to create a new pull request: '{pr}'")
        Visit this url to create a new pull request: 'https://github.com/geekan/MetaGPT/compare/master...iorisa:MetaGPT:feature/http'

    Returns:
        PullRequest: The created pull request.
    """
    from metagpt.utils.git_repository import GitRepository

    # Robustness fixes vs. the original: `access_token` was unbound (NameError)
    # when no credential line matched; `parsed_url.hostname` can be None for
    # malformed lines (`in None` raised TypeError); a missing credentials file
    # raised FileNotFoundError. We now fall back to access_token=None and let
    # GitRepository degrade gracefully.
    access_token = None
    git_credentials_path = Path.home() / ".git-credentials"
    if git_credentials_path.exists():
        with open(git_credentials_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()
            if not line:
                continue
            parsed_url = urllib.parse.urlparse(line)
            if parsed_url.hostname and app_name in parsed_url.hostname:
                # Credential-store line format: <scheme>://<user>:<token>@<host>
                colon_index = parsed_url.netloc.find(":")
                at_index = parsed_url.netloc.find("@")
                access_token = parsed_url.netloc[colon_index + 1 : at_index]
                break
    return await GitRepository.create_pull(
        base=base,
        head=head,
        base_repo_name=base_repo_name,
        head_repo_name=head_repo_name,
        title=title,
        body=body,
        issue=issue,
        access_token=access_token,
    )
@register_tool(tags=["software development", "create a git issue"])
async def git_create_issue(
    repo_name: str,
    title: str,
    access_token: str,
    body: Optional[str] = None,
) -> Issue:
    """
    Creates an issue on a Git repository.

    Args:
        repo_name (str): The name of the repository.
        title (str): The title of the issue.
        access_token (str): The access token for authentication. Use `get_env` to get access token.
        body (Optional[str], optional): The body of the issue. Defaults to None.

    Example:
        >>> repo_name = "geekan/MetaGPT"
        >>> title = "This is a new issue"
        >>> from metagpt.tools.libs import get_env
        >>> access_token = await get_env(key="access_token", app_name="github")
        >>> body = "This is the issue body."
        >>> issue = await git_create_issue(
        >>>   repo_name=repo_name,
        >>>   title=title,
        >>>   access_token=access_token,
        >>>   body=body,
        >>> )
        >>> print(issue)
        Issue("This is a new issue")

    Returns:
        Issue: The created issue.
    """
    # Imported lazily so importing this module does not require the full
    # git_repository dependency chain.
    from metagpt.utils.git_repository import GitRepository

    return await GitRepository.create_issue(repo_name=repo_name, title=title, body=body, access_token=access_token)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/terminal.py | metagpt/tools/libs/terminal.py | import asyncio
import os
import re
from asyncio import Queue
from asyncio.subprocess import PIPE, STDOUT
from typing import Optional
from metagpt.config2 import Config
from metagpt.const import DEFAULT_WORKSPACE_ROOT, SWE_SETUP_PATH
from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.report import END_MARKER_VALUE, TerminalReporter
@register_tool()
class Terminal:
    """
    A tool for running terminal commands.
    Don't initialize a new instance of this class if one already exists.
    For commands that need to be executed within a Conda environment, it is recommended
    to use the `execute_in_conda_env` method.
    """

    def __init__(self):
        self.shell_command = ["bash"]  # FIXME: should consider windows support later
        self.command_terminator = "\n"
        # Buffers stdout lines produced by daemon-mode commands until
        # `get_stdout_output` drains them.
        self.stdout_queue = Queue(maxsize=1000)
        self.observer = TerminalReporter()
        self.process: Optional[asyncio.subprocess.Process] = None
        # Commands matching these keys are replaced by a no-op and the advice is
        # returned to the caller. Format: {"cmd": "forbidden_reason/advice"}
        self.forbidden_commands = {
            "run dev": "Use Deployer.deploy_to_public instead.",
            # serve cmd have a space behind it,
            "serve ": "Use Deployer.deploy_to_public instead.",
        }

    async def _start_process(self):
        # Start a persistent shell process so state (cwd, env) survives between commands.
        self.process = await asyncio.create_subprocess_exec(
            *self.shell_command,
            stdin=PIPE,
            stdout=PIPE,
            stderr=STDOUT,
            executable="bash",
            env=os.environ.copy(),
            cwd=DEFAULT_WORKSPACE_ROOT.absolute(),
        )
        await self._check_state()

    async def _check_state(self):
        """
        Check the state of the terminal, e.g. the current directory of the terminal process. Useful for agent to understand.
        """
        output = await self.run_command("pwd")
        # The original `logger.info("The terminal is at:", output)` silently
        # dropped `output`; interpolate it explicitly.
        logger.info(f"The terminal is at: {output}")

    async def run_command(self, cmd: str, daemon=False) -> str:
        """
        Executes a specified command in the terminal and streams the output back in real time.
        This command maintains state across executions, such as the current directory,
        allowing for sequential commands to be contextually aware.

        Args:
            cmd (str): The command to execute in the terminal.
            daemon (bool): If True, executes the command in an asynchronous task, allowing
                           the main program to continue execution.

        Returns:
            str: The command's output or an empty string if `daemon` is True. Remember that
                 when `daemon` is True, use the `get_stdout_output` method to get the output.
        """
        if self.process is None:
            await self._start_process()

        output = ""
        # Replace forbidden sub-commands with `true` (a shell no-op) and report why.
        commands = re.split(r"\s*&&\s*", cmd)
        for cmd_name, reason in self.forbidden_commands.items():
            for index, command in enumerate(commands):
                if cmd_name in command:
                    output += f"Failed to execute {command}. {reason}\n"
                    commands[index] = "true"
        cmd = " && ".join(commands)

        # Send the command, then echo a unique marker so the reader knows
        # where this command's output ends.
        self.process.stdin.write((cmd + self.command_terminator).encode())
        self.process.stdin.write(
            f'echo "{END_MARKER_VALUE}"{self.command_terminator}'.encode()  # write EOF
        )  # Unique marker to signal command end
        await self.process.stdin.drain()
        if daemon:
            # BUGFIX: the original never passed daemon=True here, so background
            # output never reached stdout_queue and get_stdout_output was empty.
            asyncio.create_task(self._read_and_process_output(cmd, daemon=True))
        else:
            output += await self._read_and_process_output(cmd)

        return output

    async def execute_in_conda_env(self, cmd: str, env, daemon=False) -> str:
        """
        Executes a given command within a specified Conda environment automatically without
        the need for manual activation. Users just need to provide the name of the Conda
        environment and the command to execute.

        Args:
            cmd (str): The command to execute within the Conda environment.
            env (str, optional): The name of the Conda environment to activate before executing the command.
                                 If not specified, the command will run in the current active environment.
            daemon (bool): If True, the command is run in an asynchronous task, similar to `run_command`,
                           affecting error logging and handling in the same manner.

        Returns:
            str: The command's output, or an empty string if `daemon` is True, with output processed
                 asynchronously in that case.

        Note:
            This function wraps `run_command`, prepending the necessary Conda activation commands
            to ensure the specified environment is active for the command's execution.
        """
        cmd = f"conda run -n {env} {cmd}"
        return await self.run_command(cmd, daemon=daemon)

    async def get_stdout_output(self) -> str:
        """
        Retrieves all collected output from background running commands and returns it as a string.

        Returns:
            str: The collected output from background running commands, returned as a string.
        """
        output_lines = []
        while not self.stdout_queue.empty():
            line = await self.stdout_queue.get()
            output_lines.append(line)
        return "\n".join(output_lines)

    async def _read_and_process_output(self, cmd, daemon=False) -> str:
        async with self.observer as observer:
            cmd_output = []
            await observer.async_report(cmd + self.command_terminator, "cmd")  # report the command
            # Read the output until the unique marker is found.
            # We read bytes directly from stdout instead of text because when reading text,
            # '\r' is changed to '\n', resulting in excessive output.
            tmp = b""
            while True:
                output = tmp + await self.process.stdout.read(1)
                if not output:
                    # NOTE(review): if the shell exits before emitting the end
                    # marker, read() keeps returning b"" and this busy-waits —
                    # confirm whether an EOF break is needed here.
                    continue
                *lines, tmp = output.splitlines(True)
                for line in lines:
                    line = line.decode()
                    ix = line.rfind(END_MARKER_VALUE)
                    if ix >= 0:
                        # Marker line: keep any output preceding the marker, then stop.
                        line = line[0:ix]
                        if line:
                            await observer.async_report(line, "output")  # report stdout in real-time
                            cmd_output.append(line)
                        return "".join(cmd_output)
                    # log stdout in real-time
                    await observer.async_report(line, "output")
                    cmd_output.append(line)
                    if daemon:
                        await self.stdout_queue.put(line)

    async def close(self):
        """Close the persistent shell process."""
        if self.process is None:
            # Nothing was ever started; the original raised AttributeError here.
            return
        self.process.stdin.close()
        await self.process.wait()
@register_tool(include_functions=["run"])
class Bash(Terminal):
    """
    A class to run bash commands directly and provides custom shell functions.
    All custom functions in this class can ONLY be called via the `Bash.run` method.
    """

    def __init__(self):
        """init"""
        # Expose the workspace path to the SWE shell helpers before the base shell starts.
        os.environ["SWE_CMD_WORK_DIR"] = str(Config.default().workspace.path)
        super().__init__()
        # Set True once the SWE helper functions have been sourced into the shell.
        self.start_flag = False

    async def start(self):
        # cd into the workspace and source the custom shell functions (open, edit, ...).
        await self.run_command(f"cd {Config.default().workspace.path}")
        await self.run_command(f"source {SWE_SETUP_PATH}")

    async def run(self, cmd) -> str:
        """
        Executes a bash command.

        Args:
            cmd (str): The bash command to execute.

        Returns:
            str: The output of the command.

        This method allows for executing standard bash commands as well as
        utilizing several custom shell functions defined in the environment.

        Custom Shell Functions:
        - open <path> [<line_number>]
            Opens the file at the given path in the editor. If line_number is provided,
            the window will move to include that line.
            Arguments:
                path (str): The path to the file to open.
                line_number (int, optional): The line number to move the window to.
                    If not provided, the window will start at the top of the file.

        - goto <line_number>
            Moves the window to show <line_number>.
            Arguments:
                line_number (int): The line number to move the window to.

        - scroll_down
            Moves the window down {WINDOW} lines.

        - scroll_up
            Moves the window up {WINDOW} lines.

        - create <filename>
            Creates and opens a new file with the given name.
            Arguments:
                filename (str): The name of the file to create.

        - search_dir_and_preview <search_term> [<dir>]
            Searches for search_term in all files in dir and gives their code preview
            with line numbers. If dir is not provided, searches in the current directory.
            Arguments:
                search_term (str): The term to search for.
                dir (str, optional): The directory to search in. Defaults to the current directory.

        - search_file <search_term> [<file>]
            Searches for search_term in file. If file is not provided, searches in the current open file.
            Arguments:
                search_term (str): The term to search for.
                file (str, optional): The file to search in. Defaults to the current open file.

        - find_file <file_name> [<dir>]
            Finds all files with the given name in dir. If dir is not provided, searches in the current directory.
            Arguments:
                file_name (str): The name of the file to search for.
                dir (str, optional): The directory to search in. Defaults to the current directory.

        - edit <start_line>:<end_line> <<EOF
          <replacement_text>
          EOF
            Line numbers start from 1. Replaces lines <start_line> through <end_line> (inclusive) with the given text in the open file.
            The replacement text is terminated by a line with only EOF on it. All of the <replacement text> will be entered, so make
            sure your indentation is formatted properly. Python files will be checked for syntax errors after the edit. If the system
            detects a syntax error, the edit will not be executed. Simply try to edit the file again, but make sure to read the error
            message and modify the edit command you issue accordingly. Issuing the same command a second time will just lead to the same
            error message again. All code modifications made via the 'edit' command must strictly follow the PEP8 standard.
            Arguments:
                start_line (int): The line number to start the edit at, starting from 1.
                end_line (int): The line number to end the edit at (inclusive), starting from 1.
                replacement_text (str): The text to replace the current selection with, must conform to PEP8 standards.

        - submit
            Submits your current code locally. it can only be executed once, the last action before the `end`.

        Note: Make sure to use these functions as per their defined arguments and behaviors.
        """
        # Lazily source the SWE helper functions on first use.
        if not self.start_flag:
            await self.start()
            self.start_flag = True

        return await self.run_command(cmd)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/data_preprocess.py | metagpt/tools/libs/data_preprocess.py | from __future__ import annotations
import json
from typing import Literal
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import (
LabelEncoder,
MaxAbsScaler,
MinMaxScaler,
OneHotEncoder,
OrdinalEncoder,
RobustScaler,
StandardScaler,
)
from metagpt.tools.tool_registry import register_tool
# Tool-registry tags shared by every preprocessing tool in this module.
TAGS = ["data preprocessing", "machine learning"]
class MLProcess:
    """Abstract base for fit/transform style machine-learning processors.

    Subclasses implement `fit` and `transform`; `fit_transform` chains the two.
    """

    def fit(self, df: pd.DataFrame):
        """Learn whatever state a later `transform` needs from `df`.

        Args:
            df (pd.DataFrame): The input DataFrame.
        """
        raise NotImplementedError

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return the transformed version of `df` using previously fitted state.

        Args:
            df (pd.DataFrame): The input DataFrame.

        Returns:
            pd.DataFrame: The transformed DataFrame.
        """
        raise NotImplementedError

    def fit_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Fit on `df` and immediately transform it.

        Args:
            df (pd.DataFrame): The input DataFrame.

        Returns:
            pd.DataFrame: The transformed DataFrame.
        """
        self.fit(df)
        return self.transform(df)
class DataPreprocessTool(MLProcess):
    """Base class for preprocessing tools that wrap an sklearn-style estimator.

    The wrapped `self.model` is fitted on, and applied to, only the columns
    named in `self.features`; all other columns pass through untouched.
    """

    def __init__(self, features: list):
        """Remember which columns to process.

        Args:
            features (list): Column names the wrapped model should operate on.
        """
        self.features = features
        self.model = None  # concrete subclasses assign the real estimator

    def fit(self, df: pd.DataFrame):
        if len(self.features) == 0:
            return  # nothing selected -> nothing to learn
        self.model.fit(df[self.features])

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        if len(self.features) == 0:
            return df  # no selected columns: hand back the original frame
        result = df.copy()
        result[self.features] = self.model.transform(result[self.features])
        return result
@register_tool(tags=TAGS)
class FillMissingValue(DataPreprocessTool):
    """Impute missing values in the selected columns with a simple strategy."""

    def __init__(
        self, features: list, strategy: Literal["mean", "median", "most_frequent", "constant"] = "mean", fill_value=None
    ):
        """Configure a SimpleImputer over the chosen columns.

        Args:
            features (list): Columns to impute.
            strategy (Literal["mean", "median", "most_frequent", "constant"]): Imputation
                strategy; "mean" and "median" are valid only for numeric features.
                Defaults to "mean".
            fill_value: Replacement for missing values when strategy is "constant".
                Defaults to None.
        """
        super().__init__(features)
        self.model = SimpleImputer(strategy=strategy, fill_value=fill_value)
@register_tool(tags=TAGS)
class MinMaxScale(DataPreprocessTool):
    """Rescale each selected feature linearly onto the (0, 1) range."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = MinMaxScaler()
@register_tool(tags=TAGS)
class StandardScale(DataPreprocessTool):
    """Standardize the selected features: subtract the mean, scale to unit variance."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = StandardScaler()
@register_tool(tags=TAGS)
class MaxAbsScale(DataPreprocessTool):
    """Scale each selected feature by its maximum absolute value."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = MaxAbsScaler()
@register_tool(tags=TAGS)
class RobustScale(DataPreprocessTool):
    """Scale the selected features with outlier-robust statistics (RobustScaler)."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = RobustScaler()
@register_tool(tags=TAGS)
class OrdinalEncode(DataPreprocessTool):
    """Encode the selected categorical features as ordinal integers."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = OrdinalEncoder()
@register_tool(tags=TAGS)
class OneHotEncode(DataPreprocessTool):
    """One-hot encode the selected categorical columns; the originals are dropped."""

    def __init__(self, features: list):
        super().__init__(features)
        self.model = OneHotEncoder(handle_unknown="ignore", sparse_output=False)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        encoded = self.model.transform(df[self.features])
        encoded_frame = pd.DataFrame(
            encoded, columns=self.model.get_feature_names_out(self.features), index=df.index
        )
        # Replace the raw categorical columns with their encoded expansion.
        remainder = df.drop(self.features, axis=1)
        return pd.concat([remainder, encoded_frame], axis=1)
@register_tool(tags=TAGS)
class LabelEncode(DataPreprocessTool):
    """
    Apply label encoding to specified categorical columns in-place.
    """

    def __init__(self, features: list):
        """
        Initialize self.

        Args:
            features (list): Categorical columns to be label encoded.
        """
        self.features = features
        self.le_encoders = []

    def fit(self, df: pd.DataFrame):
        if len(self.features) == 0:
            return
        # Reset so a second fit (e.g. fit_transform then fit) does not
        # accumulate stale encoders and misalign them with self.features.
        self.le_encoders = []
        for col in self.features:
            # Reserve an explicit "unknown" class so transform can map unseen
            # categories to it instead of raising.
            le = LabelEncoder().fit(df[col].astype(str).unique().tolist() + ["unknown"])
            self.le_encoders.append(le)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        if len(self.features) == 0:
            return df
        new_df = df.copy()
        for i, col in enumerate(self.features):
            # O(n) set lookup instead of rescanning the column once per
            # unseen category; values never seen during fit map to "unknown".
            known = set(self.le_encoders[i].classes_)
            data_list = [x if x in known else "unknown" for x in df[col].astype(str)]
            new_df[col] = self.le_encoders[i].transform(data_list)
        return new_df
def get_column_info(df: pd.DataFrame) -> dict:
    """
    Analyzes a DataFrame and categorizes its columns based on data types.

    Args:
        df (pd.DataFrame): The DataFrame to be analyzed.

    Returns:
        dict: A dictionary with four keys ('Category', 'Numeric', 'Datetime', 'Others').
            Each key corresponds to a list of column names belonging to that category.
    """
    column_info = {"Category": [], "Numeric": [], "Datetime": [], "Others": []}
    for col in df.columns:
        dtype_name = str(df[col].dtype).replace("dtype('", "").replace("')", "")
        if dtype_name.startswith("object"):
            bucket = "Category"
        elif dtype_name.startswith(("int", "float")):
            bucket = "Numeric"
        elif dtype_name.startswith("datetime"):
            bucket = "Datetime"
        else:
            bucket = "Others"
        column_info[bucket].append(col)
    # Keep the serialized summary compact enough to embed in a prompt.
    if len(json.dumps(column_info)) > 2000:
        column_info["Numeric"] = column_info["Numeric"][0:5] + ["Too many cols, omission here..."]
    return column_info
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/cr.py | metagpt/tools/libs/cr.py | import difflib
import json
from pathlib import Path
from typing import Optional
import aiofiles
from bs4 import BeautifulSoup
from unidiff import PatchSet
import metagpt.ext.cr
from metagpt.ext.cr.actions.code_review import CodeReview as CodeReview_
from metagpt.ext.cr.actions.modify_code import ModifyCode
from metagpt.ext.cr.utils.schema import Point
from metagpt.tools.libs.browser import Browser
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.report import EditorReporter
@register_tool(tags=["codereview"], include_functions=["review", "fix"])
class CodeReview:
    """Review and fix the patch content from the pull request URL or a file."""

    async def review(
        self,
        patch_path: str,
        output_file: str,
        point_file: Optional[str] = None,
    ) -> str:
        """Review a PR and save code review comments.

        Notes:
            If the user does not specify an output path, save it using a relative path in the current working directory.

        Args:
            patch_path: The local path of the patch file or the URL of the pull request.
            output_file: Output file path where code review comments will be saved.
            point_file: File path for specifying code review points. If not specified, this parameter does not need to be passed.

        Returns:
            A summary message with the defect count and where comments/checkpoints are stored,
            or the error text when the underlying review rejects the input.

        Examples:
            >>> cr = CodeReview()
            >>> await cr.review(patch_path="https://github.com/geekan/MetaGPT/pull/136", output_file="cr/MetaGPT_136.json")
            >>> await cr.review(patch_path="/data/uploads/dev-master.diff", output_file="cr/dev-master.json")
            >>> await cr.review(patch_path="/data/uploads/main.py", output_file="cr/main.json")
        """
        patch = await self._get_patch_content(patch_path)
        # Fall back to the checkpoint file bundled with the cr extension package.
        point_file = point_file if point_file else Path(metagpt.ext.cr.__file__).parent / "points.json"
        await EditorReporter().async_report(str(point_file), "path")
        async with aiofiles.open(point_file, "rb") as f:
            cr_point_content = await f.read()
            cr_points = [Point(**i) for i in json.loads(cr_point_content)]
        try:
            comments = await CodeReview_().run(patch, cr_points, output_file)
        except ValueError as e:
            # Surface validation problems to the caller as a plain message.
            return str(e)
        return f"The number of defects: {len(comments)}, the comments are stored in {output_file}, and the checkpoints are stored in {str(point_file)}"

    async def fix(
        self,
        patch_path: str,
        cr_file: str,
        output_dir: str,
    ) -> str:
        """Fix the patch content based on code review comments.

        Args:
            patch_path: The local path of the patch file or the url of the pull request.
            cr_file: File path where code review comments are stored.
            output_dir: Directory where the fixed patch files will be written.
        """
        patch = await self._get_patch_content(patch_path)
        async with aiofiles.open(cr_file, "r", encoding="utf-8") as f:
            comments = json.loads(await f.read())
        await ModifyCode(pr="").run(patch, comments, output_dir)
        return f"The fixed patch files store in {output_dir}"

    async def _get_patch_content(self, patch_path):
        """Load `patch_path` (URL or local file) and parse it into a unidiff PatchSet."""
        if patch_path.startswith(("https://", "http://")):
            # async with aiohttp.ClientSession(trust_env=True) as client:
            #     async with client.get(f"{patch_path}.diff", ) as resp:
            #         patch_file_content = await resp.text()
            # Fetch the raw ".diff" view of the pull request through a headless browser.
            async with Browser() as browser:
                await browser.goto(f"{patch_path}.diff")
                patch_file_content = await browser.page.content()
                if patch_file_content.startswith("<html>"):
                    # The browser wraps plain-text responses as <html><pre>...</pre>;
                    # unwrap to recover the raw diff text.
                    soup = BeautifulSoup(patch_file_content, "html.parser")
                    pre = soup.find("pre")
                    if pre:
                        patch_file_content = pre.text
        else:
            async with aiofiles.open(patch_path, encoding="utf-8") as f:
                patch_file_content = await f.read()
                await EditorReporter().async_report(patch_path)
            if not patch_path.endswith((".diff", ".patch")):
                # A plain source file: synthesize an "add whole file" unified diff
                # so downstream code can treat it like any other patch.
                name = Path(patch_path).name
                patch_file_content = "".join(
                    difflib.unified_diff([], patch_file_content.splitlines(keepends=True), "/dev/null", f"b/{name}"),
                )
                patch_file_content = f"diff --git a/{name} b/{name}\n{patch_file_content}"
        patch: PatchSet = PatchSet(patch_file_content)
        return patch
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/deployer.py | metagpt/tools/libs/deployer.py | from metagpt.tools.tool_registry import register_tool
# An un-implemented tool reserved for deploying a local service to public
@register_tool(
    include_functions=[
        "deploy_to_public",
    ]
)
class Deployer:
    """Deploy a local service to public. Used only for final deployment, you should NOT use it for development and testing."""

    async def static_server(self, src_path: str) -> str:
        """Placeholder: the real implementation is provided by the remote service.

        Args:
            src_path (str): Directory whose contents should be served.

        Returns:
            str: The public URL where the content is reachable.
        """
        return "http://127.0.0.1:8000/index.html"

    async def deploy_to_public(self, dist_dir: str) -> str:
        """
        Deploy a web project to public.

        Args:
            dist_dir (str): The dist directory of the web project after run build.

        Example:
            >>> deployer = Deployer()
            >>> await deployer.deploy_to_public("2048_game/dist")
        """
        url = await self.static_server(dist_dir)
        # Typo fixed in the user-facing message ("successed" -> "succeeded").
        return f"The Project is deployed to: {url}\n Deployment succeeded!"
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/email_login.py | metagpt/tools/libs/email_login.py | from imap_tools import MailBox
from metagpt.tools.tool_registry import register_tool
# Map an email domain to its IMAP server address; `email_login_imap` below
# uses this to pick the right host for a given account.
IMAP_SERVERS = {
    "outlook.com": "imap-mail.outlook.com",  # Outlook
    "163.com": "imap.163.com",  # 163 Mail
    "qq.com": "imap.qq.com",  # QQ Mail
    "gmail.com": "imap.gmail.com",  # Gmail
    "yahoo.com": "imap.mail.yahoo.com",  # Yahoo Mail
    "icloud.com": "imap.mail.me.com",  # iCloud Mail
    "hotmail.com": "imap-mail.outlook.com",  # Hotmail (same as Outlook)
    "live.com": "imap-mail.outlook.com",  # Live (same as Outlook)
    "sina.com": "imap.sina.com",  # Sina Mail
    "sohu.com": "imap.sohu.com",  # Sohu Mail
    "yahoo.co.jp": "imap.mail.yahoo.co.jp",  # Yahoo Mail Japan
    "yandex.com": "imap.yandex.com",  # Yandex Mail
    "mail.ru": "imap.mail.ru",  # Mail.ru
    "aol.com": "imap.aol.com",  # AOL Mail
    "gmx.com": "imap.gmx.com",  # GMX Mail
    "zoho.com": "imap.zoho.com",  # Zoho Mail
}
@register_tool(tags=["email login"])
def email_login_imap(email_address, email_password):
    """
    Use imap_tools package to log in to your email (the email that supports IMAP protocol) to verify and return the account object.

    Args:
        email_address (str): Email address that needs to be logged in and linked.
        email_password (str): Password for the email address that needs to be logged in and linked.

    Returns:
        object: The imap_tools MailBox object returned after successfully connecting to the mailbox
            through imap_tools, including various information about this account (email, etc.).

    Raises:
        ValueError: If the email domain has no known IMAP server.
    """
    # Extract the domain from the email address
    domain = email_address.split("@")[-1]
    # Determine the correct IMAP server
    imap_server = IMAP_SERVERS.get(domain)
    if not imap_server:
        # Raise instead of `assert`: asserts are stripped under `python -O`,
        # which would silently skip this validation.
        raise ValueError(f"IMAP server for {domain} not found.")
    # Attempt to log in to the email account
    mailbox = MailBox(imap_server).login(email_address, email_password)
    return mailbox
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/image_getter.py | metagpt/tools/libs/image_getter.py | from __future__ import annotations
from typing import Optional
from playwright.async_api import Browser as Browser_
from playwright.async_api import BrowserContext, Page, Playwright, async_playwright
from pydantic import BaseModel, ConfigDict, Field
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.common import decode_image
from metagpt.utils.proxy_env import get_proxy_from_env
from metagpt.utils.report import BrowserReporter
DOWNLOAD_PICTURE_JAVASCRIPT = """
async () => {{
var img = document.querySelector('{img_element_selector}');
if (img && img.src) {{
const response = await fetch(img.src);
if (response.ok) {{
const blob = await response.blob();
return await new Promise(resolve => {{
const reader = new FileReader();
reader.onloadend = () => resolve(reader.result);
reader.readAsDataURL(blob);
}});
}}
}}
return null;
}}
"""
@register_tool(include_functions=["get_image"])
class ImageGetter(BaseModel):
    """
    A tool to get images by searching Unsplash through a headless browser.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Playwright handles; populated lazily by `start()` and excluded from dumps.
    playwright: Optional[Playwright] = Field(default=None, exclude=True)
    browser_instance: Optional[Browser_] = Field(default=None, exclude=True)
    browser_ctx: Optional[BrowserContext] = Field(default=None, exclude=True)
    page: Optional[Page] = Field(default=None, exclude=True)
    headless: bool = Field(default=True)
    proxy: Optional[dict] = Field(default_factory=get_proxy_from_env)
    reporter: BrowserReporter = Field(default_factory=BrowserReporter)
    # Unsplash search URL template and the CSS selector of the result image;
    # the selector is tied to Unsplash's current markup and may rot.
    url: str = "https://unsplash.com/s/photos/{search_term}/"
    img_element_selector: str = ".zNNw1 > div > img:nth-of-type(2)"

    async def start(self) -> None:
        """Starts Playwright and launches a browser"""
        if self.playwright is None:
            self.playwright = playwright = await async_playwright().start()
            browser = self.browser_instance = await playwright.chromium.launch(headless=self.headless, proxy=self.proxy)
            browser_ctx = self.browser_ctx = await browser.new_context()
            self.page = await browser_ctx.new_page()

    async def get_image(self, search_term, image_save_path):
        """
        Get an image related to the search term.

        Args:
            search_term (str): The term to search for the image. The search term must be in English. Using any other language may lead to a mismatch.
            image_save_path (str): The file path where the image will be saved.
        """
        # Search for images from https://unsplash.com/s/photos/
        if self.page is None:
            await self.start()
        await self.page.goto(self.url.format(search_term=search_term), wait_until="domcontentloaded")
        # Wait until the image element is loaded
        try:
            await self.page.wait_for_selector(self.img_element_selector)
        except TimeoutError:
            # NOTE(review): this catches the *builtin* TimeoutError, but
            # playwright's wait_for_selector raises playwright's own
            # TimeoutError, which may not be a subclass of the builtin --
            # confirm this branch actually fires on timeout.
            return f"{search_term} not found. Please broaden the search term."
        # Get the base64 code of the first retrieved image
        image_base64 = await self.page.evaluate(
            DOWNLOAD_PICTURE_JAVASCRIPT.format(img_element_selector=self.img_element_selector)
        )
        if image_base64:
            image = decode_image(image_base64)
            image.save(image_save_path)
            return f"{search_term} found. The image is saved in {image_save_path}."
        return f"{search_term} not found. Please broaden the search term."
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/browser.py | metagpt/tools/libs/browser.py | from __future__ import annotations
import time
from typing import Literal, Optional
from playwright.async_api import Browser as Browser_
from playwright.async_api import (
BrowserContext,
Frame,
Page,
Playwright,
Request,
async_playwright,
)
from pydantic import BaseModel, ConfigDict, Field
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.a11y_tree import (
click_element,
get_accessibility_tree,
get_backend_node_id,
hover_element,
key_press,
parse_accessibility_tree,
scroll_page,
type_text,
)
from metagpt.utils.proxy_env import get_proxy_from_env
from metagpt.utils.report import BrowserReporter
@register_tool(
    tags=["web", "browse"],
    include_functions=[
        "click",
        "close_tab",
        "go_back",
        "go_forward",
        "goto",
        "hover",
        "press",
        "scroll",
        "tab_focus",
        "type",
    ],
)
class Browser(BaseModel):
    """A tool for browsing the web. Don't initialize a new instance of this class if one already exists.

    Note: If you plan to use the browser to assist you in completing tasks, then using the browser should be a standalone
    task, executing actions each time based on the content seen on the webpage before proceeding to the next step.

    ## Example
    Issue: The details of the latest issue in the geekan/MetaGPT repository.
    Plan: Use a browser to view the details of the latest issue in the geekan/MetaGPT repository.
    Solution:
    Let's first open the issue page of the MetaGPT repository with the `Browser.goto` command
    >>> await browser.goto("https://github.com/geekan/MetaGPT/issues")
    From the output webpage, we've identified that the latest issue can be accessed by clicking on the element with ID "1141".
    >>> await browser.click(1141)
    Finally, we have found the webpage for the latest issue, we can close the tab and finish current task.
    >>> await browser.close_tab()
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Playwright handles; created lazily by `start()` and excluded from dumps.
    playwright: Optional[Playwright] = Field(default=None, exclude=True)
    browser_instance: Optional[Browser_] = Field(default=None, exclude=True)
    browser_ctx: Optional[BrowserContext] = Field(default=None, exclude=True)
    page: Optional[Page] = Field(default=None, exclude=True)
    # Accessibility snapshot of the current page; the element ids accepted by
    # click/type/hover index into this tree (see metagpt.utils.a11y_tree).
    accessibility_tree: list = Field(default_factory=list)
    headless: bool = Field(default=True)
    proxy: Optional[dict] = Field(default_factory=get_proxy_from_env)
    # True while no real page has been navigated to in the current tab.
    is_empty_page: bool = Field(default=True)
    reporter: BrowserReporter = Field(default_factory=BrowserReporter)

    async def start(self) -> None:
        """Starts Playwright and launches a browser"""
        if self.playwright is None:
            self.playwright = playwright = await async_playwright().start()
            browser = self.browser_instance = await playwright.chromium.launch(headless=self.headless, proxy=self.proxy)
            browser_ctx = self.browser_ctx = await browser.new_context()
            self.page = await browser_ctx.new_page()

    async def stop(self):
        """Shut down Playwright; handles are cleared first so stop() is idempotent."""
        if self.playwright:
            playwright = self.playwright
            self.playwright = None
            self.browser_instance = None
            self.browser_ctx = None
            await playwright.stop()

    async def click(self, element_id: int):
        """clicks on an element with a specific id on the webpage."""
        await click_element(self.page, get_backend_node_id(element_id, self.accessibility_tree))
        return await self._wait_page()

    async def type(self, element_id: int, content: str, press_enter_after: bool = False):
        """Use this to type the content into the field with id."""
        if press_enter_after:
            # A trailing newline is typed as an Enter key press.
            content += "\n"
        await click_element(self.page, get_backend_node_id(element_id, self.accessibility_tree))
        await type_text(self.page, content)
        return await self._wait_page()

    async def hover(self, element_id: int):
        """Hover over an element with id."""
        await hover_element(self.page, get_backend_node_id(element_id, self.accessibility_tree))
        return await self._wait_page()

    async def press(self, key_comb: str):
        """Simulates the pressing of a key combination on the keyboard (e.g., Ctrl+v)."""
        await key_press(self.page, key_comb)
        return await self._wait_page()

    async def scroll(self, direction: Literal["down", "up"]):
        """Scroll the page up or down."""
        await scroll_page(self.page, direction)
        return await self._wait_page()

    async def goto(self, url: str, timeout: float = 90000):
        """Navigate to a specific URL."""
        if self.page is None:
            await self.start()
        async with self.reporter as reporter:
            await reporter.async_report(url, "url")
            await self.page.goto(url, timeout=timeout)
            self.is_empty_page = False
            return await self._wait_page()

    async def go_back(self):
        """Navigate to the previously viewed page."""
        await self.page.go_back()
        return await self._wait_page()

    async def go_forward(self):
        """Navigate to the next page (if a previous 'go_back' action was performed)."""
        await self.page.go_forward()
        return await self._wait_page()

    async def tab_focus(self, page_number: int):
        """Bring the existing tab at index `page_number` to the front and make it active."""
        page = self.browser_ctx.pages[page_number]
        await page.bring_to_front()
        return await self._wait_page()

    async def close_tab(self):
        """Close the currently active tab."""
        await self.page.close()
        # Fall back to the last remaining tab, or open a fresh empty one.
        if len(self.browser_ctx.pages) > 0:
            self.page = self.browser_ctx.pages[-1]
        else:
            self.page = await self.browser_ctx.new_page()
            self.is_empty_page = True
        return await self._wait_page()

    async def _wait_page(self):
        """Wait for the page to go idle, refresh the a11y tree, and report the page."""
        page = self.page
        await self._wait_until_page_idle(page)
        self.accessibility_tree = await get_accessibility_tree(page)
        await self.reporter.async_report(page, "page")
        return f"SUCCESS, URL: {page.url} have been loaded."

    def _register_page_event(self, page: Page):
        """Attach activity tracking to `page` (monkey-patches last_busy_time/requests onto it)."""
        page.last_busy_time = time.time()
        page.requests = set()
        page.on("domcontentloaded", self._update_page_last_busy_time)
        page.on("load", self._update_page_last_busy_time)
        page.on("request", self._on_page_request)
        # Failed requests are treated like finished ones: drop them from the
        # in-flight set so they don't keep the page "busy" forever.
        page.on("requestfailed", self._on_page_requestfinished)
        page.on("requestfinished", self._on_page_requestfinished)
        page.on("frameattached", self._on_frame_change)
        page.on("framenavigated", self._on_frame_change)

    async def _wait_until_page_idle(self, page) -> None:
        """Poll until no page/network event has fired for 0.5s."""
        if not hasattr(page, "last_busy_time"):
            self._register_page_event(page)
        else:
            page.last_busy_time = time.time()
        while time.time() - page.last_busy_time < 0.5:
            await page.wait_for_timeout(100)

    async def _update_page_last_busy_time(self, page: Page):
        # Any tracked event refreshes the idle timer.
        page.last_busy_time = time.time()

    async def _on_page_request(self, request: Request):
        # Track the in-flight request and mark the page busy.
        page = request.frame.page
        page.requests.add(request)
        await self._update_page_last_busy_time(page)

    async def _on_page_requestfinished(self, request: Request):
        request.frame.page.requests.discard(request)

    async def _on_frame_change(self, frame: Frame):
        await self._update_page_last_busy_time(frame.page)

    async def view(self):
        """Render the current page's accessibility tree as text for the agent."""
        observation = parse_accessibility_tree(self.accessibility_tree)
        return f"Current Browser Viewer\n URL: {self.page.url}\nOBSERVATION:\n{observation[0]}\n"

    async def __aenter__(self):
        await self.start()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self.stop()
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/editor.py | metagpt/tools/libs/editor.py | """
This file is borrowed from OpenDevin
You can find the original repository here:
https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/plugins/agent_skills/file_ops/file_ops.py
"""
import os
import re
import shutil
import tempfile
from pathlib import Path
from typing import List, Optional, Union
import tiktoken
from pydantic import BaseModel, ConfigDict
from metagpt.const import DEFAULT_MIN_TOKEN_COUNT, DEFAULT_WORKSPACE_ROOT
from metagpt.tools.libs.linter import Linter
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.common import awrite
from metagpt.utils.file import File
from metagpt.utils.report import EditorReporter
# This is also used in unit tests!
# Prefix used when a linter finds that an edit introduced new syntax errors.
LINTER_ERROR_MSG = "[Your proposed edit has introduced new syntax error(s). Please understand the errors and retry your edit command.]\n"

# Hint block comparing the indentation of the offending line with its
# predecessor, suggesting two alternative indents to try.
INDENTATION_INFO = """
The previous line is:
"{pre_line}"
The indentation has {pre_line_indent} spaces.
The error line is:
"{insert_line}"
The indentation has {insert_line_indent} spaces.
Please check the indentation of the code to ensure that it is not causing any errors.
Try using indentation with either {sub_4_space} or {add_4_space} spaces.
"""

# Message shown when an edit is rejected: linter output plus before/after windows.
ERROR_GUIDANCE = """
{linter_error_msg}
[This is how your edit would have looked if applied]
-------------------------------------------------
{window_after_applied}
-------------------------------------------------
[This is the original code before your edit]
-------------------------------------------------
{window_before_applied}
-------------------------------------------------
Your changes have NOT been applied. Please fix your edit command and try again
{guidance_message}
"""

# Error returned when a caller's line number and line content disagree.
LINE_NUMBER_AND_CONTENT_MISMATCH = """Error: The `{position}_replaced_line_number` does not match the `{position}_replaced_line_content`. Please correct the parameters.
The `{position}_replaced_line_number` is {line_number} and the corresponding content is "{true_content}".
But the `{position}_replaced_line_content ` is "{fake_content}".
The content around the specified line is:
{context}
Pay attention to the new content. Ensure that it aligns with the new parameters.
"""

# Confirmation shown after a successful edit, with a window around the change.
SUCCESS_EDIT_INFO = """
[File: {file_name} ({n_total_lines} lines total after edit)]
{window_after_applied}
[File updated (edited at line {line_number})].
"""
# Please review the changes and make sure they are correct (correct indentation, no duplicate lines, etc). Edit the file again if necessary.
class FileBlock(BaseModel):
    """A block of content in a file"""

    # Path of the file the block came from (stored as a string).
    file_path: str
    # Text of the block; `Editor.read` fills this with "NNN|<line>"-numbered
    # lines, or with an explanatory message when the file is too large/empty.
    block_content: str
class LineNumberError(Exception):
    """Exception signaling a line-number problem during a file edit operation."""

    pass
@register_tool(
    include_functions=[
        "write",
        "read",
        "open_file",
        "goto_line",
        "scroll_down",
        "scroll_up",
        "create_file",
        "edit_file_by_replace",
        "insert_content_at_line",
        "append_file",
        "search_dir",
        "search_file",
        "find_file",
        "similarity_search",
    ]
)
class Editor(BaseModel):
    """
    A tool for reading, understanding, writing, and editing files.
    Support local file including text-based files (txt, md, json, py, html, js, css, etc.), pdf, docx, excluding images, csv, excel, or online links
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Reporter used to surface opened/edited files (see EditorReporter).
    resource: EditorReporter = EditorReporter()
    # File currently opened via `open_file`; None until a file is opened.
    current_file: Optional[Path] = None
    # 1-based line the viewing window is currently centered on.
    current_line: int = 1
    # Number of lines shown per viewing window.
    window: int = 200
    # NOTE(review): presumably toggles lint-and-reject after edits -- the
    # consuming code is outside this view; confirm before relying on it.
    enable_auto_lint: bool = False
    # Base directory; used as the linter root (see _lint_file / _set_workdir).
    working_dir: Path = DEFAULT_WORKSPACE_ROOT
def write(self, path: str, content: str):
"""Write the whole content to a file. When used, make sure content arg contains the full content of the file."""
path = self._try_fix_path(path)
if "\n" not in content and "\\n" in content:
# A very raw rule to correct the content: If 'content' lacks actual newlines ('\n') but includes '\\n', consider
# replacing them with '\n' to potentially correct mistaken representations of newline characters.
content = content.replace("\\n", "\n")
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
os.makedirs(directory)
with open(path, "w", encoding="utf-8") as f:
f.write(content)
# self.resource.report(path, "path")
return f"The writing/coding the of the file {os.path.basename(path)}' is now completed. The file '{os.path.basename(path)}' has been successfully created."
async def read(self, path: str) -> FileBlock:
"""Read the whole content of a file. Using absolute paths as the argument for specifying the file location."""
path = self._try_fix_path(path)
error = FileBlock(
file_path=str(path),
block_content="The file is too large to read. Use `Editor.similarity_search` to read the file instead.",
)
path = Path(path)
if path.stat().st_size > 5 * DEFAULT_MIN_TOKEN_COUNT:
return error
content = await File.read_text_file(path)
if not content:
return FileBlock(file_path=str(path), block_content="")
if self.is_large_file(content=content):
return error
self.resource.report(str(path), "path")
lines = content.splitlines(keepends=True)
lines_with_num = [f"{i + 1:03}|{line}" for i, line in enumerate(lines)]
result = FileBlock(
file_path=str(path),
block_content="".join(lines_with_num),
)
return result
@staticmethod
def _is_valid_filename(file_name: str) -> bool:
if not file_name or not file_name.strip():
return False
invalid_chars = '<>:"/\\|?*'
if os.name == "nt": # Windows
invalid_chars = '<>:"/\\|?*'
elif os.name == "posix": # Unix-like systems
invalid_chars = "\0"
for char in invalid_chars:
if char in file_name:
return False
return True
@staticmethod
def _is_valid_path(path: Path) -> bool:
try:
return path.exists()
except PermissionError:
return False
@staticmethod
def _create_paths(file_path: Path) -> bool:
try:
if file_path.parent:
file_path.parent.mkdir(parents=True, exist_ok=True)
return True
except PermissionError:
return False
def _check_current_file(self, file_path: Optional[Path] = None) -> bool:
if file_path is None:
file_path = self.current_file
if not file_path or not file_path.is_file():
raise ValueError("No file open. Use the open_file function first.")
return True
@staticmethod
def _clamp(value, min_value, max_value):
return max(min_value, min(value, max_value))
    def _lint_file(self, file_path: Path) -> tuple[Optional[str], Optional[int]]:
        """Lint the file at the given path and return a tuple with a boolean indicating if there are errors,
        and the line number of the first error, if any.

        Returns:
            tuple[str | None, int | None]: (lint_error, first_error_line_number)
        """
        linter = Linter(root=self.working_dir)
        lint_error = linter.lint(str(file_path))
        if not lint_error:
            # Linting successful. No issues found.
            return None, None
        # lint_error.lines[0] is the first offending line reported by the linter.
        return "ERRORS:\n" + lint_error.text, lint_error.lines[0]
    def _print_window(self, file_path: Path, targeted_line: int, window: int):
        """Render a numbered window of roughly `window` lines centered on `targeted_line`,
        framed with "(N more lines above/below)" or begin/end-of-file markers.
        """
        self._check_current_file(file_path)
        with file_path.open() as file:
            content = file.read()

            # Ensure the content ends with a newline character
            if not content.endswith("\n"):
                content += "\n"

            lines = content.splitlines(True)  # Keep all line ending characters
            total_lines = len(lines)

            # cover edge cases
            self.current_line = self._clamp(targeted_line, 1, total_lines)
            half_window = max(1, window // 2)

            # Ensure at least one line above and below the targeted line
            start = max(1, self.current_line - half_window)
            end = min(total_lines, self.current_line + half_window)

            # Adjust start and end to ensure at least one line above and below
            if start == 1:
                end = min(total_lines, start + window - 1)
            if end == total_lines:
                start = max(1, end - window + 1)

            output = ""

            # only display this when there's at least one line above
            if start > 1:
                output += f"({start - 1} more lines above)\n"
            else:
                output += "(this is the beginning of the file)\n"
            for i in range(start, end + 1):
                # 3-digit, zero-padded line numbers, matching Editor.read's format.
                _new_line = f"{i:03d}|{lines[i - 1]}"
                if not _new_line.endswith("\n"):
                    _new_line += "\n"
                output += _new_line
            if end < total_lines:
                output += f"({total_lines - end} more lines below)\n"
            else:
                output += "(this is the end of the file)\n"
            output = output.rstrip()
            return output
@staticmethod
def _cur_file_header(current_file: Path, total_lines: int) -> str:
if not current_file:
return ""
return f"[File: {current_file.resolve()} ({total_lines} lines total)]\n"
def _set_workdir(self, path: str) -> None:
    """Set the editor's working directory, e.g. the repository root.

    This MUST be configured before opening files.

    Args:
        path: The directory to use as the working directory.
    """
    self.working_dir = Path(path)
def open_file(
    self, path: Union[Path, str], line_number: Optional[int] = 1, context_lines: Optional[int] = None
) -> str:
    """Opens the file at the given path in the editor. If line_number is provided, the window will be moved to include that line.

    It only shows the first 100 lines by default! Max `context_lines` supported is 2000, use `scroll up/down`
    to view the file if you want to see more.

    Args:
        path: str: The path to the file to open, preferred absolute path.
        line_number: int | None = 1: The line number to move to. Defaults to 1.
        context_lines: int | None = 100: Only shows this number of lines in the context window (usually from line 1), with line_number as the center (if possible). Defaults to 100.

    Raises:
        FileNotFoundError: If the path is not an existing file.
        ValueError: If line_number is not an int within the file's bounds.
    """
    # Consolidated default handling: the original checked `context_lines is
    # None` twice (the second check was dead code); None or a non-positive
    # value both fall back to the editor's window size.
    if context_lines is None or context_lines < 1:
        context_lines = self.window

    path = self._try_fix_path(path)
    if not path.is_file():
        raise FileNotFoundError(f"File {path} not found")

    self.current_file = path
    with path.open() as file:
        total_lines = max(1, sum(1 for _ in file))

    if not isinstance(line_number, int) or line_number < 1 or line_number > total_lines:
        raise ValueError(f"Line number must be between 1 and {total_lines}")
    self.current_line = line_number

    output = self._cur_file_header(path, total_lines)
    # The window size is capped at 2000 lines regardless of what was requested.
    output += self._print_window(path, self.current_line, self._clamp(context_lines, 1, 2000))
    self.resource.report(path, "path")
    return output
def goto_line(self, line_number: int) -> str:
    """Move the viewing window so that the given line is shown.

    Args:
        line_number: 1-based line number to jump to.

    Raises:
        ValueError: If line_number is not an int within the file's bounds.
    """
    self._check_current_file()

    with self.current_file.open() as fp:
        total_lines = max(1, sum(1 for _ in fp))

    in_bounds = isinstance(line_number, int) and 1 <= line_number <= total_lines
    if not in_bounds:
        raise ValueError(f"Line number must be between 1 and {total_lines}")

    self.current_line = self._clamp(line_number, 1, total_lines)
    header = self._cur_file_header(self.current_file, total_lines)
    return header + self._print_window(self.current_file, self.current_line, self.window)
def scroll_down(self) -> str:
    """Advance the viewing window by one window height (default 100 lines)."""
    self._check_current_file()
    with self.current_file.open() as fp:
        total_lines = max(1, sum(1 for _ in fp))
    # Move forward by one window, clamped to the end of the file.
    self.current_line = self._clamp(self.current_line + self.window, 1, total_lines)
    header = self._cur_file_header(self.current_file, total_lines)
    body = self._print_window(self.current_file, self.current_line, self.window)
    return header + body
def scroll_up(self) -> str:
    """Move the viewing window back by one window height (default 100 lines)."""
    self._check_current_file()
    with self.current_file.open() as fp:
        total_lines = max(1, sum(1 for _ in fp))
    # Move backward by one window, clamped to the start of the file.
    self.current_line = self._clamp(self.current_line - self.window, 1, total_lines)
    header = self._cur_file_header(self.current_file, total_lines)
    body = self._print_window(self.current_file, self.current_line, self.window)
    return header + body
async def create_file(self, filename: str) -> str:
    """Creates and opens a new file with the given name.

    Args:
        filename: str: The name of the file to create. If the parent directory does not exist, it will be created.

    Raises:
        FileExistsError: If a file already exists at the resolved path.
    """
    filename = self._try_fix_path(filename)
    if filename.exists():
        # Fix: the message previously contained the literal text "(unknown)"
        # instead of interpolating the file name.
        raise FileExistsError(f"File '{filename}' already exists.")
    await awrite(filename, "\n")
    self.open_file(filename)
    return f"[File {filename} created.]"
@staticmethod
def _append_impl(lines, content):
    """Compute the file content that results from appending *content*.

    Args:
        lines: list[str]: The lines in the original file (with line endings).
        content: str: The content to append to the file.

    Returns:
        content: str: The new content of the file.
        n_added_lines: int: The number of lines added to the file.
    """
    appended = content.splitlines(keepends=True)
    n_added_lines = len(appended)

    file_is_empty = not lines or (len(lines) == 1 and lines[0].strip() == "")
    if file_is_empty:
        return "".join(appended), n_added_lines

    # Make sure the existing last line is newline-terminated before appending.
    if not lines[-1].endswith("\n"):
        lines[-1] += "\n"
    return "".join(lines + appended), n_added_lines
@staticmethod
def _insert_impl(lines, start, content):
    """Compute the file content that results from inserting *content* at *start*.

    Args:
        lines: list[str]: The lines in the original file (with line endings).
        start: int: The 1-based line number the content is inserted before.
        content: str: The content to insert (a trailing newline is added if missing).

    Returns:
        content: str: The new content of the file.
        n_added_lines: int: The number of lines added to the file.

    Raises:
        LineNumberError: If *start* is None while the file is non-empty.
    """
    block = [content if content.endswith("\n") else content + "\n"]

    if not lines:
        merged = block
    elif start is None:
        raise LineNumberError(
            f"Invalid line number: {start}. Line numbers must be between 1 and {len(lines)} (inclusive)."
        )
    else:
        # A file holding a single blank line is treated as empty.
        if len(lines) == 1 and lines[0].strip() == "":
            lines = []
        merged = block if not lines else lines[: start - 1] + block + lines[start - 1 :]

    return "".join(merged), len(block)
@staticmethod
def _edit_impl(lines, start, end, content):
    """Replace lines[start..end] (1-based, inclusive) with *content*.

    REQUIRES (should be checked by caller):
        start <= end
        start and end are between 1 and len(lines) (inclusive)
        content ends with a newline

    Args:
        lines: list[str]: The lines in the original file.
        start: int: The start line number for editing (defaults to 1 when None).
        end: int: The end line number for editing (defaults to len(lines) when None).
        content: str: The content to replace the lines with.

    Returns:
        content: str: The new content of the file.
        n_added_lines: int: The number of lines added to the file.

    Raises:
        LineNumberError: On out-of-range or inverted bounds.
    """
    n_lines = len(lines)
    start = 1 if start is None else start
    end = n_lines if end is None else end

    if not 1 <= start <= n_lines:
        raise LineNumberError(
            f"Invalid start line number: {start}. Line numbers must be between 1 and {n_lines} (inclusive)."
        )
    if not 1 <= end <= n_lines:
        raise LineNumberError(
            f"Invalid end line number: {end}. Line numbers must be between 1 and {n_lines} (inclusive)."
        )
    if start > end:
        raise LineNumberError(f"Invalid line range: {start}-{end}. Start must be less than or equal to end.")

    # Normalize the replacement text to end with a newline, then split it
    # keeping the line endings so counts stay accurate.
    if not content.endswith("\n"):
        content += "\n"
    replacement = content.splitlines(True)
    n_added_lines = len(replacement)

    new_lines = lines[: start - 1] + replacement + lines[end:]
    # Kept for parity with the original implementation (unreachable after the
    # bounds checks above, which reject an empty `lines`).
    if n_lines == 0:
        new_lines = replacement
    return "".join(new_lines), n_added_lines
def _get_indentation_info(self, content, first_line):
    """Describe the indentation of the first inserted line and the line before it.

    Returns a formatted guidance message (INDENTATION_INFO) suggesting
    indentation adjustments for the next edit attempt.
    """
    rows = content.split("\n")
    previous = rows[first_line - 2] if first_line >= 2 else ""
    current = rows[first_line - 1]
    prev_indent = len(previous) - len(previous.lstrip())
    cur_indent = len(current) - len(current.lstrip())
    return INDENTATION_INFO.format(
        pre_line=previous,
        pre_line_indent=prev_indent,
        insert_line=current,
        insert_line_indent=cur_indent,
        sub_4_space=max(cur_indent - 4, 0),
        add_4_space=cur_indent + 4,
    )
def _edit_file_impl(
    self,
    file_name: Path,
    start: Optional[int] = None,
    end: Optional[int] = None,
    content: str = "",
    is_insert: bool = False,
    is_append: bool = False,
) -> str:
    """Internal method to handle common logic for edit_/append_file methods.

    Writes the modified content to a temp file, atomically swaps it into
    place, optionally lints the result, and rolls back to a backup if the
    edit introduced new lint errors.

    Args:
        file_name: Path: The name of the file to edit or append to.
        start: int | None = None: The start line number for editing. Ignored if is_append is True.
        end: int | None = None: The end line number for editing. Ignored if is_append is True.
        content: str: The content to replace the lines with or to append.
        is_insert: bool = False: Whether to insert content at the given line number instead of editing.
        is_append: bool = False: Whether to append content to the file instead of editing.

    Returns:
        str: A success report with the updated window, or an error/guidance
        message when the edit was rejected or rolled back.
    """
    ERROR_MSG = f"[Error editing file {file_name}. Please confirm the file is correct.]"
    ERROR_MSG_SUFFIX = (
        "Your changes have NOT been applied. Please fix your edit command and try again.\n"
        "You either need to 1) Open the correct file and try again or 2) Specify the correct line number arguments.\n"
        "DO NOT re-run the same failed edit command. Running it again will lead to the same error."
    )

    # Validate target path before touching anything on disk.
    if not self._is_valid_filename(file_name.name):
        raise FileNotFoundError("Invalid file name.")

    if not self._is_valid_path(file_name):
        raise FileNotFoundError("Invalid path or file name.")

    if not self._create_paths(file_name):
        raise PermissionError("Could not access or create directories.")

    if not file_name.is_file():
        raise FileNotFoundError(f"File {file_name} not found.")

    if is_insert and is_append:
        raise ValueError("Cannot insert and append at the same time.")

    # Use a temporary file to write changes
    content = str(content or "")
    temp_file_path = ""
    src_abs_path = file_name.resolve()
    first_error_line = None
    # The file to store previous content and will be removed automatically.
    # NOTE(review): delete=True means close() will try to unlink this path;
    # when the rollback paths below shutil.move() it away first, the later
    # implicit close may fail — confirm this is handled by the caller/GC.
    temp_backup_file = tempfile.NamedTemporaryFile("w", delete=True)

    try:
        # lint the original file
        # enable_auto_lint = os.getenv("ENABLE_AUTO_LINT", "false").lower() == "true"
        if self.enable_auto_lint:
            original_lint_error, _ = self._lint_file(file_name)

        # Create a temporary file
        with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
            temp_file_path = temp_file.name

            # Read the original file and check if empty and for a trailing newline
            with file_name.open() as original_file:
                lines = original_file.readlines()

            # Dispatch to the append/insert/edit content builders; line-number
            # problems are reported back to the caller instead of raising.
            if is_append:
                content, n_added_lines = self._append_impl(lines, content)
            elif is_insert:
                try:
                    content, n_added_lines = self._insert_impl(lines, start, content)
                except LineNumberError as e:
                    return (f"{ERROR_MSG}\n" f"{e}\n" f"{ERROR_MSG_SUFFIX}") + "\n"
            else:
                try:
                    content, n_added_lines = self._edit_impl(lines, start, end, content)
                except LineNumberError as e:
                    return (f"{ERROR_MSG}\n" f"{e}\n" f"{ERROR_MSG_SUFFIX}") + "\n"

            if not content.endswith("\n"):
                content += "\n"

            # Write the new content to the temporary file
            temp_file.write(content)

        # Replace the original file with the temporary file atomically
        shutil.move(temp_file_path, src_abs_path)

        # Handle linting
        # NOTE: we need to get env var inside this function
        # because the env var will be set AFTER the agentskills is imported
        if self.enable_auto_lint:
            # BACKUP the original file
            temp_backup_file.writelines(lines)
            temp_backup_file.flush()
            lint_error, first_error_line = self._lint_file(file_name)

            # Select the errors caused by the modification
            def extract_last_part(line):
                # Lint lines look like "path:line:col: message"; compare on the
                # trailing message part only.
                parts = line.split(":")
                if len(parts) > 1:
                    return parts[-1].strip()
                return line.strip()

            def subtract_strings(str1, str2) -> str:
                # Keep only the lines of str2 whose message part does not
                # already appear in str1 (i.e. errors new to this edit).
                lines1 = str1.splitlines()
                lines2 = str2.splitlines()

                last_parts1 = [extract_last_part(line) for line in lines1]

                remaining_lines = [line for line in lines2 if extract_last_part(line) not in last_parts1]

                result = "\n".join(remaining_lines)
                return result

            if original_lint_error and lint_error:
                lint_error = subtract_strings(original_lint_error, lint_error)
                if lint_error == "":
                    # All errors pre-existed the edit; treat as clean.
                    lint_error = None
                    first_error_line = None

            if lint_error is not None:
                # if first_error_line is not None:
                #     show_line = int(first_error_line)
                # show the first insert line.
                if is_append:
                    # original end-of-file
                    show_line = len(lines)
                # insert OR edit WILL provide meaningful line numbers
                elif start is not None and end is not None:
                    show_line = int((start + end) / 2)
                else:
                    raise ValueError("Invalid state. This should never happen.")

                guidance_message = self._get_indentation_info(content, start or len(lines))
                guidance_message += (
                    "You either need to 1) Specify the correct start/end line arguments or 2) Correct your edit code.\n"
                    "DO NOT re-run the same failed edit command. Running it again will lead to the same error."
                )
                lint_error_info = ERROR_GUIDANCE.format(
                    linter_error_msg=LINTER_ERROR_MSG + lint_error,
                    window_after_applied=self._print_window(file_name, show_line, n_added_lines + 20),
                    window_before_applied=self._print_window(
                        Path(temp_backup_file.name), show_line, n_added_lines + 20
                    ),
                    guidance_message=guidance_message,
                ).strip()

                # recover the original file
                shutil.move(temp_backup_file.name, src_abs_path)
                return lint_error_info

    except FileNotFoundError as e:
        return f"File not found: {e}\n"
    except IOError as e:
        return f"An error occurred while handling the file: {e}\n"
    except ValueError as e:
        return f"Invalid input: {e}\n"
    except Exception as e:
        # NOTE(review): `lines` is unbound here if the failure happened before
        # the original file was read; that would raise NameError — confirm.
        guidance_message = self._get_indentation_info(content, start or len(lines))
        guidance_message += (
            "You either need to 1) Specify the correct start/end line arguments or 2) Enlarge the range of original code.\n"
            "DO NOT re-run the same failed edit command. Running it again will lead to the same error."
        )
        error_info = ERROR_GUIDANCE.format(
            linter_error_msg=LINTER_ERROR_MSG + str(e),
            window_after_applied=self._print_window(file_name, start or len(lines), 100),
            window_before_applied=self._print_window(Path(temp_backup_file.name), start or len(lines), 100),
            guidance_message=guidance_message,
        ).strip()
        # Clean up the temporary file if an error occurs
        shutil.move(temp_backup_file.name, src_abs_path)
        if temp_file_path and Path(temp_file_path).exists():
            Path(temp_file_path).unlink()

        # logger.warning(f"An unexpected error occurred: {e}")
        raise Exception(f"{error_info}") from e

    # Update the file information and print the updated content
    with file_name.open("r", encoding="utf-8") as file:
        n_total_lines = max(1, len(file.readlines()))
    if first_error_line is not None and int(first_error_line) > 0:
        self.current_line = first_error_line
    else:
        if is_append:
            self.current_line = max(1, len(lines))  # end of original file
        else:
            self.current_line = start or n_total_lines or 1
    success_edit_info = SUCCESS_EDIT_INFO.format(
        file_name=file_name.resolve(),
        n_total_lines=n_total_lines,
        window_after_applied=self._print_window(file_name, self.current_line, self.window),
        line_number=self.current_line,
    ).strip()
    return success_edit_info
def edit_file_by_replace(
self,
file_name: str,
first_replaced_line_number: int,
first_replaced_line_content: str,
last_replaced_line_number: int,
last_replaced_line_content: str,
new_content: str,
) -> str:
"""
Line numbers start from 1. Replace lines from start_line to end_line (inclusive) with the new_content in the open file.
All of the new_content will be entered, so makesure your indentation is formatted properly.
The new_content must be a complete block of code.
Example 1:
Given a file "/workspace/example.txt" with the following content:
```
001|contain f
002|contain g
003|contain h
004|contain i
```
EDITING: If you want to replace line 2 and line 3
edit_file_by_replace(
"/workspace/example.txt",
first_replaced_line_number=2,
first_replaced_line_content="contain g",
last_replaced_line_number=3,
last_replaced_line_content="contain h",
new_content="new content",
)
This will replace the second line 2 and line 3 with "new content".
The resulting file will be:
```
001|contain f
002|new content
003|contain i
```
Example 2:
Given a file "/workspace/example.txt" with the following content:
```
001|contain f
002|contain g
003|contain h
004|contain i
```
EDITING: If you want to remove the line 2 and line 3.
edit_file_by_replace(
"/workspace/example.txt",
first_replaced_line_number=2,
first_replaced_line_content="contain g",
last_replaced_line_number=3,
last_replaced_line_content="contain h",
new_content="",
)
This will remove line 2 and line 3.
The resulting file will be:
```
001|contain f
002|
003|contain i
```
Args:
file_name (str): The name of the file to edit.
first_replaced_line_number (int): The line number to start the edit at, starting from 1.
first_replaced_line_content (str): The content of the start replace line, according to the first_replaced_line_number.
last_replaced_line_number (int): The line number to end the edit at (inclusive), starting from 1.
last_replaced_line_content (str): The content of the end replace line, according to the last_replaced_line_number.
new_content (str): The text to replace the current selection with, must conform to PEP8 standards. The content in the start line and end line will also be replaced.
"""
file_name = self._try_fix_path(file_name)
# Check if the first_replaced_line_number and last_replaced_line_number correspond to the appropriate content.
mismatch_error = ""
with file_name.open() as file:
content = file.read()
# Ensure the content ends with a newline character
if not content.endswith("\n"):
content += "\n"
lines = content.splitlines(True)
total_lines = len(lines)
check_list = [
("first", first_replaced_line_number, first_replaced_line_content),
("last", last_replaced_line_number, last_replaced_line_content),
]
for position, line_number, line_content in check_list:
if line_number > len(lines) or lines[line_number - 1].rstrip() != line_content:
start = max(1, line_number - 3)
end = min(total_lines, line_number + 3)
context = "\n".join(
[
f'The {cur_line_number:03d} line is "{lines[cur_line_number-1].rstrip()}"'
for cur_line_number in range(start, end + 1)
]
)
mismatch_error += LINE_NUMBER_AND_CONTENT_MISMATCH.format(
position=position,
line_number=line_number,
true_content=lines[line_number - 1].rstrip()
if line_number - 1 < len(lines)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | true |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/__init__.py | metagpt/tools/libs/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/11/16 16:32
# @Author : lidanyang
# @File : __init__.py
# @Desc :
from metagpt.tools.libs import (
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
web_scraping,
# email_login,
terminal,
editor,
browser,
deployer,
git,
)
from metagpt.tools.libs.env import get_env, set_get_env_entry, default_get_env, get_env_description, get_env_default
_ = (
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
web_scraping,
# email_login,
terminal,
editor,
browser,
deployer,
git,
get_env,
get_env_default,
get_env_description,
set_get_env_entry,
default_get_env,
) # Avoid pre-commit error
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/feature_engineering.py | metagpt/tools/libs/feature_engineering.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/11/17 10:33
# @Author : lidanyang
# @File : feature_engineering.py
# @Desc : Feature Engineering Tools
from __future__ import annotations
import itertools
# import lightgbm as lgb
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from pandas.core.dtypes.common import is_object_dtype
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import KFold
from sklearn.preprocessing import KBinsDiscretizer, PolynomialFeatures
from metagpt.tools.libs.data_preprocess import MLProcess
from metagpt.tools.tool_registry import register_tool
TAGS = ["feature engineering", "machine learning"]
@register_tool(tags=TAGS)
class PolynomialExpansion(MLProcess):
    """
    Add polynomial and interaction features from selected numeric columns to input DataFrame.
    """

    def __init__(self, cols: list, label_col: str, degree: int = 2):
        """
        Initialize self.

        Args:
            cols (list): Columns for polynomial expansion.
            label_col (str): Label column name.
            degree (int, optional): Degree of the generated polynomial features. Defaults to 2.
        """
        self.cols = cols
        self.degree = degree
        self.label_col = label_col
        if self.label_col in self.cols:
            self.cols.remove(self.label_col)
        self.poly = PolynomialFeatures(degree=degree, include_bias=False)

    def fit(self, df: pd.DataFrame):
        if not self.cols:
            return
        if len(self.cols) > 10:
            # Too many columns would explode the feature space: keep only the
            # ten columns most correlated (in absolute value) with the label.
            ranking = df[self.cols + [self.label_col]].corr()[self.label_col].abs().sort_values(ascending=False)
            self.cols = ranking.index.tolist()[1:11]
        self.poly.fit(df[self.cols].fillna(0))

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        if not self.cols:
            return df
        expanded = self.poly.transform(df[self.cols].fillna(0))
        feature_names = self.poly.get_feature_names_out(self.cols)
        expanded_df = pd.DataFrame(expanded, index=df.index, columns=feature_names)
        return pd.concat([df.drop(self.cols, axis=1), expanded_df], axis=1)
@register_tool(tags=TAGS)
class CatCount(MLProcess):
    """
    Add value counts of a categorical column as new feature.
    """

    def __init__(self, col: str):
        """
        Initialize self.

        Args:
            col (str): Column for value counts.
        """
        self.col = col
        self.encoder_dict = None

    def fit(self, df: pd.DataFrame):
        # Frequency of each category, learned from the fitting frame.
        self.encoder_dict = df[self.col].value_counts().to_dict()

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        result = df.copy()
        result[f"{self.col}_cnt"] = result[self.col].map(self.encoder_dict)
        return result
@register_tool(tags=TAGS)
class TargetMeanEncoder(MLProcess):
    """
    Encode a categorical column by the mean of the label column, and adds the result as a new feature.
    """

    def __init__(self, col: str, label: str):
        """
        Initialize self.

        Args:
            col (str): Column to be mean encoded.
            label (str): Predicted label column.
        """
        self.col = col
        self.label = label
        self.encoder_dict = None

    def fit(self, df: pd.DataFrame):
        # Per-category mean of the label, learned from the fitting frame.
        self.encoder_dict = df.groupby(self.col)[self.label].mean().to_dict()

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        encoded = df.copy()
        encoded[f"{self.col}_target_mean"] = encoded[self.col].map(self.encoder_dict)
        return encoded
@register_tool(tags=TAGS)
class KFoldTargetMeanEncoder(MLProcess):
    """
    Add a new feature to the DataFrame by k-fold mean encoding of a categorical column using the label column.
    """

    def __init__(self, col: str, label: str, n_splits: int = 5, random_state: int = 2021):
        """
        Initialize self.

        Args:
            col (str): Column to be k-fold mean encoded.
            label (str): Predicted label column.
            n_splits (int, optional): Number of splits for K-fold. Defaults to 5.
            random_state (int, optional): Random seed. Defaults to 2021.
        """
        self.col = col
        self.label = label
        self.n_splits = n_splits
        self.random_state = random_state
        self.encoder_dict = None

    def fit(self, df: pd.DataFrame):
        work = df.copy()
        splitter = KFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state)

        fallback_mean = work[self.label].mean()
        col_name = f"{self.col}_kf_target_mean"
        # Encode each validation fold using the means learned on its training
        # folds, so a row never sees its own label.
        for train_idx, valid_idx in splitter.split(work, work[self.label]):
            train_fold, valid_fold = work.iloc[train_idx], work.iloc[valid_idx]
            fold_means = train_fold.groupby(self.col)[self.label].mean()
            work.loc[work.index[valid_idx], col_name] = valid_fold[self.col].map(fold_means)
        # Categories unseen in a training fold fall back to the global mean.
        work[col_name] = work[col_name].fillna(fallback_mean)
        self.encoder_dict = work.groupby(self.col)[col_name].mean().to_dict()

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        encoded = df.copy()
        encoded[f"{self.col}_kf_target_mean"] = encoded[self.col].map(self.encoder_dict)
        return encoded
@register_tool(tags=TAGS)
class CatCross(MLProcess):
    """
    Add pairwise crossed features and convert them to numerical features.
    """

    def __init__(self, cols: list, max_cat_num: int = 100):
        """
        Initialize self.

        Args:
            cols (list): Columns to be pairwise crossed, at least 2 columns.
            max_cat_num (int, optional): Maximum unique categories per crossed feature. Defaults to 100.
        """
        self.cols = cols
        self.max_cat_num = max_cat_num
        self.combs = []
        self.combs_map = {}

    @staticmethod
    def _cross_two(comb, df):
        """
        Cross two columns and convert them to numerical features.

        Args:
            comb (tuple): The pair of columns to be crossed.
            df (pd.DataFrame): The input DataFrame.

        Returns:
            tuple: The new column name and the crossed feature map.
        """
        new_col = f"{comb[0]}_{comb[1]}"
        new_col_combs = list(itertools.product(df[comb[0]].unique(), df[comb[1]].unique()))
        ll = list(range(len(new_col_combs)))
        comb_map = dict(zip(new_col_combs, ll))
        return new_col, comb_map

    def fit(self, df: pd.DataFrame):
        # Bug fix: the original removed items from self.cols while iterating
        # over it, which skips the element immediately following each removal.
        # Filtering into a new list considers every column exactly once.
        self.cols = [col for col in self.cols if df[col].nunique() <= self.max_cat_num]
        self.combs = list(itertools.combinations(self.cols, 2))
        res = Parallel(n_jobs=4, require="sharedmem")(delayed(self._cross_two)(comb, df) for comb in self.combs)
        self.combs_map = dict(res)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        new_df = df.copy()
        for comb in self.combs:
            new_col = f"{comb[0]}_{comb[1]}"
            _map = self.combs_map[new_col]
            new_df[new_col] = pd.Series(zip(new_df[comb[0]], new_df[comb[1]])).map(_map)
            # Pairs unseen during fit get a fresh code one past the known range.
            # (Assign instead of chained inplace fillna, which is unreliable
            # under pandas copy-on-write.)
            new_df[new_col] = new_df[new_col].fillna(max(_map.values()) + 1)
            new_df[new_col] = new_df[new_col].astype(int)
        return new_df
@register_tool(tags=TAGS)
class GroupStat(MLProcess):
    """
    Aggregate specified column in a DataFrame grouped by another column, adding new features named '<agg_col>_<agg_func>_by_<group_col>'.
    """

    def __init__(self, group_col: str, agg_col: str, agg_funcs: list):
        """
        Initialize self.

        Args:
            group_col (str): Column used for grouping.
            agg_col (str): Column on which aggregation is performed.
            agg_funcs (list): List of aggregation functions to apply, such as ['mean', 'std']. Each function must be supported by pandas.
        """
        self.group_col = group_col
        self.agg_col = agg_col
        self.agg_funcs = agg_funcs
        self.group_df = None

    def fit(self, df: pd.DataFrame):
        stats = df.groupby(self.group_col)[self.agg_col].agg(self.agg_funcs).reset_index()
        # One stat column per aggregation function, named '<agg>_<fn>_by_<group>'.
        renamed = [f"{self.agg_col}_{fn}_by_{self.group_col}" for fn in self.agg_funcs]
        stats.columns = [self.group_col] + renamed
        self.group_df = stats

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        return df.merge(self.group_df, on=self.group_col, how="left")
@register_tool(tags=TAGS)
class SplitBins(MLProcess):
    """
    Inplace binning of continuous data into intervals, returning integer-encoded bin identifiers directly.
    """

    def __init__(self, cols: list, strategy: str = "quantile"):
        """
        Initialize self.

        Args:
            cols (list): Columns to be binned inplace.
            strategy (str, optional): Strategy used to define the widths of the bins. Enum: ['quantile', 'uniform', 'kmeans']. Defaults to 'quantile'.
        """
        self.cols = cols
        self.strategy = strategy
        self.encoder = None

    def fit(self, df: pd.DataFrame):
        # Ordinal encoding yields integer bin ids; NaNs are zero-filled before binning.
        self.encoder = KBinsDiscretizer(strategy=self.strategy, encode="ordinal")
        self.encoder.fit(df[self.cols].fillna(0))

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        binned = df.copy()
        binned[self.cols] = self.encoder.transform(binned[self.cols].fillna(0))
        return binned
# @register_tool(tags=TAGS)
class ExtractTimeComps(MLProcess):
    """
    Extract time components from a datetime column and add them as new features.
    """

    def __init__(self, time_col: str, time_comps: list):
        """
        Initialize self.

        Args:
            time_col (str): The name of the column containing time data.
            time_comps (list): List of time components to extract. Each component must be in ['year', 'month', 'day', 'hour', 'dayofweek', 'is_weekend'].
        """
        self.time_col = time_col
        self.time_comps = time_comps

    def fit(self, df: pd.DataFrame):
        # Stateless transform: nothing to learn.
        pass

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        ts = pd.to_datetime(df[self.time_col], errors="coerce")
        # Extractors in the fixed output order the original used, regardless
        # of the order given in self.time_comps.
        extractors = {
            "year": lambda s: s.dt.year,
            "month": lambda s: s.dt.month,
            "day": lambda s: s.dt.day,
            "hour": lambda s: s.dt.hour,
            "dayofweek": lambda s: s.dt.dayofweek + 1,
            "is_weekend": lambda s: s.dt.dayofweek.isin([5, 6]).astype(int),
        }
        parts = pd.DataFrame()
        for comp, extract in extractors.items():
            if comp in self.time_comps:
                parts[comp] = extract(ts)
        return pd.concat([df, parts], axis=1)
@register_tool(tags=TAGS)
class GeneralSelection(MLProcess):
    """
    Drop all nan feats and feats with only one unique value.
    """

    def __init__(self, label_col: str):
        self.label_col = label_col
        # Feature names kept after fit() (never includes label_col).
        self.feats = []

    def fit(self, df: pd.DataFrame):
        # Fixes over the original:
        # 1. `feats.remove(col)` could be called more than once for a column
        #    matching several drop conditions, raising ValueError; and the
        #    label column (never in `feats`) could also be "removed".
        # 2. The infinity check tested `== np.inf` twice; the second test now
        #    correctly checks for -np.inf.
        feats = []
        for col in df.columns:
            if col == self.label_col:
                continue  # the label is appended separately in transform()
            if df[col].isnull().sum() / df.shape[0] == 1:
                continue  # entirely NaN
            if df[col].nunique() == 1:
                continue  # constant column
            if df.loc[df[col] == np.inf].shape[0] != 0 or df.loc[df[col] == -np.inf].shape[0] != 0:
                continue  # contains +/- infinity
            if is_object_dtype(df[col]) and df[col].nunique() == df.shape[0]:
                continue  # object column with a unique value per row (id-like)
            feats.append(col)

        self.feats = feats

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        new_df = df[self.feats + [self.label_col]]
        return new_df
# skip for now because lgb is needed
# @register_tool(tags=TAGS)
class TreeBasedSelection(MLProcess):
    """
    Select features based on tree-based model and remove features with low importance.

    NOTE(review): this class references ``lgb`` (LightGBM), whose import is
    commented out at the top of this module, so calling ``fit`` raises
    ``NameError`` until that import is restored — which is why the
    ``register_tool`` decorator above is also commented out. Confirm intent
    before enabling.
    """

    def __init__(self, label_col: str, task_type: str):
        """
        Initialize self.

        Args:
            label_col (str): Label column name.
            task_type (str): Task type, 'cls' for classification, 'mcls' for multi-class classification, 'reg' for regression.
        """
        self.label_col = label_col
        self.task_type = task_type
        # Selected feature names plus the label column; populated by fit().
        self.feats = None

    def fit(self, df: pd.DataFrame):
        # Base LightGBM parameters; objective/metric are overridden per task below.
        params = {
            "boosting_type": "gbdt",
            "objective": "binary",
            "learning_rate": 0.1,
            "num_leaves": 31,
        }

        if self.task_type == "cls":
            params["objective"] = "binary"
            params["metric"] = "auc"
        elif self.task_type == "mcls":
            params["objective"] = "multiclass"
            params["num_class"] = df[self.label_col].nunique()
            params["metric"] = "auc_mu"
        elif self.task_type == "reg":
            params["objective"] = "regression"
            params["metric"] = "rmse"

        # Only numeric columns (minus the label) are fed to the model.
        num_cols = df.select_dtypes(include=np.number).columns.tolist()
        cols = [f for f in num_cols if f not in [self.label_col]]

        dtrain = lgb.Dataset(df[cols], df[self.label_col])
        model = lgb.train(params, dtrain, num_boost_round=100)
        df_imp = pd.DataFrame({"feature_name": dtrain.feature_name, "importance": model.feature_importance("gain")})
        # Keep only features with positive gain importance, most important first.
        df_imp.sort_values("importance", ascending=False, inplace=True)
        df_imp = df_imp[df_imp["importance"] > 0]
        self.feats = df_imp["feature_name"].tolist()
        self.feats.append(self.label_col)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        new_df = df[self.feats]
        return new_df
@register_tool(tags=TAGS)
class VarianceBasedSelection(MLProcess):
    """
    Select features based on variance and remove features with low variance.
    """

    def __init__(self, label_col: str, threshold: float = 0):
        """
        Initialize self.

        Args:
            label_col (str): Label column name.
            threshold (float, optional): Threshold for variance. Defaults to 0.
        """
        self.label_col = label_col
        self.threshold = threshold
        self.feats = None
        self.selector = VarianceThreshold(threshold=self.threshold)

    def fit(self, df: pd.DataFrame):
        numeric = df.select_dtypes(include=np.number).columns.tolist()
        candidates = [c for c in numeric if c != self.label_col]
        self.selector.fit(df[candidates])
        # Keep the candidate columns whose variance exceeds the threshold,
        # then always carry the label column along.
        kept = self.selector.get_support(indices=True)
        self.feats = df[candidates].columns[kept].tolist()
        self.feats.append(self.label_col)

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        return df[self.feats]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/software_development.py | metagpt/tools/libs/software_development.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import uuid
from datetime import datetime
from pathlib import Path
from typing import Optional
from metagpt.actions.requirement_analysis.framework import (
EvaluateFramework,
WriteFramework,
save_framework,
)
from metagpt.actions.requirement_analysis.trd import (
CompressExternalInterfaces,
DetectInteraction,
EvaluateTRD,
WriteTRD,
)
from metagpt.const import ASSISTANT_ALIAS, TEST_DATA_PATH
from metagpt.context import Context
from metagpt.logs import ToolLogItem, log_tool_output, logger
from metagpt.utils.common import aread
from metagpt.utils.cost_manager import CostManager
async def import_git_repo(url: str) -> Path:
    """
    Imports a project from a Git website and formats it to MetaGPT project format to enable incremental appending requirements.

    Args:
        url (str): The Git project URL, such as "https://github.com/geekan/MetaGPT.git".

    Returns:
        Path: The path of the formatted project.

    Example:
        # The Git project URL to input
        >>> git_url = "https://github.com/geekan/MetaGPT.git"

        # Import the Git repository and get the formatted project path
        >>> formatted_project_path = await import_git_repo(git_url)
        >>> print("Formatted project path:", formatted_project_path)
        /PATH/TO/THE/FORMMATTED/PROJECT
    """
    # Imported locally, presumably to avoid a circular import at module load time — confirm.
    from metagpt.actions.import_repo import ImportRepo
    from metagpt.context import Context

    # Announce which tool is running in the caller's log stream.
    log_tool_output(
        output=[ToolLogItem(name=ASSISTANT_ALIAS, value=import_git_repo.__name__)], tool_name=import_git_repo.__name__
    )

    # Clone and reformat the repository; the result is exposed through ctx.repo.
    ctx = Context()
    action = ImportRepo(repo_path=url, context=ctx)
    await action.run()

    # Log and return the working directory of the formatted project.
    outputs = [ToolLogItem(name="MetaGPT Project", value=str(ctx.repo.workdir))]
    log_tool_output(output=outputs, tool_name=import_git_repo.__name__)

    return ctx.repo.workdir
async def extract_external_interfaces(acknowledge: str) -> str:
    """
    Extracts and compresses information about external system interfaces from a given acknowledgement text.

    Args:
        acknowledge (str): A natural text of acknowledgement containing details about external system interfaces.

    Returns:
        str: A compressed version of the information about external system interfaces.

    Example:
        >>> acknowledge = "## Interfaces\\n..."
        >>> external_interfaces = await extract_external_interfaces(acknowledge=acknowledge)
        >>> print(external_interfaces)
        ```json\n[\n{\n"id": 1,\n"inputs": {...
    """
    # Delegate directly to the compression action.
    return await CompressExternalInterfaces().run(acknowledge=acknowledge)
async def mock_asearch_acknowledgement(use_case_actors: str):
    """Return canned acknowledgement markdown from the test-data fixture.

    Placeholder: callers note it will be "Replaced by acknowledgement_repo later".
    `use_case_actors` is currently ignored; the same fixture is returned regardless.
    """
    return await aread(filename=TEST_DATA_PATH / "requirements/1.acknowledge.md")
async def write_trd(
    use_case_actors: str,
    user_requirements: str,
    investment: float = 10,
    context: Optional[Context] = None,
) -> str:
    """
    Handles the writing of a Technical Requirements Document (TRD) based on user requirements.

    Args:
        user_requirements (str): The new/incremental user requirements.
        use_case_actors (str): Description of the actors involved in the use case.
        investment (float): Budget. Automatically stops optimizing TRD when the budget is overdrawn.
        context (Context, optional): The context configuration. Default is None.

    Returns:
        str: The newly created TRD.

    Example:
        >>> # Given a new user requirements, write out a new TRD.
        >>> user_requirements = "Write a 'snake game' TRD."
        >>> use_case_actors = "- Actor: game player;\\n- System: snake game; \\n- External System: game center;"
        >>> investment = 10.0
        >>> trd = await write_trd(
        >>>     user_requirements=user_requirements,
        >>>     use_case_actors=use_case_actors,
        >>>     investment=investment,
        >>> )
        >>> print(trd)
        ## Technical Requirements Document\n ...
    """
    context = context or Context(cost_manager=CostManager(max_budget=investment))
    compress_acknowledge = CompressExternalInterfaces()
    acknowledgement = await mock_asearch_acknowledgement(use_case_actors)  # Replaced by acknowledgement_repo later.
    # Distill the available external interfaces out of the acknowledgement text once, up front.
    external_interfaces = await compress_acknowledge.run(acknowledge=acknowledgement)
    detect_interaction = DetectInteraction(context=context)
    w_trd = WriteTRD(context=context)
    evaluate_trd = EvaluateTRD(context=context)
    is_pass = False
    evaluation_conclusion = ""
    interaction_events = ""
    trd = ""
    # Detect -> write -> evaluate loop: each round feeds the previous TRD and the
    # evaluator's conclusion back in, until evaluation passes or the budget is spent.
    while not is_pass and (context.cost_manager.total_cost < context.cost_manager.max_budget):
        interaction_events = await detect_interaction.run(
            user_requirements=user_requirements,
            use_case_actors=use_case_actors,
            legacy_interaction_events=interaction_events,
            evaluation_conclusion=evaluation_conclusion,
        )
        trd = await w_trd.run(
            user_requirements=user_requirements,
            use_case_actors=use_case_actors,
            available_external_interfaces=external_interfaces,
            evaluation_conclusion=evaluation_conclusion,
            interaction_events=interaction_events,
            previous_version_trd=trd,
        )
        evaluation = await evaluate_trd.run(
            user_requirements=user_requirements,
            use_case_actors=use_case_actors,
            trd=trd,
            interaction_events=interaction_events,
        )
        is_pass = evaluation.is_pass
        evaluation_conclusion = evaluation.conclusion
    return trd
async def write_framework(
    use_case_actors: str,
    trd: str,
    additional_technical_requirements: str,
    output_dir: Optional[str] = "",
    investment: float = 20.0,
    context: Optional[Context] = None,
    max_loop: int = 20,
) -> str:
    """
    Run the action to generate a software framework based on the provided TRD and related information.

    Args:
        use_case_actors (str): Description of the use case actors involved.
        trd (str): Technical Requirements Document detailing the requirements.
        additional_technical_requirements (str): Any additional technical requirements.
        output_dir (str, optional): Path to save the software framework files. Default is an empty string,
            in which case a timestamped directory under the workspace is generated.
        investment (float): Budget. Automatically stops optimizing when the budget is overdrawn.
        context (Context, optional): The context configuration. Default is None.
        max_loop (int, optional): Acts as a safety exit valve when cost statistics fail. Default is 20.

    Returns:
        str: The generated software framework as a string of pathnames.

    Example:
        >>> use_case_actors = "- Actor: game player;\\n- System: snake game; \\n- External System: game center;"
        >>> trd = "## TRD\\n..."
        >>> additional_technical_requirements = "Using Java language, ..."
        >>> investment = 15.0
        >>> framework = await write_framework(
        >>>    use_case_actors=use_case_actors,
        >>>    trd=trd,
        >>>    additional_technical_requirements=additional_technical_requirements,
        >>>    investment=investment,
        >>> )
        >>> print(framework)
        [{"path":"balabala", "filename":"...", ...
    """
    context = context or Context(cost_manager=CostManager(max_budget=investment))
    # Renamed from `write_framework` — the original local shadowed this function's own name.
    framework_writer = WriteFramework(context=context)
    evaluate_framework = EvaluateFramework(context=context)
    is_pass = False
    framework = ""
    evaluation_conclusion = ""
    acknowledgement = await mock_asearch_acknowledgement(use_case_actors)  # Replaced by acknowledgement_repo later.
    loop_count = 0
    output_dir = (
        Path(output_dir)
        if output_dir
        else context.config.workspace.path / (datetime.now().strftime("%Y%m%d%H%M%ST") + uuid.uuid4().hex[0:8])
    )
    file_list = []
    # Write -> evaluate loop: regenerate the framework until evaluation passes or the budget runs out.
    while not is_pass and (context.cost_manager.total_cost < context.cost_manager.max_budget):
        try:
            framework = await framework_writer.run(
                use_case_actors=use_case_actors,
                trd=trd,
                acknowledge=acknowledgement,
                legacy_output=framework,
                evaluation_conclusion=evaluation_conclusion,
                additional_technical_requirements=additional_technical_requirements,
            )
        except Exception as e:
            # Best-effort: keep the last good framework rather than failing the whole tool call.
            logger.info(f"{e}")
            break
        evaluation = await evaluate_framework.run(
            use_case_actors=use_case_actors,
            trd=trd,
            acknowledge=acknowledgement,
            legacy_output=framework,
            additional_technical_requirements=additional_technical_requirements,
        )
        is_pass = evaluation.is_pass
        evaluation_conclusion = evaluation.conclusion
        loop_count += 1
        logger.info(f"Loop {loop_count}")
        # Safety valve: if cost tracking reports (almost) nothing, fall back to an iteration cap.
        if context.cost_manager.total_cost < 1 and loop_count > max_loop:
            break
    file_list = await save_framework(dir_data=framework, trd=trd, output_dir=output_dir)
    logger.info(f"Output:\n{file_list}")
    return "## Software Framework" + "".join([f"\n- {i}" for i in file_list])
async def write_trd_and_framework(
    use_case_actors: str,
    user_requirements: str,
    additional_technical_requirements: str,
    investment: float = 50.0,
    output_dir: Optional[str] = "",
    context: Optional[Context] = None,
) -> str:
    """Convenience wrapper: write a TRD from the requirements, then generate a software framework from it.

    Args:
        use_case_actors (str): Description of the actors involved in the use case.
        user_requirements (str): The new/incremental user requirements.
        additional_technical_requirements (str): Any additional technical requirements.
        investment (float): Budget shared by both phases; optimization stops when it is overdrawn.
        output_dir (str, optional): Where to save the framework files; auto-generated when empty.
        context (Context, optional): Optional context; a fresh one with a budgeted cost manager
            is created when omitted, and the same context (and budget) is reused for both phases.

    Returns:
        str: The generated software framework as a string of pathnames.
    """
    context = context or Context(cost_manager=CostManager(max_budget=investment))
    trd = await write_trd(use_case_actors=use_case_actors, user_requirements=user_requirements, context=context)
    return await write_framework(
        use_case_actors=use_case_actors,
        trd=trd,
        additional_technical_requirements=additional_technical_requirements,
        output_dir=output_dir,
        context=context,
    )
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/tools/libs/env.py | metagpt/tools/libs/env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/4/25
@Author : mashenquan
@File : env.py
@Desc: Implement `get_env`. RFC 216 2.4.2.4.2.
"""
import os
from typing import Dict, Optional
class EnvKeyNotFoundError(Exception):
    """Raised when no value exists for a requested environment key (see `get_env`)."""

    def __init__(self, info):
        super().__init__(info)
def to_app_key(key: str, app_name: str = None) -> str:
    """Build the namespaced key `"<app_name>-<key>"`; a falsy app_name yields the bare key."""
    if not app_name:
        return key
    return f"{app_name}-{key}"
def split_app_key(app_key: str) -> (str, str):
    """Split `"<app_name>-<key>"` at the first dash; a dash-free key maps to ("", key)."""
    parts = app_key.split("-", 1)
    if len(parts) == 1:
        return "", app_key
    return parts[0], parts[1]
async def default_get_env(key: str, app_name: str = None) -> str:
    """Default lookup backing `get_env`.

    Lookup order:
      1. `os.environ` under the app key (`"<app_name>-<key>"` or bare `<key>`).
      2. `os.environ` under the same name with `-` replaced by `_`.
      3. `Context().kwargs` under the original app key.

    Raises:
        EnvKeyNotFoundError: When none of the sources contain the key.
    """
    app_key = to_app_key(key=key, app_name=app_name)
    if app_key in os.environ:
        return os.environ[app_key]

    env_app_key = app_key.replace("-", "_")  # "-" is not supported by linux environment variable
    if env_app_key in os.environ:
        return os.environ[env_app_key]

    # Imported locally, presumably to avoid a circular import — confirm.
    from metagpt.context import Context

    context = Context()
    val = context.kwargs.get(app_key, None)
    if val is not None:
        return val

    raise EnvKeyNotFoundError(f"EnvKeyNotFoundError: {key}, app_name:{app_name or ''}")
async def default_get_env_description() -> Dict[str, str]:
    """Map example `await get_env(...)` call strings to human-readable descriptions.

    Covers both OS environment variables and the kwargs carried by a fresh Context.
    """
    result = {}

    # One entry per OS environment variable, keyed by the call that would fetch it.
    for k in os.environ.keys():
        app_name, key = split_app_key(k)
        call = f'await get_env(key="{key}", app_name="{app_name}")'
        result[call] = f"Return the value of environment variable `{k}`."

    # Imported locally, presumably to avoid a circular import — confirm.
    from metagpt.context import Context

    # Context kwargs act as additional, application-scoped environment variables.
    context = Context()
    for k in context.kwargs.__dict__.keys():
        app_name, key = split_app_key(k)
        call = f'await get_env(key="{key}", app_name="{app_name}")'
        result[call] = f"Get the value of environment variable `{k}`."
    return result
# Pluggable entry points: `set_get_env_entry` swaps these at runtime so a host
# application can supply its own environment lookup / description functions.
_get_env_entry = default_get_env
_get_env_description_entry = default_get_env_description
async def get_env(key: str, app_name: str = None) -> str:
    """
    Retrieve the value of the environment variable for the specified key.

    Args:
        key (str): The key of the environment variable.
        app_name (str, optional): The name of the application. Defaults to None.

    Returns:
        str: The value corresponding to the given key in the environment variables.

    Example:
        This function can be used to retrieve environment variables asynchronously.
        It should be called using `await`.

        >>> from metagpt.tools.libs.env import get_env
        >>> api_key = await get_env("API_KEY")
        >>> print(api_key)
        <API_KEY>

        >>> from metagpt.tools.libs.env import get_env
        >>> api_key = await get_env(key="API_KEY", app_name="GITHUB")
        >>> print(api_key)
        <API_KEY>

    Note:
        This is an asynchronous function and must be called using `await`.
        Dispatches to the pluggable entry installed by `set_get_env_entry`,
        falling back to `default_get_env`.
    """
    entry = _get_env_entry or default_get_env
    return await entry(key=key, app_name=app_name)
async def get_env_default(key: str, app_name: str = None, default_value: str = None) -> Optional[str]:
    """
    Retrieves the value for the specified environment variable key. If the key is not found,
    returns the default value.

    Args:
        key (str): The name of the environment variable to retrieve.
        app_name (str, optional): The name of the application or component to associate with the environment variable.
        default_value (str, optional): The default value to return if the environment variable is not found.

    Returns:
        str or None: The value of the environment variable if found, otherwise the default value.

    Example:
        >>> from metagpt.tools.libs.env import get_env
        >>> api_key = await get_env_default(key="NOT_EXISTS_API_KEY", default_value="<API_KEY>")
        >>> print(api_key)
        <API_KEY>

        >>> from metagpt.tools.libs.env import get_env
        >>> api_key = await get_env_default(key="NOT_EXISTS_API_KEY", app_name="GITHUB", default_value="<API_KEY>")
        >>> print(api_key)
        <API_KEY>
    """
    try:
        value = await get_env(key=key, app_name=app_name)
    except EnvKeyNotFoundError:
        # Absence is expected here; fall back to the caller-supplied default.
        return default_value
    return value
async def get_env_description() -> Dict[str, str]:
    """Return example-call -> description pairs from the installed entry (or the default one)."""
    entry = _get_env_description_entry or default_get_env_description
    return await entry()
def set_get_env_entry(value, description):
    """Modify the `get_env` entry and the `get_env_description` entry.

    Args:
        value (Callable): New async function used by `get_env`; must accept
            `(key, app_name)` like `default_get_env`.
        description (Callable): New async function used by `get_env_description`;
            must take no arguments and return a Dict[str, str] like
            `default_get_env_description`. (It is awaited, not a plain string.)

    This function swaps the module-level entry points so a host application can
    provide its own environment lookup implementation.
    """
    global _get_env_entry
    global _get_env_description_entry
    _get_env_entry = value
    _get_env_description_entry = description
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/base/base_role.py | metagpt/base/base_role.py | from abc import abstractmethod
from typing import Optional, Union
from metagpt.base.base_serialization import BaseSerialization
class BaseRole(BaseSerialization):
    """Abstract base class for all roles."""

    # Identifier/display name of the role.
    name: str

    @property
    def is_idle(self) -> bool:
        """Indicate whether the role is idle; the base raises and subclasses must override."""
        raise NotImplementedError

    @abstractmethod
    def think(self):
        """Consider what to do and decide on the next course of action."""
        raise NotImplementedError

    @abstractmethod
    def act(self):
        """Perform the current action."""
        raise NotImplementedError

    @abstractmethod
    async def react(self) -> "Message":
        """Entry to one of three strategies by which Role reacts to the observed Message."""

    @abstractmethod
    async def run(self, with_message: Optional[Union[str, "Message", list[str]]] = None) -> Optional["Message"]:
        """Observe, and think and act based on the results of the observation."""

    @abstractmethod
    def get_memories(self, k: int = 0) -> list["Message"]:
        """Return the most recent k memories of this role."""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/base/base_serialization.py | metagpt/base/base_serialization.py | from __future__ import annotations
from typing import Any
from pydantic import BaseModel, model_serializer, model_validator
class BaseSerialization(BaseModel, extra="forbid"):
    """
    PolyMorphic subclasses Serialization / Deserialization Mixin
    - First of all, we need to know that pydantic is not designed for polymorphism.
    - If Engineer is subclass of Role, it would be serialized as Role. If we want to serialize it as Engineer, we need
      to add `class name` to Engineer. So we need Engineer inherit SerializationMixin.

    More details:
    - https://docs.pydantic.dev/latest/concepts/serialization/
    - https://github.com/pydantic/pydantic/discussions/7008 discuss about avoid `__get_pydantic_core_schema__`
    """

    # True only on a class declared with `is_polymorphic_base=True`; such a class
    # deserializes dict payloads into whichever subclass the payload names.
    __is_polymorphic_base = False
    # Registry "<module>.<qualname>" -> class, populated by __init_subclass__.
    __subclasses_map__ = {}

    @model_serializer(mode="wrap")
    def __serialize_with_class_type__(self, default_serializer) -> Any:
        """Serialize with pydantic's default serializer, then tag the dict with the concrete class path."""
        # default serializer, then append the `__module_class_name` field and return
        ret = default_serializer(self)
        ret["__module_class_name"] = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
        return ret

    @model_validator(mode="wrap")
    @classmethod
    def __convert_to_real_type__(cls, value: Any, handler):
        """Route dict payloads to the concrete subclass recorded under `__module_class_name`."""
        if isinstance(value, dict) is False:
            return handler(value)

        # it is a dict so make sure to remove the __module_class_name
        # because we don't allow extra keywords but want to ensure
        # e.g Cat.model_validate(cat.model_dump()) works
        class_full_name = value.pop("__module_class_name", None)

        # if it's not the polymorphic base we construct via default handler
        if not cls.__is_polymorphic_base:
            if class_full_name is None:
                return handler(value)
            elif str(cls) == f"<class '{class_full_name}'>":
                return handler(value)
            else:
                # f"Trying to instantiate {class_full_name} but this is not the polymorphic base class")
                pass

        # otherwise we lookup the correct polymorphic type and construct that
        # instead
        if class_full_name is None:
            raise ValueError("Missing __module_class_name field")

        class_type = cls.__subclasses_map__.get(class_full_name, None)

        if class_type is None:
            # TODO could try dynamic import
            raise TypeError(f"Trying to instantiate {class_full_name}, which has not yet been defined!")

        return class_type(**value)

    def __init_subclass__(cls, is_polymorphic_base: bool = False, **kwargs):
        """Register every subclass in the shared map so deserialization can find it by class path."""
        cls.__is_polymorphic_base = is_polymorphic_base
        cls.__subclasses_map__[f"{cls.__module__}.{cls.__qualname__}"] = cls
        super().__init_subclass__(**kwargs)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/base/base_env.py | metagpt/base/base_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : base environment
import typing
from abc import abstractmethod
from typing import Any, Optional
from metagpt.base.base_env_space import BaseEnvAction, BaseEnvObsParams
from metagpt.base.base_serialization import BaseSerialization
if typing.TYPE_CHECKING:
from metagpt.schema import Message
class BaseEnvironment(BaseSerialization):
    """Base environment: gym-style reset/observe/step interface plus message passing."""

    @abstractmethod
    def reset(
        self,
        *,
        seed: Optional[int] = None,
        options: Optional[dict[str, Any]] = None,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Implement this to get init observation"""

    @abstractmethod
    def observe(self, obs_params: Optional[BaseEnvObsParams] = None) -> Any:
        """Implement this if you want to get partial observation from the env"""

    @abstractmethod
    def step(self, action: BaseEnvAction) -> tuple[dict[str, Any], float, bool, bool, dict[str, Any]]:
        """Implement this to feed a action and then get new observation from the env"""

    @abstractmethod
    def publish_message(self, message: "Message", peekable: bool = True) -> bool:
        """Distribute the message to the recipients."""

    @abstractmethod
    async def run(self, k=1):
        """Process all task at once"""
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/base/base_env_space.py | metagpt/base/base_env_space.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
from enum import IntEnum
from pydantic import BaseModel, ConfigDict, Field
class BaseEnvActionType(IntEnum):
    """Marker enum for environment action types; concrete environments define the members."""

    # # NONE = 0 # no action to run, just get observation
    pass
class BaseEnvAction(BaseModel):
    """env action type and its related params of action functions/apis"""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Integer code, intended to come from a BaseEnvActionType-style enum.
    action_type: int = Field(default=0, description="action type")
class BaseEnvObsType(IntEnum):
    """Marker enum for observation types; concrete environments define the members."""

    # # NONE = 0 # get whole observation from env
    pass
class BaseEnvObsParams(BaseModel):
    """observation params for different EnvObsType to get its observe result"""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Integer code, intended to come from a BaseEnvObsType-style enum.
    obs_type: int = Field(default=0, description="observation type")
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/base/__init__.py | metagpt/base/__init__.py | from metagpt.base.base_env import BaseEnvironment
from metagpt.base.base_role import BaseRole
__all__ = [
"BaseEnvironment",
"BaseRole",
]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/base_store.py | metagpt/document_store/base_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/28 00:01
@Author : alexanderwu
@File : base_store.py
"""
from abc import ABC, abstractmethod
from pathlib import Path
class BaseStore(ABC):
    """Abstract interface for document/vector stores.

    FIXME: consider add_index, set_index and think about granularity.
    """

    @abstractmethod
    def search(self, *args, **kwargs):
        """Query the store; the signature is store-specific."""
        raise NotImplementedError

    @abstractmethod
    def write(self, *args, **kwargs):
        """(Re)build the store from source data; the signature is store-specific."""
        raise NotImplementedError

    @abstractmethod
    def add(self, *args, **kwargs):
        """Insert new entries; the signature is store-specific."""
        raise NotImplementedError
class LocalStore(BaseStore, ABC):
    """Store persisted on the local filesystem: loads a cached index when present,
    otherwise (re)builds it from the raw data file via `write()`."""

    def __init__(self, raw_data_path: Path, cache_dir: Path = None):
        """
        Args:
            raw_data_path: Source data file the store is built from; required.
            cache_dir: Directory holding the persisted index; defaults to the
                raw data file's parent directory.
        """
        if not raw_data_path:
            raise FileNotFoundError
        self.raw_data_path = raw_data_path
        self.fname = self.raw_data_path.stem
        if not cache_dir:
            cache_dir = raw_data_path.parent
        self.cache_dir = cache_dir
        # Try the persisted cache first; rebuild from raw data on a miss.
        self.store = self._load()
        if not self.store:
            self.store = self.write()

    def _get_index_and_store_fname(self, index_ext=".json", docstore_ext=".json"):
        """Return the (vector-index file, docstore file) paths inside cache_dir.

        Bug fix: the extension must be appended to the filename, not joined as a
        separate path segment — the old `cache_dir / "default__vector_store" / ".json"`
        produced paths that never exist, so the persisted cache was never loaded
        and the index was rebuilt on every run. The names match what llama-index
        persists ("default__vector_store.json" / "docstore.json").
        """
        index_file = self.cache_dir / f"default__vector_store{index_ext}"
        store_file = self.cache_dir / f"docstore{docstore_ext}"
        return index_file, store_file

    @abstractmethod
    def _load(self):
        """Load the persisted store from cache_dir, or return None when absent."""
        raise NotImplementedError

    @abstractmethod
    def _write(self, docs, metadatas):
        """Build a fresh store from parallel lists of documents and metadata."""
        raise NotImplementedError
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/qdrant_store.py | metagpt/document_store/qdrant_store.py | from dataclasses import dataclass
from typing import List
from qdrant_client import QdrantClient
from qdrant_client.models import Filter, PointStruct, VectorParams
from metagpt.document_store.base_store import BaseStore
@dataclass
class QdrantConnection:
    """Connection settings for Qdrant; set `memory`, or `url`, or `host`+`port`.

    Args:
        url: qdrant url
        host: qdrant host
        port: qdrant port
        memory: qdrant service use memory mode
        api_key: qdrant cloud api_key
    """

    url: str = None
    host: str = None
    port: int = None
    memory: bool = False
    api_key: str = None
class QdrantStore(BaseStore):
    """Vector store backed by a Qdrant server, cloud instance, or in-memory mode."""

    def __init__(self, connect: QdrantConnection):
        """Create the client; the mode is chosen by which QdrantConnection fields are set
        (memory > url > host+port)."""
        if connect.memory:
            self.client = QdrantClient(":memory:")
        elif connect.url:
            self.client = QdrantClient(url=connect.url, api_key=connect.api_key)
        elif connect.host and connect.port:
            self.client = QdrantClient(host=connect.host, port=connect.port, api_key=connect.api_key)
        else:
            raise Exception("please check QdrantConnection.")

    def create_collection(
        self,
        collection_name: str,
        vectors_config: VectorParams,
        force_recreate=False,
        **kwargs,
    ):
        """
        create a collection
        Args:
            collection_name: collection name
            vectors_config: VectorParams object,detail in https://github.com/qdrant/qdrant-client
            force_recreate: default is False, if True, will delete exists collection,then create it
            **kwargs:

        Returns: True when the collection already exists and is kept;
            otherwise the result of recreate_collection.
        """
        try:
            # get_collection raises when the collection does not exist yet.
            self.client.get_collection(collection_name)
            if force_recreate:
                res = self.client.recreate_collection(collection_name, vectors_config=vectors_config, **kwargs)
                return res
            return True
        except:  # noqa: E722
            # Collection missing (or lookup failed): create it from scratch.
            return self.client.recreate_collection(collection_name, vectors_config=vectors_config, **kwargs)

    def has_collection(self, collection_name: str):
        """Return True if the collection exists; any lookup failure is treated as absent."""
        try:
            self.client.get_collection(collection_name)
            return True
        except:  # noqa: E722
            return False

    def delete_collection(self, collection_name: str, timeout=60):
        """Delete a collection; raise when the server reports failure."""
        res = self.client.delete_collection(collection_name, timeout=timeout)
        if not res:
            raise Exception(f"Delete collection {collection_name} failed.")

    def add(self, collection_name: str, points: List[PointStruct]):
        """
        add some vector data to qdrant
        Args:
            collection_name: collection name
            points: list of PointStruct object, about PointStruct detail in https://github.com/qdrant/qdrant-client

        Returns: NoneX
        """
        # self.client.upload_records()
        self.client.upsert(
            collection_name,
            points,
        )

    def search(
        self,
        collection_name: str,
        query: List[float],
        query_filter: Filter = None,
        k=10,
        return_vector=False,
    ):
        """
        vector search
        Args:
            collection_name: qdrant collection name
            query: input vector
            query_filter: Filter object, detail in https://github.com/qdrant/qdrant-client
            k: return the most similar k pieces of data
            return_vector: whether return vector

        Returns: list of dict
        """
        hits = self.client.search(
            collection_name=collection_name,
            query_vector=query,
            query_filter=query_filter,
            limit=k,
            with_vectors=return_vector,
        )
        # Flatten ScoredPoint objects into plain dicts for callers.
        return [hit.__dict__ for hit in hits]

    def write(self, *args, **kwargs):
        """No-op: required by BaseStore's abstract interface but unused for Qdrant."""
        pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/lancedb_store.py | metagpt/document_store/lancedb_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/8/9 15:42
@Author : unkn-wn (Leon Yee)
@File : lancedb_store.py
"""
import os
import shutil
import lancedb
class LanceStore:
    """Thin wrapper around a single LanceDB table under ./data/lancedb.

    The table is created lazily on the first `add`/`write`; searching or
    deleting before any data exists raises.
    """

    def __init__(self, name):
        self.db = lancedb.connect("./data/lancedb")
        self.name = name
        self.table = None

    def search(self, query, n_results=2, metric="L2", nprobes=20, **kwargs):
        """Vector search for `query` (an embedding).

        Optional kwargs:
            select: columns to return.
            where: SQL-syntax metadata filter, e.g. "price > 100".
        `metric` picks the distance function; higher `nprobes` trades latency
        for recall.
        """
        if self.table is None:
            raise Exception("Table not created yet, please add data first.")
        request = self.table.search(query)
        request = request.limit(n_results)
        request = request.select(kwargs.get("select"))
        request = request.where(kwargs.get("where"))
        request = request.metric(metric)
        request = request.nprobes(nprobes)
        return request.to_df()

    def persist(self):
        raise NotImplementedError

    def write(self, data, metadatas, ids):
        """Bulk insert: one row per embedding, with each metadata dict expanded into columns."""
        rows = [{"vector": data[i], "id": ids[i], **metadatas[i]} for i in range(len(data))]
        if self.table is None:
            self.table = self.db.create_table(self.name, rows)
        else:
            self.table.add(rows)

    def add(self, data, metadata, _id):
        """Insert a single embedding with its metadata and id."""
        row = {"vector": data, "id": _id, **metadata}
        if self.table is None:
            self.table = self.db.create_table(self.name, [row])
        else:
            self.table.add([row])

    def delete(self, _id):
        """Delete a row by id (LanceDB delete uses SQL syntax; strings are quoted)."""
        if self.table is None:
            raise Exception("Table not created yet, please add data first")
        predicate = f"id = '{_id}'" if isinstance(_id, str) else f"id = {_id}"
        return self.table.delete(predicate)

    def drop(self, name):
        """Remove the named table's on-disk data directory, if present."""
        path = os.path.join(self.db.uri, name + ".lance")
        if os.path.exists(path):
            shutil.rmtree(path)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/faiss_store.py | metagpt/document_store/faiss_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/25 10:20
@Author : alexanderwu
@File : faiss_store.py
"""
import asyncio
from pathlib import Path
from typing import Any, Optional
import faiss
from llama_index.core import VectorStoreIndex, load_index_from_storage
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.schema import Document, QueryBundle, TextNode
from llama_index.core.storage import StorageContext
from llama_index.vector_stores.faiss import FaissVectorStore
from metagpt.document import IndexableDocument
from metagpt.document_store.base_store import LocalStore
from metagpt.logs import logger
from metagpt.utils.embedding import get_embedding
class FaissStore(LocalStore):
    """FAISS-backed vector store built on llama-index, persisted under cache_dir."""

    def __init__(
        self, raw_data: Path, cache_dir=None, meta_col="source", content_col="output", embedding: BaseEmbedding = None
    ):
        # Column names used when loading tabular raw data (JSON/XLSX) into documents.
        self.meta_col = meta_col
        self.content_col = content_col
        self.embedding = embedding or get_embedding()
        self.store: VectorStoreIndex
        # LocalStore.__init__ calls _load()/write(), so the fields above must be set first.
        super().__init__(raw_data, cache_dir)

    def _load(self) -> Optional["VectorStoreIndex"]:
        """Load a previously persisted index from cache_dir, or None when the cache is incomplete."""
        index_file, store_file = self._get_index_and_store_fname()
        if not (index_file.exists() and store_file.exists()):
            logger.info("Missing at least one of index_file/store_file, load failed and return None")
            return None
        vector_store = FaissVectorStore.from_persist_dir(persist_dir=self.cache_dir)
        storage_context = StorageContext.from_defaults(persist_dir=self.cache_dir, vector_store=vector_store)
        index = load_index_from_storage(storage_context, embed_model=self.embedding)
        return index

    def _write(self, docs: list[str], metadatas: list[dict[str, Any]]) -> VectorStoreIndex:
        """Build a fresh L2 index from parallel lists of texts and metadata dicts."""
        assert len(docs) == len(metadatas)
        documents = [Document(text=doc, metadata=metadatas[idx]) for idx, doc in enumerate(docs)]
        # NOTE(review): embedding dimension is hard-coded to 1536 — confirm it matches
        # the output size of self.embedding.
        vector_store = FaissVectorStore(faiss_index=faiss.IndexFlatL2(1536))
        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        index = VectorStoreIndex.from_documents(
            documents=documents, storage_context=storage_context, embed_model=self.embedding
        )
        return index

    def persist(self):
        """Write the in-memory index to cache_dir."""
        self.store.storage_context.persist(self.cache_dir)

    def search(self, query: str, expand_cols=False, sep="\n", *args, k=5, **kwargs):
        """Return the top-k matching node texts joined by `sep`; append metadata when expand_cols is True."""
        retriever = self.store.as_retriever(similarity_top_k=k)
        rsp = retriever.retrieve(QueryBundle(query_str=query, embedding=self.embedding.get_text_embedding(query)))
        logger.debug(rsp)
        if expand_cols:
            return str(sep.join([f"{x.node.text}: {x.node.metadata}" for x in rsp]))
        else:
            return str(sep.join([f"{x.node.text}" for x in rsp]))

    async def asearch(self, *args, **kwargs):
        """Async wrapper that runs the blocking search() in a worker thread."""
        return await asyncio.to_thread(self.search, *args, **kwargs)

    def write(self):
        """Initialize the index and library based on the Document (JSON / XLSX, etc.) file provided by the user."""
        if not self.raw_data_path.exists():
            raise FileNotFoundError
        doc = IndexableDocument.from_path(self.raw_data_path, self.content_col, self.meta_col)
        docs, metadatas = doc.get_docs_and_metadatas()
        self.store = self._write(docs, metadatas)
        self.persist()
        return self.store

    def add(self, texts: list[str], *args, **kwargs) -> list[str]:
        """FIXME: Currently, the store is not updated after adding."""
        texts_embeds = self.embedding.get_text_embedding_batch(texts)
        nodes = [TextNode(text=texts[idx], embedding=embed) for idx, embed in enumerate(texts_embeds)]
        self.store.insert_nodes(nodes)
        return []

    def delete(self, *args, **kwargs):
        """Currently, faiss does not provide a delete interface."""
        raise NotImplementedError
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/__init__.py | metagpt/document_store/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/25 10:20
@Author : alexanderwu
@File : __init__.py
"""
from metagpt.document_store.faiss_store import FaissStore
__all__ = ["FaissStore"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/milvus_store.py | metagpt/document_store/milvus_store.py | from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from metagpt.document_store.base_store import BaseStore
@dataclass
class MilvusConnection:
    """Connection settings for a Milvus server; `uri` is required, `token` optional.

    Args:
        uri: milvus url
        token: milvus token
    """

    uri: str = None
    token: str = None
class MilvusStore(BaseStore):
def __init__(self, connect: MilvusConnection):
    """Create a Milvus client from the given connection settings.

    Raises:
        Exception: If pymilvus is not installed, or `connect.uri` is missing.
    """
    # pymilvus is an optional dependency; fail with a clear message when absent.
    try:
        from pymilvus import MilvusClient
    except ImportError:
        raise Exception("Please install pymilvus first.")
    if not connect.uri:
        raise Exception("please check MilvusConnection, uri must be set.")
    self.client = MilvusClient(uri=connect.uri, token=connect.token)
def create_collection(self, collection_name: str, dim: int, enable_dynamic_schema: bool = True):
from pymilvus import DataType
if self.client.has_collection(collection_name=collection_name):
self.client.drop_collection(collection_name=collection_name)
schema = self.client.create_schema(
auto_id=False,
enable_dynamic_field=False,
)
schema.add_field(field_name="id", datatype=DataType.VARCHAR, is_primary=True, max_length=36)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=dim)
index_params = self.client.prepare_index_params()
index_params.add_index(field_name="vector", index_type="AUTOINDEX", metric_type="COSINE")
self.client.create_collection(
collection_name=collection_name,
schema=schema,
index_params=index_params,
enable_dynamic_schema=enable_dynamic_schema,
)
@staticmethod
def build_filter(key, value) -> str:
if isinstance(value, str):
filter_expression = f'{key} == "{value}"'
else:
if isinstance(value, list):
filter_expression = f"{key} in {value}"
else:
filter_expression = f"{key} == {value}"
return filter_expression
def search(
self,
collection_name: str,
query: List[float],
filter: Dict = None,
limit: int = 10,
output_fields: Optional[List[str]] = None,
) -> List[dict]:
filter_expression = " and ".join([self.build_filter(key, value) for key, value in filter.items()])
print(filter_expression)
res = self.client.search(
collection_name=collection_name,
data=[query],
filter=filter_expression,
limit=limit,
output_fields=output_fields,
)[0]
return res
def add(self, collection_name: str, _ids: List[str], vector: List[List[float]], metadata: List[Dict[str, Any]]):
data = dict()
for i, id in enumerate(_ids):
data["id"] = id
data["vector"] = vector[i]
data["metadata"] = metadata[i]
self.client.upsert(collection_name=collection_name, data=data)
def delete(self, collection_name: str, _ids: List[str]):
self.client.delete(collection_name=collection_name, ids=_ids)
def write(self, *args, **kwargs):
pass
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/document_store/chromadb_store.py | metagpt/document_store/chromadb_store.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/29 14:46
@Author : alexanderwu
@File : chromadb_store.py
"""
import chromadb
class ChromaStore:
    """Thin wrapper around a single chromadb collection.

    If inherited from BaseStore, or importing other modules from metagpt,
    a Python exception occurs, which is strange.
    """

    def __init__(self, name: str, get_or_create: bool = False):
        chroma_client = chromadb.Client()
        self.client = chroma_client
        self.collection = chroma_client.create_collection(name, get_or_create=get_or_create)

    def search(self, query, n_results=2, metadata_filter=None, document_filter=None):
        """Query the collection; metadata/document filters are optional."""
        return self.collection.query(
            query_texts=[query],
            n_results=n_results,
            where=metadata_filter,  # optional filter
            where_document=document_filter,  # optional filter
        )

    def persist(self):
        """Chroma recommends using server mode and not persisting locally."""
        raise NotImplementedError

    def write(self, documents, metadatas, ids):
        """Bulk add: parallel lists of documents, metadatas and ids."""
        return self.collection.add(
            documents=documents,
            metadatas=metadatas,
            ids=ids,
        )

    def add(self, document, metadata, _id):
        """Add one document with its metadata and id."""
        return self.collection.add(
            documents=[document],
            metadatas=[metadata],
            ids=[_id],
        )

    def delete(self, _id):
        """Delete a single entry by id."""
        return self.collection.delete([_id])
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/schema.py | metagpt/rag/schema.py | """RAG schemas."""
from enum import Enum
from pathlib import Path
from typing import Any, ClassVar, List, Literal, Optional, Union
from chromadb.api.types import CollectionMetadata
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.indices.base import BaseIndex
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.schema import TextNode
from llama_index.core.vector_stores.types import VectorStoreQueryMode
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
from metagpt.config2 import config
from metagpt.configs.embedding_config import EmbeddingType
from metagpt.logs import logger
from metagpt.rag.interface import RAGObject
from metagpt.rag.prompts.default_prompts import DEFAULT_CHOICE_SELECT_PROMPT
class BaseRetrieverConfig(BaseModel):
"""Common config for retrievers.
If add new subconfig, it is necessary to add the corresponding instance implementation in rag.factories.retriever.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
similarity_top_k: int = Field(default=5, description="Number of top-k similar results to return during retrieval.")
class IndexRetrieverConfig(BaseRetrieverConfig):
"""Config for Index-basd retrievers."""
index: BaseIndex = Field(default=None, description="Index for retriver.")
class FAISSRetrieverConfig(IndexRetrieverConfig):
    """Config for FAISS-based retrievers."""

    dimensions: int = Field(default=0, description="Dimensionality of the vectors for FAISS index construction.")

    _embedding_type_to_dimensions: ClassVar[dict[EmbeddingType, int]] = {
        EmbeddingType.GEMINI: 768,
        EmbeddingType.OLLAMA: 4096,
    }

    @model_validator(mode="after")
    def check_dimensions(self):
        """Resolve dimensions from the embedding config when left at 0."""
        if self.dimensions == 0:
            configured = config.embedding.dimensions
            per_type_default = self._embedding_type_to_dimensions.get(config.embedding.api_type, 1536)
            self.dimensions = configured or per_type_default
        if not config.embedding.dimensions and config.embedding.api_type not in self._embedding_type_to_dimensions:
            logger.warning(
                f"You didn't set dimensions in config when using {config.embedding.api_type}, default to 1536"
            )

        return self
class BM25RetrieverConfig(IndexRetrieverConfig):
"""Config for BM25-based retrievers."""
create_index: bool = Field(
default=False,
description="Indicates whether to create an index for the nodes. It is useful when you need to persist data while only using BM25.",
exclude=True,
)
_no_embedding: bool = PrivateAttr(default=True)
class ChromaRetrieverConfig(IndexRetrieverConfig):
"""Config for Chroma-based retrievers."""
persist_path: Union[str, Path] = Field(default="./chroma_db", description="The directory to save data.")
collection_name: str = Field(default="metagpt", description="The name of the collection.")
metadata: Optional[CollectionMetadata] = Field(
default=None, description="Optional metadata to associate with the collection"
)
class ElasticsearchStoreConfig(BaseModel):
index_name: str = Field(default="metagpt", description="Name of the Elasticsearch index.")
es_url: str = Field(default=None, description="Elasticsearch URL.")
es_cloud_id: str = Field(default=None, description="Elasticsearch cloud ID.")
es_api_key: str = Field(default=None, description="Elasticsearch API key.")
es_user: str = Field(default=None, description="Elasticsearch username.")
es_password: str = Field(default=None, description="Elasticsearch password.")
batch_size: int = Field(default=200, description="Batch size for bulk indexing.")
distance_strategy: str = Field(default="COSINE", description="Distance strategy to use for similarity search.")
class ElasticsearchRetrieverConfig(IndexRetrieverConfig):
"""Config for Elasticsearch-based retrievers. Support both vector and text."""
store_config: ElasticsearchStoreConfig = Field(..., description="ElasticsearchStore config.")
vector_store_query_mode: VectorStoreQueryMode = Field(
default=VectorStoreQueryMode.DEFAULT, description="default is vector query."
)
class ElasticsearchKeywordRetrieverConfig(ElasticsearchRetrieverConfig):
"""Config for Elasticsearch-based retrievers. Support text only."""
_no_embedding: bool = PrivateAttr(default=True)
vector_store_query_mode: Literal[VectorStoreQueryMode.TEXT_SEARCH] = Field(
default=VectorStoreQueryMode.TEXT_SEARCH, description="text query only."
)
class BaseRankerConfig(BaseModel):
"""Common config for rankers.
If add new subconfig, it is necessary to add the corresponding instance implementation in rag.factories.ranker.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
top_n: int = Field(default=5, description="The number of top results to return.")
class LLMRankerConfig(BaseRankerConfig):
"""Config for LLM-based rankers."""
llm: Any = Field(
default=None,
description="The LLM to rerank with. using Any instead of LLM, as llama_index.core.llms.LLM is pydantic.v1.",
)
choice_select_prompt: Optional[BasePromptTemplate] = Field(
default=DEFAULT_CHOICE_SELECT_PROMPT, description="Choice select prompt."
)
class ColbertRerankConfig(BaseRankerConfig):
model: str = Field(default="colbert-ir/colbertv2.0", description="Colbert model name.")
device: str = Field(default="cpu", description="Device to use for sentence transformer.")
keep_retrieval_score: bool = Field(default=False, description="Whether to keep the retrieval score in metadata.")
class CohereRerankConfig(BaseRankerConfig):
model: str = Field(default="rerank-english-v3.0")
api_key: str = Field(default="YOUR_COHERE_API")
class BGERerankConfig(BaseRankerConfig):
model: str = Field(default="BAAI/bge-reranker-large", description="BAAI Reranker model name.")
use_fp16: bool = Field(default=True, description="Whether to use fp16 for inference.")
class ObjectRankerConfig(BaseRankerConfig):
field_name: str = Field(..., description="field name of the object, field's value must can be compared.")
order: Literal["desc", "asc"] = Field(default="desc", description="the direction of order.")
class BaseIndexConfig(BaseModel):
"""Common config for index.
If add new subconfig, it is necessary to add the corresponding instance implementation in rag.factories.index.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
persist_path: Union[str, Path] = Field(description="The directory of saved data.")
class VectorIndexConfig(BaseIndexConfig):
"""Config for vector-based index."""
embed_model: BaseEmbedding = Field(default=None, description="Embed model.")
class FAISSIndexConfig(VectorIndexConfig):
"""Config for faiss-based index."""
class ChromaIndexConfig(VectorIndexConfig):
"""Config for chroma-based index."""
collection_name: str = Field(default="metagpt", description="The name of the collection.")
metadata: Optional[CollectionMetadata] = Field(
default=None, description="Optional metadata to associate with the collection"
)
class BM25IndexConfig(BaseIndexConfig):
"""Config for bm25-based index."""
_no_embedding: bool = PrivateAttr(default=True)
class ElasticsearchIndexConfig(VectorIndexConfig):
"""Config for es-based index."""
store_config: ElasticsearchStoreConfig = Field(..., description="ElasticsearchStore config.")
persist_path: Union[str, Path] = ""
class ElasticsearchKeywordIndexConfig(ElasticsearchIndexConfig):
"""Config for es-based index. no embedding."""
_no_embedding: bool = PrivateAttr(default=True)
class ObjectNodeMetadata(BaseModel):
"""Metadata of ObjectNode."""
is_obj: bool = Field(default=True)
obj: Any = Field(default=None, description="When rag retrieve, will reconstruct obj from obj_json")
obj_json: str = Field(..., description="The json of object, e.g. obj.model_dump_json()")
obj_cls_name: str = Field(..., description="The class name of object, e.g. obj.__class__.__name__")
obj_mod_name: str = Field(..., description="The module name of class, e.g. obj.__class__.__module__")
class ObjectNode(TextNode):
    """RAG add object."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Keep the object-metadata fields out of both LLM and embedding views.
        excluded = list(ObjectNodeMetadata.model_fields.keys())
        self.excluded_llm_metadata_keys = excluded
        self.excluded_embed_metadata_keys = self.excluded_llm_metadata_keys

    @staticmethod
    def get_obj_metadata(obj: RAGObject) -> dict:
        """Serialize *obj* into ObjectNode metadata (json + class/module names)."""
        meta = ObjectNodeMetadata(
            obj_json=obj.model_dump_json(),
            obj_cls_name=obj.__class__.__name__,
            obj_mod_name=obj.__class__.__module__,
        )
        return meta.model_dump()
class OmniParseType(str, Enum):
    """OmniParseType: kinds of input the OmniParse service can handle."""

    PDF = "PDF"
    DOCUMENT = "DOCUMENT"
class ParseResultType(str, Enum):
    """The result type for the parser."""

    TXT = "text"  # plain text output
    MD = "markdown"  # markdown output
    JSON = "json"  # structured json output
class OmniParseOptions(BaseModel):
"""OmniParse Options config"""
result_type: ParseResultType = Field(default=ParseResultType.MD, description="OmniParse result_type")
parse_type: OmniParseType = Field(default=OmniParseType.DOCUMENT, description="OmniParse parse_type")
max_timeout: Optional[int] = Field(default=120, description="Maximum timeout for OmniParse service requests")
num_workers: int = Field(
default=5,
gt=0,
lt=10,
description="Number of concurrent requests for multiple files",
)
class OminParseImage(BaseModel):
image: str = Field(default="", description="image str bytes")
image_name: str = Field(default="", description="image name")
image_info: Optional[dict] = Field(default={}, description="image info")
class OmniParsedResult(BaseModel):
    """Parsed payload returned by OmniParse: markdown, plain text, images, metadata."""

    markdown: str = Field(default="", description="markdown text")
    text: str = Field(default="", description="plain text")
    images: Optional[List[OminParseImage]] = Field(default=[], description="images")
    metadata: Optional[dict] = Field(default={}, description="metadata")

    @model_validator(mode="before")
    def set_markdown(cls, values):
        # Default markdown to the plain text when no markdown was provided.
        if not values.get("markdown"):
            values["markdown"] = values.get("text")
        return values
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/interface.py | metagpt/rag/interface.py | """RAG Interfaces."""
from typing import Protocol, runtime_checkable
@runtime_checkable
class RAGObject(Protocol):
    """Support rag add object.

    Structural (duck-typed) interface: any object providing both methods
    below conforms, no inheritance required.
    """

    def rag_key(self) -> str:
        """For rag search."""

    def model_dump_json(self) -> str:
        """For rag persist.

        Pydantic Model don't need to implement this, as there is a built-in function named model_dump_json.
        """
@runtime_checkable
class NoEmbedding(Protocol):
    """Some retriever does not require embeddings, e.g. BM25"""

    # Marker attribute used to detect configs/retrievers that skip embedding setup.
    _no_embedding: bool
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/__init__.py | metagpt/rag/__init__.py | python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false | |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/benchmark/__init__.py | metagpt/rag/benchmark/__init__.py | from metagpt.rag.benchmark.base import RAGBenchmark
__all__ = ["RAGBenchmark"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/benchmark/base.py | metagpt/rag/benchmark/base.py | import asyncio
from typing import List, Tuple, Union
import evaluate
import jieba
from llama_index.core.embeddings import BaseEmbedding
from llama_index.core.evaluation import SemanticSimilarityEvaluator
from llama_index.core.schema import NodeWithScore
from pydantic import BaseModel
from metagpt.const import EXAMPLE_BENCHMARK_PATH
from metagpt.logs import logger
from metagpt.rag.factories import get_rag_embedding
from metagpt.utils.common import read_json_file
class DatasetInfo(BaseModel):
    """One benchmark dataset: its name, document file paths, and ground-truth entries."""

    name: str
    document_files: List[str]
    gt_info: List[dict]
class DatasetConfig(BaseModel):
    """Collection of benchmark datasets selected for evaluation."""

    datasets: List[DatasetInfo]
class RAGBenchmark:
    """Compute generation and retrieval quality metrics for RAG pipelines.

    Metrics: BLEU (avg + 1..4-gram precisions), ROUGE-L, embedding-based
    semantic similarity, and retrieval recall / hit-rate / MRR against
    reference documents.
    """

    def __init__(
        self,
        embed_model: BaseEmbedding = None,
    ):
        # Semantic similarity uses the configured RAG embedding unless one is given.
        self.evaluator = SemanticSimilarityEvaluator(
            embed_model=embed_model or get_rag_embedding(),
        )

    def set_metrics(
        self,
        bleu_avg: float = 0.0,
        bleu_1: float = 0.0,
        bleu_2: float = 0.0,
        bleu_3: float = 0.0,
        bleu_4: float = 0.0,
        rouge_l: float = 0.0,
        semantic_similarity: float = 0.0,
        recall: float = 0.0,
        hit_rate: float = 0.0,
        mrr: float = 0.0,
        length: float = 0.0,
        generated_text: str = None,
        ground_truth_text: str = None,
        question: str = None,
    ):
        """Pack metric values and the associated texts into one result dict."""
        metrics = {
            "bleu-avg": bleu_avg,
            "bleu-1": bleu_1,
            "bleu-2": bleu_2,
            "bleu-3": bleu_3,
            "bleu-4": bleu_4,
            "rouge-L": rouge_l,
            "semantic similarity": semantic_similarity,
            "recall": recall,
            "hit_rate": hit_rate,
            "mrr": mrr,
            "length": length,
        }
        log = {
            "generated_text": generated_text,
            "ground_truth_text": ground_truth_text,
            "question": question,
        }
        return {"metrics": metrics, "log": log}

    def bleu_score(self, response: str, reference: str, with_penalty=False) -> Union[float, Tuple[float]]:
        """BLEU of *response* vs *reference*, tokenized with jieba.

        Returns ``(bleu_avg, bleu1, bleu2, bleu3, bleu4)``. When *with_penalty*
        is False, the brevity penalty is divided back out of the average
        (0.0 when the penalty itself is zero).
        """
        tokenize = lambda text: list(jieba.cut(text))
        bleu = evaluate.load(path="bleu")
        results = bleu.compute(predictions=[response], references=[[reference]], tokenizer=tokenize)

        bleu_avg = results["bleu"]
        bleu1, bleu2, bleu3, bleu4 = results["precisions"][:4]
        brevity_penalty = results["brevity_penalty"]

        if with_penalty:
            return bleu_avg, bleu1, bleu2, bleu3, bleu4
        return 0.0 if brevity_penalty == 0 else bleu_avg / brevity_penalty, bleu1, bleu2, bleu3, bleu4

    def rougel_score(self, response: str, reference: str) -> float:
        """ROUGE-L score of *response* vs *reference* (jieba-tokenized)."""
        # pip install rouge_score
        tokenize = lambda text: list(jieba.cut(text))
        rouge = evaluate.load(path="rouge")
        results = rouge.compute(
            predictions=[response], references=[[reference]], tokenizer=tokenize, rouge_types=["rougeL"]
        )
        return results["rougeL"]

    def recall(self, nodes: list[NodeWithScore], reference_docs: list[str]) -> float:
        """Fraction of reference docs containing at least one retrieved node's text."""
        if not nodes:
            return 0.0
        covered = sum(any(node.text in doc for node in nodes) for doc in reference_docs)
        return covered / len(reference_docs)

    def hit_rate(self, nodes: list[NodeWithScore], reference_docs: list[str]) -> float:
        """1.0 if any retrieved node's text appears in any reference doc, else 0.0."""
        if not nodes:
            return 0.0
        return 1.0 if any(node.text in doc for doc in reference_docs for node in nodes) else 0.0

    def mean_reciprocal_rank(self, nodes: list[NodeWithScore], reference_docs: list[str]) -> float:
        """Reciprocal rank of the first retrieved node contained in any reference doc.

        Fix: the previous version referenced an undefined name ``text``
        (NameError at runtime) and carried an unreachable duplicated return;
        it now checks ``node.text`` and returns 1/rank of the first hit,
        0.0 when nothing matches.
        """
        for rank, node in enumerate(nodes or [], start=1):
            if any(node.text in doc for doc in reference_docs):
                return 1.0 / rank
        return 0.0

    async def semantic_similarity(self, response: str, reference: str) -> float:
        """Embedding-based similarity score via SemanticSimilarityEvaluator."""
        result = await self.evaluator.aevaluate(
            response=response,
            reference=reference,
        )
        return result.score

    async def compute_metric(
        self,
        response: str = None,
        reference: str = None,
        nodes: list[NodeWithScore] = None,
        reference_doc: list[str] = None,
        question: str = None,
    ):
        """Compute all metrics for one (question, answer, retrieval) sample."""
        recall = self.recall(nodes, reference_doc)
        bleu_avg, bleu1, bleu2, bleu3, bleu4 = self.bleu_score(response, reference)
        rouge_l = self.rougel_score(response, reference)
        hit_rate = self.hit_rate(nodes, reference_doc)
        mrr = self.mean_reciprocal_rank(nodes, reference_doc)
        similarity = await self.semantic_similarity(response, reference)

        return self.set_metrics(
            bleu_avg,
            bleu1,
            bleu2,
            bleu3,
            bleu4,
            rouge_l,
            similarity,
            recall,
            hit_rate,
            mrr,
            len(response),
            response,
            reference,
            question,
        )

    @staticmethod
    def load_dataset(ds_names: list[str] = ["all"]):
        """Load benchmark dataset configs; "all" (the default) selects every dataset.

        NOTE(review): the mutable default list is only read, never mutated, so it
        is kept for interface compatibility.
        """
        infos = read_json_file((EXAMPLE_BENCHMARK_PATH / "dataset_info.json").as_posix())

        dataset_config = DatasetConfig(
            datasets=[
                DatasetInfo(
                    name=name,
                    document_files=[
                        (EXAMPLE_BENCHMARK_PATH / name / file).as_posix() for file in info["document_file"]
                    ],
                    gt_info=read_json_file((EXAMPLE_BENCHMARK_PATH / name / info["gt_file"]).as_posix()),
                )
                for dataset_info in infos
                for name, info in dataset_info.items()
                if name in ds_names or "all" in ds_names
            ]
        )
        return dataset_config
if __name__ == "__main__":
    # Smoke-test the text metrics on one hand-written (answer, ground_truth) pair.
    benchmark = RAGBenchmark()
    answer = "是的,根据提供的信息,2023年7月20日,应急管理部和财政部确实联合发布了《因灾倒塌、损坏住房恢复重建救助工作规范》的通知。这份《规范》旨在进一步规范因灾倒塌、损坏住房的恢复重建救助相关工作。它明确了地方各级政府负责实施救助工作,应急管理部和财政部则负责统筹指导。地方财政应安排足够的资金,中央财政也会提供适当的补助。救助资金将通过专账管理,并采取特定的管理方式。救助对象是那些因自然灾害导致住房倒塌或损坏,并向政府提出申请且符合条件的受灾家庭。相关部门将组织调查统计救助对象信息,并建立档案。此外,《规范》还强调了资金发放的具体方式和公开透明的要求。"
    ground_truth = "“启明行动”是为了防控儿童青少年的近视问题,并发布了《防控儿童青少年近视核心知识十条》。"
    bleu_avg, bleu1, bleu2, bleu3, bleu4 = benchmark.bleu_score(answer, ground_truth)
    rougeL_score = benchmark.rougel_score(answer, ground_truth)
    # Fix: RAGBenchmark has no `SemanticSimilarity` attribute -- the old call
    # raised AttributeError; the coroutine method is `semantic_similarity`.
    similarity = asyncio.run(benchmark.semantic_similarity(answer, ground_truth))
    logger.info(
        f"BLEU Scores: bleu_avg = {bleu_avg}, bleu1 = {bleu1}, bleu2 = {bleu2}, bleu3 = {bleu3}, bleu4 = {bleu4}, "
        f"RougeL Score: {rougeL_score}, "
        f"Semantic Similarity: {similarity}"
    )
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/factories/llm.py | metagpt/rag/factories/llm.py | """RAG LLM."""
import asyncio
from typing import Any
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW
from llama_index.core.llms import (
CompletionResponse,
CompletionResponseGen,
CustomLLM,
LLMMetadata,
)
from llama_index.core.llms.callbacks import llm_completion_callback
from pydantic import Field
from metagpt.config2 import config
from metagpt.provider.base_llm import BaseLLM
from metagpt.utils.async_helper import NestAsyncio
from metagpt.utils.token_counter import TOKEN_MAX
class RAGLLM(CustomLLM):
    """LlamaIndex's LLM is different from MetaGPT's LLM.

    Inherit CustomLLM from llamaindex, making MetaGPT's LLM can be used by LlamaIndex.

    Set context_length or max_token of LLM in config.yaml if you encounter
    "Calculated available context size -xxx was not non-negative" error.
    """

    model_infer: BaseLLM = Field(..., description="The MetaGPT's LLM.")
    context_window: int = -1  # -1 means: derive from TOKEN_MAX / config in __init__
    num_output: int = -1  # -1 means: derive from config.llm.max_token in __init__
    model_name: str = ""  # "" means: derive from config.llm.model in __init__

    def __init__(
        self,
        model_infer: BaseLLM,
        context_window: int = -1,
        num_output: int = -1,
        model_name: str = "",
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Sentinel values fall back to the global MetaGPT config.
        if context_window < 0:
            context_window = TOKEN_MAX.get(config.llm.model, DEFAULT_CONTEXT_WINDOW)
        if num_output < 0:
            num_output = config.llm.max_token
        if not model_name:
            model_name = config.llm.model
        self.model_infer = model_infer
        self.context_window = context_window
        self.num_output = num_output
        self.model_name = model_name

    @property
    def metadata(self) -> LLMMetadata:
        """Get LLM metadata."""
        return LLMMetadata(
            context_window=self.context_window, num_output=self.num_output, model_name=self.model_name or "unknown"
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        # Synchronous entry point: drive the async completion on the current loop.
        NestAsyncio.apply_once()
        return asyncio.get_event_loop().run_until_complete(self.acomplete(prompt, **kwargs))

    @llm_completion_callback()
    async def acomplete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse:
        # Delegate to the wrapped MetaGPT LLM, non-streaming.
        text = await self.model_infer.aask(msg=prompt, stream=False)
        return CompletionResponse(text=text)

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        # NOTE(review): streaming is not implemented; this returns None, not a generator.
        ...
def get_rag_llm(model_infer: BaseLLM = None) -> RAGLLM:
    """Get llm that can be used by LlamaIndex (defaults to the configured MetaGPT LLM)."""
    from metagpt.llm import LLM

    inner_llm = model_infer or LLM()
    return RAGLLM(model_infer=inner_llm)
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/factories/ranker.py | metagpt/rag/factories/ranker.py | """RAG Ranker Factory."""
from llama_index.core.llms import LLM
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from metagpt.rag.factories.base import ConfigBasedFactory
from metagpt.rag.rankers.object_ranker import ObjectSortPostprocessor
from metagpt.rag.schema import (
BaseRankerConfig,
BGERerankConfig,
CohereRerankConfig,
ColbertRerankConfig,
LLMRankerConfig,
ObjectRankerConfig,
)
class RankerFactory(ConfigBasedFactory):
    """Creates rerankers (node postprocessors) from ranker configs.

    Modify creators for dynamically instance implementation.
    """

    def __init__(self):
        # One creator per supported ranker config type.
        creators = {
            LLMRankerConfig: self._create_llm_ranker,
            ColbertRerankConfig: self._create_colbert_ranker,
            ObjectRankerConfig: self._create_object_ranker,
            CohereRerankConfig: self._create_cohere_rerank,
            BGERerankConfig: self._create_bge_rerank,
        }
        super().__init__(creators)

    def get_rankers(self, configs: list[BaseRankerConfig] = None, **kwargs) -> list[BaseNodePostprocessor]:
        """Creates and returns ranker instances based on the provided configurations; [] when none are given."""
        if not configs:
            return []

        return super().get_instances(configs, **kwargs)

    def _create_llm_ranker(self, config: LLMRankerConfig, **kwargs) -> LLMRerank:
        # Resolve the LLM from config or kwargs before building the reranker.
        config.llm = self._extract_llm(config, **kwargs)

        return LLMRerank(**config.model_dump())

    def _create_colbert_ranker(self, config: ColbertRerankConfig, **kwargs) -> BaseNodePostprocessor:
        # Optional dependency: imported lazily so the base install works without it.
        try:
            from llama_index.postprocessor.colbert_rerank import ColbertRerank
        except ImportError:
            raise ImportError(
                "`llama-index-postprocessor-colbert-rerank` package not found, please run `pip install llama-index-postprocessor-colbert-rerank`"
            )

        return ColbertRerank(**config.model_dump())

    def _create_cohere_rerank(self, config: CohereRerankConfig, **kwargs) -> BaseNodePostprocessor:
        # Optional dependency: imported lazily so the base install works without it.
        try:
            from llama_index.postprocessor.cohere_rerank import CohereRerank
        except ImportError:
            raise ImportError(
                "`llama-index-postprocessor-cohere-rerank` package not found, please run `pip install llama-index-postprocessor-cohere-rerank`"
            )

        return CohereRerank(**config.model_dump())

    def _create_bge_rerank(self, config: BGERerankConfig, **kwargs) -> BaseNodePostprocessor:
        # Optional dependency: imported lazily so the base install works without it.
        try:
            from llama_index.postprocessor.flag_embedding_reranker import (
                FlagEmbeddingReranker,
            )
        except ImportError:
            raise ImportError(
                "`llama-index-postprocessor-flag-embedding-reranker` package not found, please run `pip install llama-index-postprocessor-flag-embedding-reranker`"
            )

        return FlagEmbeddingReranker(**config.model_dump())

    def _create_object_ranker(self, config: ObjectRankerConfig, **kwargs) -> BaseNodePostprocessor:
        return ObjectSortPostprocessor(**config.model_dump())

    def _extract_llm(self, config: BaseRankerConfig = None, **kwargs) -> LLM:
        # The LLM may come from the config itself or be passed through kwargs.
        return self._val_from_config_or_kwargs("llm", config, **kwargs)
get_rankers = RankerFactory().get_rankers
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
FoundationAgents/MetaGPT | https://github.com/FoundationAgents/MetaGPT/blob/fc6e8433747be02826dec818627ed5cec0950e77/metagpt/rag/factories/__init__.py | metagpt/rag/factories/__init__.py | """RAG factories"""
from metagpt.rag.factories.retriever import get_retriever
from metagpt.rag.factories.ranker import get_rankers
from metagpt.rag.factories.embedding import get_rag_embedding
from metagpt.rag.factories.index import get_index
from metagpt.rag.factories.llm import get_rag_llm
__all__ = ["get_retriever", "get_rankers", "get_rag_embedding", "get_index", "get_rag_llm"]
| python | MIT | fc6e8433747be02826dec818627ed5cec0950e77 | 2026-01-04T14:38:37.890126Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.