{"repo_name": "OpenManus", "file_name": "/OpenManus/protocol/a2a/app/agent.py", "inference_info": {"prefix_code": "import httpx\nfrom typing import Any, Dict, AsyncIterable, Literal, List, ClassVar\nfrom pydantic import BaseModel\nfrom app.agent.manus import Manus\n\n\nclass ResponseFormat(BaseModel):\n \"\"\"Respond to the user in this format.\"\"\"\n\n status: Literal[\"input_required\", \"completed\", \"error\"] = \"input_required\"\n message: str\n\n\n", "suffix_code": "\n", "middle_code": "class A2AManus(Manus):\n async def invoke(self, query, sessionId) -> str:\n config = {\"configurable\": {\"thread_id\": sessionId}}\n response = await self.run(query)\n return self.get_agent_response(config, response)\n async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:\n raise NotImplementedError(\"Streaming is not supported by Manus yet.\")\n def get_agent_response(self, config, agent_response):\n return {\n \"is_task_complete\": True,\n \"require_user_input\": False,\n \"content\": agent_response,\n }\n SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = [\"text\", \"text/plain\"]", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/OpenManus/app/bedrock.py", "import json\nimport sys\nimport time\nimport uuid\nfrom datetime import datetime\nfrom typing import Dict, List, Literal, Optional\n\nimport boto3\n\n\n# Global variables to track the current tool use ID across function calls\n# Tmp solution\nCURRENT_TOOLUSE_ID = None\n\n\n# Class to handle OpenAI-style response formatting\nclass OpenAIResponse:\n def __init__(self, data):\n # Recursively convert nested dicts and lists to OpenAIResponse objects\n for key, value in data.items():\n if isinstance(value, dict):\n value = OpenAIResponse(value)\n elif isinstance(value, list):\n value = [\n OpenAIResponse(item) if isinstance(item, dict) else item\n for item in value\n ]\n setattr(self, key, value)\n\n def model_dump(self, *args, 
**kwargs):\n # Convert object to dict and add timestamp\n data = self.__dict__\n data[\"created_at\"] = datetime.now().isoformat()\n return data\n\n\n# Main client class for interacting with Amazon Bedrock\nclass BedrockClient:\n def __init__(self):\n # Initialize Bedrock client, you need to configure AWS env first\n try:\n self.client = boto3.client(\"bedrock-runtime\")\n self.chat = Chat(self.client)\n except Exception as e:\n print(f\"Error initializing Bedrock client: {e}\")\n sys.exit(1)\n\n\n# Chat interface class\nclass Chat:\n def __init__(self, client):\n self.completions = ChatCompletions(client)\n\n\n# Core class handling chat completions functionality\nclass ChatCompletions:\n def __init__(self, client):\n self.client = client\n\n def _convert_openai_tools_to_bedrock_format(self, tools):\n # Convert OpenAI function calling format to Bedrock tool format\n bedrock_tools = []\n for tool in tools:\n if tool.get(\"type\") == \"function\":\n function = tool.get(\"function\", {})\n bedrock_tool = {\n \"toolSpec\": {\n \"name\": function.get(\"name\", \"\"),\n \"description\": function.get(\"description\", \"\"),\n \"inputSchema\": {\n \"json\": {\n \"type\": \"object\",\n \"properties\": function.get(\"parameters\", {}).get(\n \"properties\", {}\n ),\n \"required\": function.get(\"parameters\", {}).get(\n \"required\", []\n ),\n }\n },\n }\n }\n bedrock_tools.append(bedrock_tool)\n return bedrock_tools\n\n def _convert_openai_messages_to_bedrock_format(self, messages):\n # Convert OpenAI message format to Bedrock message format\n bedrock_messages = []\n system_prompt = []\n for message in messages:\n if message.get(\"role\") == \"system\":\n system_prompt = [{\"text\": message.get(\"content\")}]\n elif message.get(\"role\") == \"user\":\n bedrock_message = {\n \"role\": message.get(\"role\", \"user\"),\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"assistant\":\n 
bedrock_message = {\n \"role\": \"assistant\",\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n openai_tool_calls = message.get(\"tool_calls\", [])\n if openai_tool_calls:\n bedrock_tool_use = {\n \"toolUseId\": openai_tool_calls[0][\"id\"],\n \"name\": openai_tool_calls[0][\"function\"][\"name\"],\n \"input\": json.loads(\n openai_tool_calls[0][\"function\"][\"arguments\"]\n ),\n }\n bedrock_message[\"content\"].append({\"toolUse\": bedrock_tool_use})\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = openai_tool_calls[0][\"id\"]\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"tool\":\n bedrock_message = {\n \"role\": \"user\",\n \"content\": [\n {\n \"toolResult\": {\n \"toolUseId\": CURRENT_TOOLUSE_ID,\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n }\n ],\n }\n bedrock_messages.append(bedrock_message)\n else:\n raise ValueError(f\"Invalid role: {message.get('role')}\")\n return system_prompt, bedrock_messages\n\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n # Convert Bedrock response format to OpenAI format\n content = \"\"\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n content_array = bedrock_response[\"output\"][\"message\"][\"content\"]\n content = \"\".join(item.get(\"text\", \"\") for item in content_array)\n if content == \"\":\n content = \".\"\n\n # Handle tool calls in response\n openai_tool_calls = []\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n for content_item in bedrock_response[\"output\"][\"message\"][\"content\"]:\n if content_item.get(\"toolUse\"):\n bedrock_tool_use = content_item[\"toolUse\"]\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n openai_tool_call = {\n \"id\": CURRENT_TOOLUSE_ID,\n \"type\": \"function\",\n \"function\": {\n \"name\": bedrock_tool_use[\"name\"],\n \"arguments\": json.dumps(bedrock_tool_use[\"input\"]),\n },\n }\n 
openai_tool_calls.append(openai_tool_call)\n\n # Construct final OpenAI format response\n openai_format = {\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"created\": int(time.time()),\n \"object\": \"chat.completion\",\n \"system_fingerprint\": None,\n \"choices\": [\n {\n \"finish_reason\": bedrock_response.get(\"stopReason\", \"end_turn\"),\n \"index\": 0,\n \"message\": {\n \"content\": content,\n \"role\": bedrock_response.get(\"output\", {})\n .get(\"message\", {})\n .get(\"role\", \"assistant\"),\n \"tool_calls\": openai_tool_calls\n if openai_tool_calls != []\n else None,\n \"function_call\": None,\n },\n }\n ],\n \"usage\": {\n \"completion_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"outputTokens\", 0\n ),\n \"prompt_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"inputTokens\", 0\n ),\n \"total_tokens\": bedrock_response.get(\"usage\", {}).get(\"totalTokens\", 0),\n },\n }\n return OpenAIResponse(openai_format)\n\n async def _invoke_bedrock(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Non-streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n openai_response = self._convert_bedrock_response_to_openai_format(response)\n return openai_response\n\n async def _invoke_bedrock_stream(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # 
Streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse_stream(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n\n # Initialize response structure\n bedrock_response = {\n \"output\": {\"message\": {\"role\": \"\", \"content\": []}},\n \"stopReason\": \"\",\n \"usage\": {},\n \"metrics\": {},\n }\n bedrock_response_text = \"\"\n bedrock_response_tool_input = \"\"\n\n # Process streaming response\n stream = response.get(\"stream\")\n if stream:\n for event in stream:\n if event.get(\"messageStart\", {}).get(\"role\"):\n bedrock_response[\"output\"][\"message\"][\"role\"] = event[\n \"messageStart\"\n ][\"role\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"text\"):\n bedrock_response_text += event[\"contentBlockDelta\"][\"delta\"][\"text\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"text\"], end=\"\", flush=True\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 0:\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"text\": bedrock_response_text}\n )\n if event.get(\"contentBlockStart\", {}).get(\"start\", {}).get(\"toolUse\"):\n bedrock_tool_use = event[\"contentBlockStart\"][\"start\"][\"toolUse\"]\n tool_use = {\n \"toolUseId\": bedrock_tool_use[\"toolUseId\"],\n \"name\": bedrock_tool_use[\"name\"],\n }\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"toolUse\": tool_use}\n )\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"toolUse\"):\n bedrock_response_tool_input += event[\"contentBlockDelta\"][\"delta\"][\n \"toolUse\"\n ][\"input\"]\n print(\n 
event[\"contentBlockDelta\"][\"delta\"][\"toolUse\"][\"input\"],\n end=\"\",\n flush=True,\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 1:\n bedrock_response[\"output\"][\"message\"][\"content\"][1][\"toolUse\"][\n \"input\"\n ] = json.loads(bedrock_response_tool_input)\n print()\n openai_response = self._convert_bedrock_response_to_openai_format(\n bedrock_response\n )\n return openai_response\n\n def create(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n stream: Optional[bool] = True,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Main entry point for chat completion\n bedrock_tools = []\n if tools is not None:\n bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)\n if stream:\n return self._invoke_bedrock_stream(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n else:\n return self._invoke_bedrock(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n"], ["/OpenManus/app/llm.py", "import math\nfrom typing import Dict, List, Optional, Union\n\nimport tiktoken\nfrom openai import (\n APIError,\n AsyncAzureOpenAI,\n AsyncOpenAI,\n AuthenticationError,\n OpenAIError,\n RateLimitError,\n)\nfrom openai.types.chat import ChatCompletion, ChatCompletionMessage\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_random_exponential,\n)\n\nfrom app.bedrock import BedrockClient\nfrom app.config import LLMSettings, config\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger # Assuming a logger is set up in your app\nfrom app.schema import (\n ROLE_VALUES,\n TOOL_CHOICE_TYPE,\n TOOL_CHOICE_VALUES,\n Message,\n ToolChoice,\n)\n\n\nREASONING_MODELS = [\"o1\", \"o3-mini\"]\nMULTIMODAL_MODELS = [\n \"gpt-4-vision-preview\",\n 
\"gpt-4o\",\n \"gpt-4o-mini\",\n \"claude-3-opus-20240229\",\n \"claude-3-sonnet-20240229\",\n \"claude-3-haiku-20240307\",\n]\n\n\nclass TokenCounter:\n # Token constants\n BASE_MESSAGE_TOKENS = 4\n FORMAT_TOKENS = 2\n LOW_DETAIL_IMAGE_TOKENS = 85\n HIGH_DETAIL_TILE_TOKENS = 170\n\n # Image processing constants\n MAX_SIZE = 2048\n HIGH_DETAIL_TARGET_SHORT_SIDE = 768\n TILE_SIZE = 512\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n def count_text(self, text: str) -> int:\n \"\"\"Calculate tokens for a text string\"\"\"\n return 0 if not text else len(self.tokenizer.encode(text))\n\n def count_image(self, image_item: dict) -> int:\n \"\"\"\n Calculate tokens for an image based on detail level and dimensions\n\n For \"low\" detail: fixed 85 tokens\n For \"high\" detail:\n 1. Scale to fit in 2048x2048 square\n 2. Scale shortest side to 768px\n 3. Count 512px tiles (170 tokens each)\n 4. Add 85 tokens\n \"\"\"\n detail = image_item.get(\"detail\", \"medium\")\n\n # For low detail, always return fixed token count\n if detail == \"low\":\n return self.LOW_DETAIL_IMAGE_TOKENS\n\n # For medium detail (default in OpenAI), use high detail calculation\n # OpenAI doesn't specify a separate calculation for medium\n\n # For high detail, calculate based on dimensions if available\n if detail == \"high\" or detail == \"medium\":\n # If dimensions are provided in the image_item\n if \"dimensions\" in image_item:\n width, height = image_item[\"dimensions\"]\n return self._calculate_high_detail_tokens(width, height)\n\n return (\n self._calculate_high_detail_tokens(1024, 1024) if detail == \"high\" else 1024\n )\n\n def _calculate_high_detail_tokens(self, width: int, height: int) -> int:\n \"\"\"Calculate tokens for high detail images based on dimensions\"\"\"\n # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square\n if width > self.MAX_SIZE or height > self.MAX_SIZE:\n scale = self.MAX_SIZE / max(width, height)\n width = int(width * scale)\n height = int(height * 
scale)\n\n # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE\n scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height)\n scaled_width = int(width * scale)\n scaled_height = int(height * scale)\n\n # Step 3: Count number of 512px tiles\n tiles_x = math.ceil(scaled_width / self.TILE_SIZE)\n tiles_y = math.ceil(scaled_height / self.TILE_SIZE)\n total_tiles = tiles_x * tiles_y\n\n # Step 4: Calculate final token count\n return (\n total_tiles * self.HIGH_DETAIL_TILE_TOKENS\n ) + self.LOW_DETAIL_IMAGE_TOKENS\n\n def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int:\n \"\"\"Calculate tokens for message content\"\"\"\n if not content:\n return 0\n\n if isinstance(content, str):\n return self.count_text(content)\n\n token_count = 0\n for item in content:\n if isinstance(item, str):\n token_count += self.count_text(item)\n elif isinstance(item, dict):\n if \"text\" in item:\n token_count += self.count_text(item[\"text\"])\n elif \"image_url\" in item:\n token_count += self.count_image(item)\n return token_count\n\n def count_tool_calls(self, tool_calls: List[dict]) -> int:\n \"\"\"Calculate tokens for tool calls\"\"\"\n token_count = 0\n for tool_call in tool_calls:\n if \"function\" in tool_call:\n function = tool_call[\"function\"]\n token_count += self.count_text(function.get(\"name\", \"\"))\n token_count += self.count_text(function.get(\"arguments\", \"\"))\n return token_count\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n \"\"\"Calculate the total number of tokens in a message list\"\"\"\n total_tokens = self.FORMAT_TOKENS # Base format tokens\n\n for message in messages:\n tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message\n\n # Add role tokens\n tokens += self.count_text(message.get(\"role\", \"\"))\n\n # Add content tokens\n if \"content\" in message:\n tokens += self.count_content(message[\"content\"])\n\n # Add tool calls tokens\n if \"tool_calls\" in message:\n tokens += 
self.count_tool_calls(message[\"tool_calls\"])\n\n # Add name and tool_call_id tokens\n tokens += self.count_text(message.get(\"name\", \"\"))\n tokens += self.count_text(message.get(\"tool_call_id\", \"\"))\n\n total_tokens += tokens\n\n return total_tokens\n\n\nclass LLM:\n _instances: Dict[str, \"LLM\"] = {}\n\n def __new__(\n cls, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if config_name not in cls._instances:\n instance = super().__new__(cls)\n instance.__init__(config_name, llm_config)\n cls._instances[config_name] = instance\n return cls._instances[config_name]\n\n def __init__(\n self, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if not hasattr(self, \"client\"): # Only initialize if not already initialized\n llm_config = llm_config or config.llm\n llm_config = llm_config.get(config_name, llm_config[\"default\"])\n self.model = llm_config.model\n self.max_tokens = llm_config.max_tokens\n self.temperature = llm_config.temperature\n self.api_type = llm_config.api_type\n self.api_key = llm_config.api_key\n self.api_version = llm_config.api_version\n self.base_url = llm_config.base_url\n\n # Add token counting related attributes\n self.total_input_tokens = 0\n self.total_completion_tokens = 0\n self.max_input_tokens = (\n llm_config.max_input_tokens\n if hasattr(llm_config, \"max_input_tokens\")\n else None\n )\n\n # Initialize tokenizer\n try:\n self.tokenizer = tiktoken.encoding_for_model(self.model)\n except KeyError:\n # If the model is not in tiktoken's presets, use cl100k_base as default\n self.tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n\n if self.api_type == \"azure\":\n self.client = AsyncAzureOpenAI(\n base_url=self.base_url,\n api_key=self.api_key,\n api_version=self.api_version,\n )\n elif self.api_type == \"aws\":\n self.client = BedrockClient()\n else:\n self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)\n\n self.token_counter = 
TokenCounter(self.tokenizer)\n\n def count_tokens(self, text: str) -> int:\n \"\"\"Calculate the number of tokens in a text\"\"\"\n if not text:\n return 0\n return len(self.tokenizer.encode(text))\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n return self.token_counter.count_message_tokens(messages)\n\n def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None:\n \"\"\"Update token counts\"\"\"\n # Only track tokens if max_input_tokens is set\n self.total_input_tokens += input_tokens\n self.total_completion_tokens += completion_tokens\n logger.info(\n f\"Token usage: Input={input_tokens}, Completion={completion_tokens}, \"\n f\"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, \"\n f\"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}\"\n )\n\n def check_token_limit(self, input_tokens: int) -> bool:\n \"\"\"Check if token limits are exceeded\"\"\"\n if self.max_input_tokens is not None:\n return (self.total_input_tokens + input_tokens) <= self.max_input_tokens\n # If max_input_tokens is not set, always return True\n return True\n\n def get_limit_error_message(self, input_tokens: int) -> str:\n \"\"\"Generate error message for token limit exceeded\"\"\"\n if (\n self.max_input_tokens is not None\n and (self.total_input_tokens + input_tokens) > self.max_input_tokens\n ):\n return f\"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})\"\n\n return \"Token limit exceeded\"\n\n @staticmethod\n def format_messages(\n messages: List[Union[dict, Message]], supports_images: bool = False\n ) -> List[dict]:\n \"\"\"\n Format messages for LLM by converting them to OpenAI message format.\n\n Args:\n messages: List of messages that can be either dict or Message objects\n supports_images: Flag indicating if the target model supports image 
inputs\n\n Returns:\n List[dict]: List of formatted messages in OpenAI format\n\n Raises:\n ValueError: If messages are invalid or missing required fields\n TypeError: If unsupported message types are provided\n\n Examples:\n >>> msgs = [\n ... Message.system_message(\"You are a helpful assistant\"),\n ... {\"role\": \"user\", \"content\": \"Hello\"},\n ... Message.user_message(\"How are you?\")\n ... ]\n >>> formatted = LLM.format_messages(msgs)\n \"\"\"\n formatted_messages = []\n\n for message in messages:\n # Convert Message objects to dictionaries\n if isinstance(message, Message):\n message = message.to_dict()\n\n if isinstance(message, dict):\n # If message is a dict, ensure it has required fields\n if \"role\" not in message:\n raise ValueError(\"Message dict must contain 'role' field\")\n\n # Process base64 images if present and model supports images\n if supports_images and message.get(\"base64_image\"):\n # Initialize or convert content to appropriate format\n if not message.get(\"content\"):\n message[\"content\"] = []\n elif isinstance(message[\"content\"], str):\n message[\"content\"] = [\n {\"type\": \"text\", \"text\": message[\"content\"]}\n ]\n elif isinstance(message[\"content\"], list):\n # Convert string items to proper text objects\n message[\"content\"] = [\n (\n {\"type\": \"text\", \"text\": item}\n if isinstance(item, str)\n else item\n )\n for item in message[\"content\"]\n ]\n\n # Add the image to content\n message[\"content\"].append(\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": f\"data:image/jpeg;base64,{message['base64_image']}\"\n },\n }\n )\n\n # Remove the base64_image field\n del message[\"base64_image\"]\n # If model doesn't support images but message has base64_image, handle gracefully\n elif not supports_images and message.get(\"base64_image\"):\n # Just remove the base64_image field and keep the text content\n del message[\"base64_image\"]\n\n if \"content\" in message or \"tool_calls\" in message:\n 
formatted_messages.append(message)\n # else: do not include the message\n else:\n raise TypeError(f\"Unsupported message type: {type(message)}\")\n\n # Validate all messages have required fields\n for msg in formatted_messages:\n if msg[\"role\"] not in ROLE_VALUES:\n raise ValueError(f\"Invalid role: {msg['role']}\")\n\n return formatted_messages\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = True,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format system and user messages with image support check\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise 
TokenLimitExceeded(error_message)\n\n params = {\n \"model\": self.model,\n \"messages\": messages,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n if not stream:\n # Non-streaming request\n response = await self.client.chat.completions.create(\n **params, stream=False\n )\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message.content\n\n # Streaming request, For streaming, update estimated token count before making the request\n self.update_token_count(input_tokens)\n\n response = await self.client.chat.completions.create(**params, stream=True)\n\n collected_messages = []\n completion_text = \"\"\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n completion_text += chunk_message\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n # estimate completion tokens for streaming response\n completion_tokens = self.count_tokens(completion_text)\n logger.info(\n f\"Estimated completion tokens for streaming response: {completion_tokens}\"\n )\n self.total_completion_tokens += completion_tokens\n\n return full_response\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError:\n logger.exception(f\"Validation error\")\n raise\n except OpenAIError as oe:\n logger.exception(f\"OpenAI API error\")\n if isinstance(oe, AuthenticationError):\n 
logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception:\n logger.exception(f\"Unexpected error in ask\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_with_images(\n self,\n messages: List[Union[dict, Message]],\n images: List[Union[str, dict]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = False,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt with images to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n images: List of image URLs or image data dictionaries\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # For ask_with_images, we always set supports_images to True because\n # this method should only be called with models that support images\n if self.model not in MULTIMODAL_MODELS:\n raise ValueError(\n f\"Model {self.model} does not support images. 
Use a model from {MULTIMODAL_MODELS}\"\n )\n\n # Format messages with image support\n formatted_messages = self.format_messages(messages, supports_images=True)\n\n # Ensure the last message is from the user to attach images\n if not formatted_messages or formatted_messages[-1][\"role\"] != \"user\":\n raise ValueError(\n \"The last message must be from the user to attach images\"\n )\n\n # Process the last user message to include images\n last_message = formatted_messages[-1]\n\n # Convert content to multimodal format if needed\n content = last_message[\"content\"]\n multimodal_content = (\n [{\"type\": \"text\", \"text\": content}]\n if isinstance(content, str)\n else content\n if isinstance(content, list)\n else []\n )\n\n # Add images to content\n for image in images:\n if isinstance(image, str):\n multimodal_content.append(\n {\"type\": \"image_url\", \"image_url\": {\"url\": image}}\n )\n elif isinstance(image, dict) and \"url\" in image:\n multimodal_content.append({\"type\": \"image_url\", \"image_url\": image})\n elif isinstance(image, dict) and \"image_url\" in image:\n multimodal_content.append(image)\n else:\n raise ValueError(f\"Unsupported image format: {image}\")\n\n # Update the message with multimodal content\n last_message[\"content\"] = multimodal_content\n\n # Add system messages if provided\n if system_msgs:\n all_messages = (\n self.format_messages(system_msgs, supports_images=True)\n + formatted_messages\n )\n else:\n all_messages = formatted_messages\n\n # Calculate tokens and check limits\n input_tokens = self.count_message_tokens(all_messages)\n if not self.check_token_limit(input_tokens):\n raise TokenLimitExceeded(self.get_limit_error_message(input_tokens))\n\n # Set up API parameters\n params = {\n \"model\": self.model,\n \"messages\": all_messages,\n \"stream\": stream,\n }\n\n # Add model-specific parameters\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = 
self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n # Handle non-streaming request\n if not stream:\n response = await self.client.chat.completions.create(**params)\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n self.update_token_count(response.usage.prompt_tokens)\n return response.choices[0].message.content\n\n # Handle streaming request\n self.update_token_count(input_tokens)\n response = await self.client.chat.completions.create(**params)\n\n collected_messages = []\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n return full_response\n\n except TokenLimitExceeded:\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_with_images: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_with_images: {e}\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_tool(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n timeout: int = 300,\n tools: Optional[List[dict]] = None,\n tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore\n temperature: Optional[float] = None,\n **kwargs,\n ) -> ChatCompletionMessage | None:\n \"\"\"\n Ask LLM using functions/tools and return the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n timeout: Request timeout in seconds\n tools: List of tools to use\n tool_choice: Tool choice strategy\n temperature: Sampling temperature for the response\n **kwargs: Additional completion arguments\n\n Returns:\n ChatCompletionMessage: The model's response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If tools, tool_choice, or messages are invalid\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Validate tool_choice\n if tool_choice not in TOOL_CHOICE_VALUES:\n raise ValueError(f\"Invalid tool_choice: {tool_choice}\")\n\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format messages\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # If there are tools, 
calculate token count for tool descriptions\n tools_tokens = 0\n if tools:\n for tool in tools:\n tools_tokens += self.count_tokens(str(tool))\n\n input_tokens += tools_tokens\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n # Validate tools if provided\n if tools:\n for tool in tools:\n if not isinstance(tool, dict) or \"type\" not in tool:\n raise ValueError(\"Each tool must be a dict with 'type' field\")\n\n # Set up the completion request\n params = {\n \"model\": self.model,\n \"messages\": messages,\n \"tools\": tools,\n \"tool_choice\": tool_choice,\n \"timeout\": timeout,\n **kwargs,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n params[\"stream\"] = False # Always use non-streaming for tool requests\n response: ChatCompletion = await self.client.chat.completions.create(\n **params\n )\n\n # Check if response is valid\n if not response.choices or not response.choices[0].message:\n print(response)\n # raise ValueError(\"Invalid or empty response from LLM\")\n return None\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_tool: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_tool: {e}\")\n raise\n"], ["/OpenManus/app/tool/web_search.py", "import asyncio\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pydantic import BaseModel, ConfigDict, Field, model_validator\nfrom tenacity import retry, stop_after_attempt, wait_exponential\n\nfrom app.config import config\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.search import (\n BaiduSearchEngine,\n BingSearchEngine,\n DuckDuckGoSearchEngine,\n GoogleSearchEngine,\n WebSearchEngine,\n)\nfrom app.tool.search.base import SearchItem\n\n\nclass SearchResult(BaseModel):\n \"\"\"Represents a single search result returned by a search engine.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n position: int = Field(description=\"Position in search results\")\n url: str = Field(description=\"URL of the search result\")\n title: str = Field(default=\"\", description=\"Title of the search result\")\n description: str = Field(\n default=\"\", description=\"Description or snippet of the search result\"\n )\n source: str = Field(description=\"The search engine that provided this result\")\n raw_content: Optional[str] = Field(\n default=None, description=\"Raw content from the search result page if available\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result.\"\"\"\n return f\"{self.title} ({self.url})\"\n\n\nclass SearchMetadata(BaseModel):\n \"\"\"Metadata about the search operation.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n total_results: int = Field(description=\"Total number of results found\")\n language: str = Field(description=\"Language code used for the search\")\n country: str = Field(description=\"Country code used for the 
search\")\n\n\nclass SearchResponse(ToolResult):\n \"\"\"Structured response from the web search tool, inheriting ToolResult.\"\"\"\n\n query: str = Field(description=\"The search query that was executed\")\n results: List[SearchResult] = Field(\n default_factory=list, description=\"List of search results\"\n )\n metadata: Optional[SearchMetadata] = Field(\n default=None, description=\"Metadata about the search\"\n )\n\n @model_validator(mode=\"after\")\n def populate_output(self) -> \"SearchResponse\":\n \"\"\"Populate output or error fields based on search results.\"\"\"\n if self.error:\n return self\n\n result_text = [f\"Search results for '{self.query}':\"]\n\n for i, result in enumerate(self.results, 1):\n # Add title with position number\n title = result.title.strip() or \"No title\"\n result_text.append(f\"\\n{i}. {title}\")\n\n # Add URL with proper indentation\n result_text.append(f\" URL: {result.url}\")\n\n # Add description if available\n if result.description.strip():\n result_text.append(f\" Description: {result.description}\")\n\n # Add content preview if available\n if result.raw_content:\n content_preview = result.raw_content[:1000].replace(\"\\n\", \" \").strip()\n if len(result.raw_content) > 1000:\n content_preview += \"...\"\n result_text.append(f\" Content: {content_preview}\")\n\n # Add metadata at the bottom if available\n if self.metadata:\n result_text.extend(\n [\n f\"\\nMetadata:\",\n f\"- Total results: {self.metadata.total_results}\",\n f\"- Language: {self.metadata.language}\",\n f\"- Country: {self.metadata.country}\",\n ]\n )\n\n self.output = \"\\n\".join(result_text)\n return self\n\n\nclass WebContentFetcher:\n \"\"\"Utility class for fetching web content.\"\"\"\n\n @staticmethod\n async def fetch_content(url: str, timeout: int = 10) -> Optional[str]:\n \"\"\"\n Fetch and extract the main content from a webpage.\n\n Args:\n url: The URL to fetch content from\n timeout: Request timeout in seconds\n\n Returns:\n Extracted text 
content or None if fetching fails\n \"\"\"\n headers = {\n \"WebSearch\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n\n try:\n # Use asyncio to run requests in a thread pool\n response = await asyncio.get_event_loop().run_in_executor(\n None, lambda: requests.get(url, headers=headers, timeout=timeout)\n )\n\n if response.status_code != 200:\n logger.warning(\n f\"Failed to fetch content from {url}: HTTP {response.status_code}\"\n )\n return None\n\n # Parse HTML with BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\", \"header\", \"footer\", \"nav\"]):\n script.extract()\n\n # Get text content\n text = soup.get_text(separator=\"\\n\", strip=True)\n\n # Clean up whitespace and limit size (100KB max)\n text = \" \".join(text.split())\n return text[:10000] if text else None\n\n except Exception as e:\n logger.warning(f\"Error fetching content from {url}: {e}\")\n return None\n\n\nclass WebSearch(BaseTool):\n \"\"\"Search the web for information using various search engines.\"\"\"\n\n name: str = \"web_search\"\n description: str = \"\"\"Search the web for real-time information about any topic.\n This tool returns comprehensive search results with relevant information, URLs, titles, and descriptions.\n If the primary search engine fails, it automatically falls back to alternative engines.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\",\n \"description\": \"(required) The search query to submit to the search engine.\",\n },\n \"num_results\": {\n \"type\": \"integer\",\n \"description\": \"(optional) The number of search results to return. 
Default is 5.\",\n \"default\": 5,\n },\n \"lang\": {\n \"type\": \"string\",\n \"description\": \"(optional) Language code for search results (default: en).\",\n \"default\": \"en\",\n },\n \"country\": {\n \"type\": \"string\",\n \"description\": \"(optional) Country code for search results (default: us).\",\n \"default\": \"us\",\n },\n \"fetch_content\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to fetch full content from result pages. Default is false.\",\n \"default\": False,\n },\n },\n \"required\": [\"query\"],\n }\n _search_engine: dict[str, WebSearchEngine] = {\n \"google\": GoogleSearchEngine(),\n \"baidu\": BaiduSearchEngine(),\n \"duckduckgo\": DuckDuckGoSearchEngine(),\n \"bing\": BingSearchEngine(),\n }\n content_fetcher: WebContentFetcher = WebContentFetcher()\n\n async def execute(\n self,\n query: str,\n num_results: int = 5,\n lang: Optional[str] = None,\n country: Optional[str] = None,\n fetch_content: bool = False,\n ) -> SearchResponse:\n \"\"\"\n Execute a Web search and return detailed search results.\n\n Args:\n query: The search query to submit to the search engine\n num_results: The number of search results to return (default: 5)\n lang: Language code for search results (default from config)\n country: Country code for search results (default from config)\n fetch_content: Whether to fetch content from result pages (default: False)\n\n Returns:\n A structured response containing search results and metadata\n \"\"\"\n # Get settings from config\n retry_delay = (\n getattr(config.search_config, \"retry_delay\", 60)\n if config.search_config\n else 60\n )\n max_retries = (\n getattr(config.search_config, \"max_retries\", 3)\n if config.search_config\n else 3\n )\n\n # Use config values for lang and country if not specified\n if lang is None:\n lang = (\n getattr(config.search_config, \"lang\", \"en\")\n if config.search_config\n else \"en\"\n )\n\n if country is None:\n country = (\n getattr(config.search_config, 
\"country\", \"us\")\n if config.search_config\n else \"us\"\n )\n\n search_params = {\"lang\": lang, \"country\": country}\n\n # Try searching with retries when all engines fail\n for retry_count in range(max_retries + 1):\n results = await self._try_all_engines(query, num_results, search_params)\n\n if results:\n # Fetch content if requested\n if fetch_content:\n results = await self._fetch_content_for_results(results)\n\n # Return a successful structured response\n return SearchResponse(\n status=\"success\",\n query=query,\n results=results,\n metadata=SearchMetadata(\n total_results=len(results),\n language=lang,\n country=country,\n ),\n )\n\n if retry_count < max_retries:\n # All engines failed, wait and retry\n logger.warning(\n f\"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}...\"\n )\n await asyncio.sleep(retry_delay)\n else:\n logger.error(\n f\"All search engines failed after {max_retries} retries. Giving up.\"\n )\n\n # Return an error response\n return SearchResponse(\n query=query,\n error=\"All search engines failed to return results after multiple retries.\",\n results=[],\n )\n\n async def _try_all_engines(\n self, query: str, num_results: int, search_params: Dict[str, Any]\n ) -> List[SearchResult]:\n \"\"\"Try all search engines in the configured order.\"\"\"\n engine_order = self._get_engine_order()\n failed_engines = []\n\n for engine_name in engine_order:\n engine = self._search_engine[engine_name]\n logger.info(f\"🔎 Attempting search with {engine_name.capitalize()}...\")\n search_items = await self._perform_search_with_engine(\n engine, query, num_results, search_params\n )\n\n if not search_items:\n continue\n\n if failed_engines:\n logger.info(\n f\"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}\"\n )\n\n # Transform search items into structured results\n return [\n SearchResult(\n position=i + 1,\n url=item.url,\n title=item.title\n or 
f\"Result {i+1}\", # Ensure we always have a title\n description=item.description or \"\",\n source=engine_name,\n )\n for i, item in enumerate(search_items)\n ]\n\n if failed_engines:\n logger.error(f\"All search engines failed: {', '.join(failed_engines)}\")\n return []\n\n async def _fetch_content_for_results(\n self, results: List[SearchResult]\n ) -> List[SearchResult]:\n \"\"\"Fetch and add web content to search results.\"\"\"\n if not results:\n return []\n\n # Create tasks for each result\n tasks = [self._fetch_single_result_content(result) for result in results]\n\n # Type annotation to help type checker\n fetched_results = await asyncio.gather(*tasks)\n\n # Explicit validation of return type\n return [\n (\n result\n if isinstance(result, SearchResult)\n else SearchResult(**result.dict())\n )\n for result in fetched_results\n ]\n\n async def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:\n \"\"\"Fetch content for a single search result.\"\"\"\n if result.url:\n content = await self.content_fetcher.fetch_content(result.url)\n if content:\n result.raw_content = content\n return result\n\n def _get_engine_order(self) -> List[str]:\n \"\"\"Determines the order in which to try search engines.\"\"\"\n preferred = (\n getattr(config.search_config, \"engine\", \"google\").lower()\n if config.search_config\n else \"google\"\n )\n fallbacks = (\n [engine.lower() for engine in config.search_config.fallback_engines]\n if config.search_config\n and hasattr(config.search_config, \"fallback_engines\")\n else []\n )\n\n # Start with preferred engine, then fallbacks, then remaining engines\n engine_order = [preferred] if preferred in self._search_engine else []\n engine_order.extend(\n [\n fb\n for fb in fallbacks\n if fb in self._search_engine and fb not in engine_order\n ]\n )\n engine_order.extend([e for e in self._search_engine if e not in engine_order])\n\n return engine_order\n\n @retry(\n stop=stop_after_attempt(3), 
wait=wait_exponential(multiplier=1, min=1, max=10)\n )\n async def _perform_search_with_engine(\n self,\n engine: WebSearchEngine,\n query: str,\n num_results: int,\n search_params: Dict[str, Any],\n ) -> List[SearchItem]:\n \"\"\"Execute search with the given engine and parameters.\"\"\"\n return await asyncio.get_event_loop().run_in_executor(\n None,\n lambda: list(\n engine.perform_search(\n query,\n num_results=num_results,\n lang=search_params.get(\"lang\"),\n country=search_params.get(\"country\"),\n )\n ),\n )\n\n\nif __name__ == \"__main__\":\n web_search = WebSearch()\n search_response = asyncio.run(\n web_search.execute(\n query=\"Python programming\", fetch_content=True, num_results=1\n )\n )\n print(search_response.to_tool_result())\n"], ["/OpenManus/app/tool/browser_use_tool.py", "import asyncio\nimport base64\nimport json\nfrom typing import Generic, Optional, TypeVar\n\nfrom browser_use import Browser as BrowserUseBrowser\nfrom browser_use import BrowserConfig\nfrom browser_use.browser.context import BrowserContext, BrowserContextConfig\nfrom browser_use.dom.service import DomService\nfrom pydantic import Field, field_validator\nfrom pydantic_core.core_schema import ValidationInfo\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.web_search import WebSearch\n\n\n_BROWSER_DESCRIPTION = \"\"\"\\\nA powerful browser automation tool that allows interaction with web pages through various actions.\n* This tool provides commands for controlling a browser session, navigating web pages, and extracting information\n* It maintains state across calls, keeping the browser session alive until explicitly closed\n* Use this when you need to browse websites, fill forms, click buttons, extract content, or perform web searches\n* Each action requires specific parameters as defined in the tool's dependencies\n\nKey capabilities include:\n* Navigation: Go to specific URLs, go back, search the web, or 
refresh pages\n* Interaction: Click elements, input text, select from dropdowns, send keyboard commands\n* Scrolling: Scroll up/down by pixel amount or scroll to specific text\n* Content extraction: Extract and analyze content from web pages based on specific goals\n* Tab management: Switch between tabs, open new tabs, or close tabs\n\nNote: When using element indices, refer to the numbered elements shown in the current browser state.\n\"\"\"\n\nContext = TypeVar(\"Context\")\n\n\nclass BrowserUseTool(BaseTool, Generic[Context]):\n name: str = \"browser_use\"\n description: str = _BROWSER_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"action\": {\n \"type\": \"string\",\n \"enum\": [\n \"go_to_url\",\n \"click_element\",\n \"input_text\",\n \"scroll_down\",\n \"scroll_up\",\n \"scroll_to_text\",\n \"send_keys\",\n \"get_dropdown_options\",\n \"select_dropdown_option\",\n \"go_back\",\n \"web_search\",\n \"wait\",\n \"extract_content\",\n \"switch_tab\",\n \"open_tab\",\n \"close_tab\",\n ],\n \"description\": \"The browser action to perform\",\n },\n \"url\": {\n \"type\": \"string\",\n \"description\": \"URL for 'go_to_url' or 'open_tab' actions\",\n },\n \"index\": {\n \"type\": \"integer\",\n \"description\": \"Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions\",\n },\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions\",\n },\n \"scroll_amount\": {\n \"type\": \"integer\",\n \"description\": \"Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions\",\n },\n \"tab_id\": {\n \"type\": \"integer\",\n \"description\": \"Tab ID for 'switch_tab' action\",\n },\n \"query\": {\n \"type\": \"string\",\n \"description\": \"Search query for 'web_search' action\",\n },\n \"goal\": {\n \"type\": \"string\",\n \"description\": \"Extraction goal for 
'extract_content' action\",\n },\n \"keys\": {\n \"type\": \"string\",\n \"description\": \"Keys to send for 'send_keys' action\",\n },\n \"seconds\": {\n \"type\": \"integer\",\n \"description\": \"Seconds to wait for 'wait' action\",\n },\n },\n \"required\": [\"action\"],\n \"dependencies\": {\n \"go_to_url\": [\"url\"],\n \"click_element\": [\"index\"],\n \"input_text\": [\"index\", \"text\"],\n \"switch_tab\": [\"tab_id\"],\n \"open_tab\": [\"url\"],\n \"scroll_down\": [\"scroll_amount\"],\n \"scroll_up\": [\"scroll_amount\"],\n \"scroll_to_text\": [\"text\"],\n \"send_keys\": [\"keys\"],\n \"get_dropdown_options\": [\"index\"],\n \"select_dropdown_option\": [\"index\", \"text\"],\n \"go_back\": [],\n \"web_search\": [\"query\"],\n \"wait\": [\"seconds\"],\n \"extract_content\": [\"goal\"],\n },\n }\n\n lock: asyncio.Lock = Field(default_factory=asyncio.Lock)\n browser: Optional[BrowserUseBrowser] = Field(default=None, exclude=True)\n context: Optional[BrowserContext] = Field(default=None, exclude=True)\n dom_service: Optional[DomService] = Field(default=None, exclude=True)\n web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True)\n\n # Context for generic functionality\n tool_context: Optional[Context] = Field(default=None, exclude=True)\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n\n @field_validator(\"parameters\", mode=\"before\")\n def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict:\n if not v:\n raise ValueError(\"Parameters cannot be empty\")\n return v\n\n async def _ensure_browser_initialized(self) -> BrowserContext:\n \"\"\"Ensure browser and context are initialized.\"\"\"\n if self.browser is None:\n browser_config_kwargs = {\"headless\": False, \"disable_security\": True}\n\n if config.browser_config:\n from browser_use.browser.browser import ProxySettings\n\n # handle proxy settings.\n if config.browser_config.proxy and config.browser_config.proxy.server:\n browser_config_kwargs[\"proxy\"] = 
ProxySettings(\n server=config.browser_config.proxy.server,\n username=config.browser_config.proxy.username,\n password=config.browser_config.proxy.password,\n )\n\n browser_attrs = [\n \"headless\",\n \"disable_security\",\n \"extra_chromium_args\",\n \"chrome_instance_path\",\n \"wss_url\",\n \"cdp_url\",\n ]\n\n for attr in browser_attrs:\n value = getattr(config.browser_config, attr, None)\n if value is not None:\n if not isinstance(value, list) or value:\n browser_config_kwargs[attr] = value\n\n self.browser = BrowserUseBrowser(BrowserConfig(**browser_config_kwargs))\n\n if self.context is None:\n context_config = BrowserContextConfig()\n\n # if there is context config in the config, use it.\n if (\n config.browser_config\n and hasattr(config.browser_config, \"new_context_config\")\n and config.browser_config.new_context_config\n ):\n context_config = config.browser_config.new_context_config\n\n self.context = await self.browser.new_context(context_config)\n self.dom_service = DomService(await self.context.get_current_page())\n\n return self.context\n\n async def execute(\n self,\n action: str,\n url: Optional[str] = None,\n index: Optional[int] = None,\n text: Optional[str] = None,\n scroll_amount: Optional[int] = None,\n tab_id: Optional[int] = None,\n query: Optional[str] = None,\n goal: Optional[str] = None,\n keys: Optional[str] = None,\n seconds: Optional[int] = None,\n **kwargs,\n ) -> ToolResult:\n \"\"\"\n Execute a specified browser action.\n\n Args:\n action: The browser action to perform\n url: URL for navigation or new tab\n index: Element index for click or input actions\n text: Text for input action or search query\n scroll_amount: Pixels to scroll for scroll action\n tab_id: Tab ID for switch_tab action\n query: Search query for Google search\n goal: Extraction goal for content extraction\n keys: Keys to send for keyboard actions\n seconds: Seconds to wait\n **kwargs: Additional arguments\n\n Returns:\n ToolResult with the action's output or 
error\n \"\"\"\n async with self.lock:\n try:\n context = await self._ensure_browser_initialized()\n\n # Get max content length from config\n max_content_length = getattr(\n config.browser_config, \"max_content_length\", 2000\n )\n\n # Navigation actions\n if action == \"go_to_url\":\n if not url:\n return ToolResult(\n error=\"URL is required for 'go_to_url' action\"\n )\n page = await context.get_current_page()\n await page.goto(url)\n await page.wait_for_load_state()\n return ToolResult(output=f\"Navigated to {url}\")\n\n elif action == \"go_back\":\n await context.go_back()\n return ToolResult(output=\"Navigated back\")\n\n elif action == \"refresh\":\n await context.refresh_page()\n return ToolResult(output=\"Refreshed current page\")\n\n elif action == \"web_search\":\n if not query:\n return ToolResult(\n error=\"Query is required for 'web_search' action\"\n )\n # Execute the web search and return results directly without browser navigation\n search_response = await self.web_search_tool.execute(\n query=query, fetch_content=True, num_results=1\n )\n # Navigate to the first search result\n first_search_result = search_response.results[0]\n url_to_navigate = first_search_result.url\n\n page = await context.get_current_page()\n await page.goto(url_to_navigate)\n await page.wait_for_load_state()\n\n return search_response\n\n # Element interaction actions\n elif action == \"click_element\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'click_element' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n download_path = await context._click_element_node(element)\n output = f\"Clicked element at index {index}\"\n if download_path:\n output += f\" - Downloaded file to {download_path}\"\n return ToolResult(output=output)\n\n elif action == \"input_text\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text 
are required for 'input_text' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n await context._input_text_element_node(element, text)\n return ToolResult(\n output=f\"Input '{text}' into element at index {index}\"\n )\n\n elif action == \"scroll_down\" or action == \"scroll_up\":\n direction = 1 if action == \"scroll_down\" else -1\n amount = (\n scroll_amount\n if scroll_amount is not None\n else context.config.browser_window_size[\"height\"]\n )\n await context.execute_javascript(\n f\"window.scrollBy(0, {direction * amount});\"\n )\n return ToolResult(\n output=f\"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels\"\n )\n\n elif action == \"scroll_to_text\":\n if not text:\n return ToolResult(\n error=\"Text is required for 'scroll_to_text' action\"\n )\n page = await context.get_current_page()\n try:\n locator = page.get_by_text(text, exact=False)\n await locator.scroll_into_view_if_needed()\n return ToolResult(output=f\"Scrolled to text: '{text}'\")\n except Exception as e:\n return ToolResult(error=f\"Failed to scroll to text: {str(e)}\")\n\n elif action == \"send_keys\":\n if not keys:\n return ToolResult(\n error=\"Keys are required for 'send_keys' action\"\n )\n page = await context.get_current_page()\n await page.keyboard.press(keys)\n return ToolResult(output=f\"Sent keys: {keys}\")\n\n elif action == \"get_dropdown_options\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'get_dropdown_options' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n options = await page.evaluate(\n \"\"\"\n (xpath) => {\n const select = document.evaluate(xpath, document, null,\n XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;\n if (!select) return null;\n return 
Array.from(select.options).map(opt => ({\n text: opt.text,\n value: opt.value,\n index: opt.index\n }));\n }\n \"\"\",\n element.xpath,\n )\n return ToolResult(output=f\"Dropdown options: {options}\")\n\n elif action == \"select_dropdown_option\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'select_dropdown_option' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n await page.select_option(element.xpath, label=text)\n return ToolResult(\n output=f\"Selected option '{text}' from dropdown at index {index}\"\n )\n\n # Content extraction actions\n elif action == \"extract_content\":\n if not goal:\n return ToolResult(\n error=\"Goal is required for 'extract_content' action\"\n )\n\n page = await context.get_current_page()\n import markdownify\n\n content = markdownify.markdownify(await page.content())\n\n prompt = f\"\"\"\\\nYour task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. If the goal is vague, summarize the page. 
Respond in json format.\nExtraction goal: {goal}\n\nPage content:\n{content[:max_content_length]}\n\"\"\"\n messages = [{\"role\": \"system\", \"content\": prompt}]\n\n # Define extraction function schema\n extraction_function = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"extract_content\",\n \"description\": \"Extract specific information from a webpage based on a goal\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"extracted_content\": {\n \"type\": \"object\",\n \"description\": \"The content extracted from the page according to the goal\",\n \"properties\": {\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text content extracted from the page\",\n },\n \"metadata\": {\n \"type\": \"object\",\n \"description\": \"Additional metadata about the extracted content\",\n \"properties\": {\n \"source\": {\n \"type\": \"string\",\n \"description\": \"Source of the extracted content\",\n }\n },\n },\n },\n }\n },\n \"required\": [\"extracted_content\"],\n },\n },\n }\n\n # Use LLM to extract content with required function calling\n response = await self.llm.ask_tool(\n messages,\n tools=[extraction_function],\n tool_choice=\"required\",\n )\n\n if response and response.tool_calls:\n args = json.loads(response.tool_calls[0].function.arguments)\n extracted_content = args.get(\"extracted_content\", {})\n return ToolResult(\n output=f\"Extracted from page:\\n{extracted_content}\\n\"\n )\n\n return ToolResult(output=\"No content was extracted from the page.\")\n\n # Tab management actions\n elif action == \"switch_tab\":\n if tab_id is None:\n return ToolResult(\n error=\"Tab ID is required for 'switch_tab' action\"\n )\n await context.switch_to_tab(tab_id)\n page = await context.get_current_page()\n await page.wait_for_load_state()\n return ToolResult(output=f\"Switched to tab {tab_id}\")\n\n elif action == \"open_tab\":\n if not url:\n return ToolResult(error=\"URL is required for 'open_tab' action\")\n await 
context.create_new_tab(url)\n return ToolResult(output=f\"Opened new tab with {url}\")\n\n elif action == \"close_tab\":\n await context.close_current_tab()\n return ToolResult(output=\"Closed current tab\")\n\n # Utility actions\n elif action == \"wait\":\n seconds_to_wait = seconds if seconds is not None else 3\n await asyncio.sleep(seconds_to_wait)\n return ToolResult(output=f\"Waited for {seconds_to_wait} seconds\")\n\n else:\n return ToolResult(error=f\"Unknown action: {action}\")\n\n except Exception as e:\n return ToolResult(error=f\"Browser action '{action}' failed: {str(e)}\")\n\n async def get_current_state(\n self, context: Optional[BrowserContext] = None\n ) -> ToolResult:\n \"\"\"\n Get the current browser state as a ToolResult.\n If context is not provided, uses self.context.\n \"\"\"\n try:\n # Use provided context or fall back to self.context\n ctx = context or self.context\n if not ctx:\n return ToolResult(error=\"Browser context not initialized\")\n\n state = await ctx.get_state()\n\n # Create a viewport_info dictionary if it doesn't exist\n viewport_height = 0\n if hasattr(state, \"viewport_info\") and state.viewport_info:\n viewport_height = state.viewport_info.height\n elif hasattr(ctx, \"config\") and hasattr(ctx.config, \"browser_window_size\"):\n viewport_height = ctx.config.browser_window_size.get(\"height\", 0)\n\n # Take a screenshot for the state\n page = await ctx.get_current_page()\n\n await page.bring_to_front()\n await page.wait_for_load_state()\n\n screenshot = await page.screenshot(\n full_page=True, animations=\"disabled\", type=\"jpeg\", quality=100\n )\n\n screenshot = base64.b64encode(screenshot).decode(\"utf-8\")\n\n # Build the state info with all required fields\n state_info = {\n \"url\": state.url,\n \"title\": state.title,\n \"tabs\": [tab.model_dump() for tab in state.tabs],\n \"help\": \"[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. 
Clicking on these indices will navigate to or interact with the respective content behind them.\",\n \"interactive_elements\": (\n state.element_tree.clickable_elements_to_string()\n if state.element_tree\n else \"\"\n ),\n \"scroll_info\": {\n \"pixels_above\": getattr(state, \"pixels_above\", 0),\n \"pixels_below\": getattr(state, \"pixels_below\", 0),\n \"total_height\": getattr(state, \"pixels_above\", 0)\n + getattr(state, \"pixels_below\", 0)\n + viewport_height,\n },\n \"viewport_height\": viewport_height,\n }\n\n return ToolResult(\n output=json.dumps(state_info, indent=4, ensure_ascii=False),\n base64_image=screenshot,\n )\n except Exception as e:\n return ToolResult(error=f\"Failed to get browser state: {str(e)}\")\n\n async def cleanup(self):\n \"\"\"Clean up browser resources.\"\"\"\n async with self.lock:\n if self.context is not None:\n await self.context.close()\n self.context = None\n self.dom_service = None\n if self.browser is not None:\n await self.browser.close()\n self.browser = None\n\n def __del__(self):\n \"\"\"Ensure cleanup when object is destroyed.\"\"\"\n if self.browser is not None or self.context is not None:\n try:\n asyncio.run(self.cleanup())\n except RuntimeError:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.cleanup())\n loop.close()\n\n @classmethod\n def create_with_context(cls, context: Context) -> \"BrowserUseTool[Context]\":\n \"\"\"Factory method to create a BrowserUseTool with a specific context.\"\"\"\n tool = cls()\n tool.tool_context = context\n return tool\n"], ["/OpenManus/app/flow/planning.py", "import json\nimport time\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.schema import AgentState, Message, ToolChoice\nfrom app.tool import PlanningTool\n\n\nclass PlanStepStatus(str, Enum):\n \"\"\"Enum class 
defining possible statuses of a plan step\"\"\"\n\n NOT_STARTED = \"not_started\"\n IN_PROGRESS = \"in_progress\"\n COMPLETED = \"completed\"\n BLOCKED = \"blocked\"\n\n @classmethod\n def get_all_statuses(cls) -> list[str]:\n \"\"\"Return a list of all possible step status values\"\"\"\n return [status.value for status in cls]\n\n @classmethod\n def get_active_statuses(cls) -> list[str]:\n \"\"\"Return a list of values representing active statuses (not started or in progress)\"\"\"\n return [cls.NOT_STARTED.value, cls.IN_PROGRESS.value]\n\n @classmethod\n def get_status_marks(cls) -> Dict[str, str]:\n \"\"\"Return a mapping of statuses to their marker symbols\"\"\"\n return {\n cls.COMPLETED.value: \"[✓]\",\n cls.IN_PROGRESS.value: \"[→]\",\n cls.BLOCKED.value: \"[!]\",\n cls.NOT_STARTED.value: \"[ ]\",\n }\n\n\nclass PlanningFlow(BaseFlow):\n \"\"\"A flow that manages planning and execution of tasks using agents.\"\"\"\n\n llm: LLM = Field(default_factory=lambda: LLM())\n planning_tool: PlanningTool = Field(default_factory=PlanningTool)\n executor_keys: List[str] = Field(default_factory=list)\n active_plan_id: str = Field(default_factory=lambda: f\"plan_{int(time.time())}\")\n current_step_index: Optional[int] = None\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Set executor keys before super().__init__\n if \"executors\" in data:\n data[\"executor_keys\"] = data.pop(\"executors\")\n\n # Set plan ID if provided\n if \"plan_id\" in data:\n data[\"active_plan_id\"] = data.pop(\"plan_id\")\n\n # Initialize the planning tool if not provided\n if \"planning_tool\" not in data:\n planning_tool = PlanningTool()\n data[\"planning_tool\"] = planning_tool\n\n # Call parent's init with the processed data\n super().__init__(agents, **data)\n\n # Set executor_keys to all agent keys if not specified\n if not self.executor_keys:\n self.executor_keys = list(self.agents.keys())\n\n def get_executor(self, step_type: 
Optional[str] = None) -> BaseAgent:\n \"\"\"\n Get an appropriate executor agent for the current step.\n Can be extended to select agents based on step type/requirements.\n \"\"\"\n # If step type is provided and matches an agent key, use that agent\n if step_type and step_type in self.agents:\n return self.agents[step_type]\n\n # Otherwise use the first available executor or fall back to primary agent\n for key in self.executor_keys:\n if key in self.agents:\n return self.agents[key]\n\n # Fallback to primary agent\n return self.primary_agent\n\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the planning flow with agents.\"\"\"\n try:\n if not self.primary_agent:\n raise ValueError(\"No primary agent available\")\n\n # Create initial plan if input provided\n if input_text:\n await self._create_initial_plan(input_text)\n\n # Verify plan was created successfully\n if self.active_plan_id not in self.planning_tool.plans:\n logger.error(\n f\"Plan creation failed. Plan ID {self.active_plan_id} not found in planning tool.\"\n )\n return f\"Failed to create plan for: {input_text}\"\n\n result = \"\"\n while True:\n # Get current step to execute\n self.current_step_index, step_info = await self._get_current_step_info()\n\n # Exit if no more steps or plan completed\n if self.current_step_index is None:\n result += await self._finalize_plan()\n break\n\n # Execute current step with appropriate agent\n step_type = step_info.get(\"type\") if step_info else None\n executor = self.get_executor(step_type)\n step_result = await self._execute_step(executor, step_info)\n result += step_result + \"\\n\"\n\n # Check if agent wants to terminate\n if hasattr(executor, \"state\") and executor.state == AgentState.FINISHED:\n break\n\n return result\n except Exception as e:\n logger.error(f\"Error in PlanningFlow: {str(e)}\")\n return f\"Execution failed: {str(e)}\"\n\n async def _create_initial_plan(self, request: str) -> None:\n \"\"\"Create an initial plan based on 
the request using the flow's LLM and PlanningTool.\"\"\"\n logger.info(f\"Creating initial plan with ID: {self.active_plan_id}\")\n\n system_message_content = (\n \"You are a planning assistant. Create a concise, actionable plan with clear steps. \"\n \"Focus on key milestones rather than detailed sub-steps. \"\n \"Optimize for clarity and efficiency.\"\n )\n agents_description = []\n for key in self.executor_keys:\n if key in self.agents:\n agents_description.append(\n {\n \"name\": key.upper(),\n \"description\": self.agents[key].description,\n }\n )\n if len(agents_description) > 1:\n # Add description of agents to select\n system_message_content += (\n f\"\\nNow we have {agents_description} agents. \"\n f\"The infomation of them are below: {json.dumps(agents_description)}\\n\"\n \"When creating steps in the planning tool, please specify the agent names using the format '[agent_name]'.\"\n )\n\n # Create a system message for plan creation\n system_message = Message.system_message(system_message_content)\n\n # Create a user message with the request\n user_message = Message.user_message(\n f\"Create a reasonable plan with clear steps to accomplish the task: {request}\"\n )\n\n # Call LLM with PlanningTool\n response = await self.llm.ask_tool(\n messages=[user_message],\n system_msgs=[system_message],\n tools=[self.planning_tool.to_param()],\n tool_choice=ToolChoice.AUTO,\n )\n\n # Process tool calls if present\n if response.tool_calls:\n for tool_call in response.tool_calls:\n if tool_call.function.name == \"planning\":\n # Parse the arguments\n args = tool_call.function.arguments\n if isinstance(args, str):\n try:\n args = json.loads(args)\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse tool arguments: {args}\")\n continue\n\n # Ensure plan_id is set correctly and execute the tool\n args[\"plan_id\"] = self.active_plan_id\n\n # Execute the tool via ToolCollection instead of directly\n result = await self.planning_tool.execute(**args)\n\n 
logger.info(f\"Plan creation result: {str(result)}\")\n return\n\n # If execution reached here, create a default plan\n logger.warning(\"Creating default plan\")\n\n # Create default plan using the ToolCollection\n await self.planning_tool.execute(\n **{\n \"command\": \"create\",\n \"plan_id\": self.active_plan_id,\n \"title\": f\"Plan for: {request[:50]}{'...' if len(request) > 50 else ''}\",\n \"steps\": [\"Analyze request\", \"Execute task\", \"Verify results\"],\n }\n )\n\n async def _get_current_step_info(self) -> tuple[Optional[int], Optional[dict]]:\n \"\"\"\n Parse the current plan to identify the first non-completed step's index and info.\n Returns (None, None) if no active step is found.\n \"\"\"\n if (\n not self.active_plan_id\n or self.active_plan_id not in self.planning_tool.plans\n ):\n logger.error(f\"Plan with ID {self.active_plan_id} not found\")\n return None, None\n\n try:\n # Direct access to plan data from planning tool storage\n plan_data = self.planning_tool.plans[self.active_plan_id]\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Find first non-completed step\n for i, step in enumerate(steps):\n if i >= len(step_statuses):\n status = PlanStepStatus.NOT_STARTED.value\n else:\n status = step_statuses[i]\n\n if status in PlanStepStatus.get_active_statuses():\n # Extract step type/category if available\n step_info = {\"text\": step}\n\n # Try to extract step type from the text (e.g., [SEARCH] or [CODE])\n import re\n\n type_match = re.search(r\"\\[([A-Z_]+)\\]\", step)\n if type_match:\n step_info[\"type\"] = type_match.group(1).lower()\n\n # Mark current step as in_progress\n try:\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=i,\n step_status=PlanStepStatus.IN_PROGRESS.value,\n )\n except Exception as e:\n logger.warning(f\"Error marking step as in_progress: {e}\")\n # Update step status directly if needed\n if i < 
len(step_statuses):\n step_statuses[i] = PlanStepStatus.IN_PROGRESS.value\n else:\n while len(step_statuses) < i:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n step_statuses.append(PlanStepStatus.IN_PROGRESS.value)\n\n plan_data[\"step_statuses\"] = step_statuses\n\n return i, step_info\n\n return None, None # No active step found\n\n except Exception as e:\n logger.warning(f\"Error finding current step index: {e}\")\n return None, None\n\n async def _execute_step(self, executor: BaseAgent, step_info: dict) -> str:\n \"\"\"Execute the current step with the specified agent using agent.run().\"\"\"\n # Prepare context for the agent with current plan status\n plan_status = await self._get_plan_text()\n step_text = step_info.get(\"text\", f\"Step {self.current_step_index}\")\n\n # Create a prompt for the agent to execute the current step\n step_prompt = f\"\"\"\n CURRENT PLAN STATUS:\n {plan_status}\n\n YOUR CURRENT TASK:\n You are now working on step {self.current_step_index}: \"{step_text}\"\n\n Please only execute this current step using the appropriate tools. 
When you're done, provide a summary of what you accomplished.\n \"\"\"\n\n # Use agent.run() to execute the step\n try:\n step_result = await executor.run(step_prompt)\n\n # Mark the step as completed after successful execution\n await self._mark_step_completed()\n\n return step_result\n except Exception as e:\n logger.error(f\"Error executing step {self.current_step_index}: {e}\")\n return f\"Error executing step {self.current_step_index}: {str(e)}\"\n\n async def _mark_step_completed(self) -> None:\n \"\"\"Mark the current step as completed.\"\"\"\n if self.current_step_index is None:\n return\n\n try:\n # Mark the step as completed\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=self.current_step_index,\n step_status=PlanStepStatus.COMPLETED.value,\n )\n logger.info(\n f\"Marked step {self.current_step_index} as completed in plan {self.active_plan_id}\"\n )\n except Exception as e:\n logger.warning(f\"Failed to update plan status: {e}\")\n # Update step status directly in planning tool storage\n if self.active_plan_id in self.planning_tool.plans:\n plan_data = self.planning_tool.plans[self.active_plan_id]\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Ensure the step_statuses list is long enough\n while len(step_statuses) <= self.current_step_index:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n\n # Update the status\n step_statuses[self.current_step_index] = PlanStepStatus.COMPLETED.value\n plan_data[\"step_statuses\"] = step_statuses\n\n async def _get_plan_text(self) -> str:\n \"\"\"Get the current plan as formatted text.\"\"\"\n try:\n result = await self.planning_tool.execute(\n command=\"get\", plan_id=self.active_plan_id\n )\n return result.output if hasattr(result, \"output\") else str(result)\n except Exception as e:\n logger.error(f\"Error getting plan: {e}\")\n return self._generate_plan_text_from_storage()\n\n def _generate_plan_text_from_storage(self) -> str:\n 
\"\"\"Generate plan text directly from storage if the planning tool fails.\"\"\"\n try:\n if self.active_plan_id not in self.planning_tool.plans:\n return f\"Error: Plan with ID {self.active_plan_id} not found\"\n\n plan_data = self.planning_tool.plans[self.active_plan_id]\n title = plan_data.get(\"title\", \"Untitled Plan\")\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n step_notes = plan_data.get(\"step_notes\", [])\n\n # Ensure step_statuses and step_notes match the number of steps\n while len(step_statuses) < len(steps):\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n while len(step_notes) < len(steps):\n step_notes.append(\"\")\n\n # Count steps by status\n status_counts = {status: 0 for status in PlanStepStatus.get_all_statuses()}\n\n for status in step_statuses:\n if status in status_counts:\n status_counts[status] += 1\n\n completed = status_counts[PlanStepStatus.COMPLETED.value]\n total = len(steps)\n progress = (completed / total) * 100 if total > 0 else 0\n\n plan_text = f\"Plan: {title} (ID: {self.active_plan_id})\\n\"\n plan_text += \"=\" * len(plan_text) + \"\\n\\n\"\n\n plan_text += (\n f\"Progress: {completed}/{total} steps completed ({progress:.1f}%)\\n\"\n )\n plan_text += f\"Status: {status_counts[PlanStepStatus.COMPLETED.value]} completed, {status_counts[PlanStepStatus.IN_PROGRESS.value]} in progress, \"\n plan_text += f\"{status_counts[PlanStepStatus.BLOCKED.value]} blocked, {status_counts[PlanStepStatus.NOT_STARTED.value]} not started\\n\\n\"\n plan_text += \"Steps:\\n\"\n\n status_marks = PlanStepStatus.get_status_marks()\n\n for i, (step, status, notes) in enumerate(\n zip(steps, step_statuses, step_notes)\n ):\n # Use status marks to indicate step status\n status_mark = status_marks.get(\n status, status_marks[PlanStepStatus.NOT_STARTED.value]\n )\n\n plan_text += f\"{i}. 
{status_mark} {step}\\n\"\n if notes:\n plan_text += f\" Notes: {notes}\\n\"\n\n return plan_text\n except Exception as e:\n logger.error(f\"Error generating plan text from storage: {e}\")\n return f\"Error: Unable to retrieve plan with ID {self.active_plan_id}\"\n\n async def _finalize_plan(self) -> str:\n \"\"\"Finalize the plan and provide a summary using the flow's LLM directly.\"\"\"\n plan_text = await self._get_plan_text()\n\n # Create a summary using the flow's LLM directly\n try:\n system_message = Message.system_message(\n \"You are a planning assistant. Your task is to summarize the completed plan.\"\n )\n\n user_message = Message.user_message(\n f\"The plan has been completed. Here is the final plan status:\\n\\n{plan_text}\\n\\nPlease provide a summary of what was accomplished and any final thoughts.\"\n )\n\n response = await self.llm.ask(\n messages=[user_message], system_msgs=[system_message]\n )\n\n return f\"Plan completed:\\n\\n{response}\"\n except Exception as e:\n logger.error(f\"Error finalizing plan with LLM: {e}\")\n\n # Fallback to using an agent for the summary\n try:\n agent = self.primary_agent\n summary_prompt = f\"\"\"\n The plan has been completed. Here is the final plan status:\n\n {plan_text}\n\n Please provide a summary of what was accomplished and any final thoughts.\n \"\"\"\n summary = await agent.run(summary_prompt)\n return f\"Plan completed:\\n\\n{summary}\"\n except Exception as e2:\n logger.error(f\"Error finalizing plan with agent: {e2}\")\n return \"Plan completed. 
Error generating summary.\"\n"], ["/OpenManus/protocol/a2a/app/agent_executor.py", "import logging\n\nfrom a2a.server.agent_execution import AgentExecutor, RequestContext\nfrom a2a.server.events import Event, EventQueue\nfrom a2a.server.tasks import TaskUpdater\nfrom a2a.types import (\n InvalidParamsError,\n Part,\n Task,\n TextPart,\n UnsupportedOperationError,\n)\nfrom a2a.utils import (\n completed_task,\n new_artifact,\n)\nfrom .agent import A2AManus\nfrom a2a.utils.errors import ServerError\nfrom typing import Callable, Awaitable\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ManusExecutor(AgentExecutor):\n \"\"\"Currency Conversion AgentExecutor Example.\"\"\"\n\n def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):\n self.agent_factory = agent_factory\n\n async def execute(\n self,\n context: RequestContext,\n event_queue: EventQueue,\n ) -> None:\n error = self._validate_request(context)\n if error:\n raise ServerError(error=InvalidParamsError())\n\n query = context.get_user_input()\n try:\n self.agent = await self.agent_factory()\n result = await self.agent.invoke(query, context.context_id)\n print(f\"Final Result ===> {result}\")\n except Exception as e:\n print(\"Error invoking agent: %s\", e)\n raise ServerError(error=ValueError(f\"Error invoking agent: {e}\")) from e\n parts = [\n Part(\n root=TextPart(\n text=(\n result[\"content\"]\n if result[\"content\"]\n else \"failed to generate response\"\n )\n ),\n )\n ]\n event_queue.enqueue_event(\n completed_task(\n context.task_id,\n context.context_id,\n [new_artifact(parts, f\"task_{context.task_id}\")],\n [context.message],\n )\n )\n\n def _validate_request(self, context: RequestContext) -> bool:\n return False\n\n async def cancel(\n self, request: RequestContext, event_queue: EventQueue\n ) -> Task | None:\n raise ServerError(error=UnsupportedOperationError())\n"], ["/OpenManus/app/config.py", "import json\nimport threading\nimport 
tomllib\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\ndef get_project_root() -> Path:\n \"\"\"Get the project root directory\"\"\"\n return Path(__file__).resolve().parent.parent\n\n\nPROJECT_ROOT = get_project_root()\nWORKSPACE_ROOT = PROJECT_ROOT / \"workspace\"\n\n\nclass LLMSettings(BaseModel):\n model: str = Field(..., description=\"Model name\")\n base_url: str = Field(..., description=\"API base URL\")\n api_key: str = Field(..., description=\"API key\")\n max_tokens: int = Field(4096, description=\"Maximum number of tokens per request\")\n max_input_tokens: Optional[int] = Field(\n None,\n description=\"Maximum input tokens to use across all requests (None for unlimited)\",\n )\n temperature: float = Field(1.0, description=\"Sampling temperature\")\n api_type: str = Field(..., description=\"Azure, Openai, or Ollama\")\n api_version: str = Field(..., description=\"Azure Openai version if AzureOpenai\")\n\n\nclass ProxySettings(BaseModel):\n server: str = Field(None, description=\"Proxy server address\")\n username: Optional[str] = Field(None, description=\"Proxy username\")\n password: Optional[str] = Field(None, description=\"Proxy password\")\n\n\nclass SearchSettings(BaseModel):\n engine: str = Field(default=\"Google\", description=\"Search engine the llm to use\")\n fallback_engines: List[str] = Field(\n default_factory=lambda: [\"DuckDuckGo\", \"Baidu\", \"Bing\"],\n description=\"Fallback search engines to try if the primary engine fails\",\n )\n retry_delay: int = Field(\n default=60,\n description=\"Seconds to wait before retrying all engines again after they all fail\",\n )\n max_retries: int = Field(\n default=3,\n description=\"Maximum number of times to retry all engines when all fail\",\n )\n lang: str = Field(\n default=\"en\",\n description=\"Language code for search results (e.g., en, zh, fr)\",\n )\n country: str = Field(\n default=\"us\",\n description=\"Country code for 
search results (e.g., us, cn, uk)\",\n )\n\n\nclass RunflowSettings(BaseModel):\n use_data_analysis_agent: bool = Field(\n default=False, description=\"Enable data analysis agent in run flow\"\n )\n\n\nclass BrowserSettings(BaseModel):\n headless: bool = Field(False, description=\"Whether to run browser in headless mode\")\n disable_security: bool = Field(\n True, description=\"Disable browser security features\"\n )\n extra_chromium_args: List[str] = Field(\n default_factory=list, description=\"Extra arguments to pass to the browser\"\n )\n chrome_instance_path: Optional[str] = Field(\n None, description=\"Path to a Chrome instance to use\"\n )\n wss_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via WebSocket\"\n )\n cdp_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via CDP\"\n )\n proxy: Optional[ProxySettings] = Field(\n None, description=\"Proxy settings for the browser\"\n )\n max_content_length: int = Field(\n 2000, description=\"Maximum length for content retrieval operations\"\n )\n\n\nclass SandboxSettings(BaseModel):\n \"\"\"Configuration for the execution sandbox\"\"\"\n\n use_sandbox: bool = Field(False, description=\"Whether to use the sandbox\")\n image: str = Field(\"python:3.12-slim\", description=\"Base image\")\n work_dir: str = Field(\"/workspace\", description=\"Container working directory\")\n memory_limit: str = Field(\"512m\", description=\"Memory limit\")\n cpu_limit: float = Field(1.0, description=\"CPU limit\")\n timeout: int = Field(300, description=\"Default command timeout (seconds)\")\n network_enabled: bool = Field(\n False, description=\"Whether network access is allowed\"\n )\n\n\nclass MCPServerConfig(BaseModel):\n \"\"\"Configuration for a single MCP server\"\"\"\n\n type: str = Field(..., description=\"Server connection type (sse or stdio)\")\n url: Optional[str] = Field(None, description=\"Server URL for SSE connections\")\n command: Optional[str] = Field(None, 
description=\"Command for stdio connections\")\n args: List[str] = Field(\n default_factory=list, description=\"Arguments for stdio command\"\n )\n\n\nclass MCPSettings(BaseModel):\n \"\"\"Configuration for MCP (Model Context Protocol)\"\"\"\n\n server_reference: str = Field(\n \"app.mcp.server\", description=\"Module reference for the MCP server\"\n )\n servers: Dict[str, MCPServerConfig] = Field(\n default_factory=dict, description=\"MCP server configurations\"\n )\n\n @classmethod\n def load_server_config(cls) -> Dict[str, MCPServerConfig]:\n \"\"\"Load MCP server configuration from JSON file\"\"\"\n config_path = PROJECT_ROOT / \"config\" / \"mcp.json\"\n\n try:\n config_file = config_path if config_path.exists() else None\n if not config_file:\n return {}\n\n with config_file.open() as f:\n data = json.load(f)\n servers = {}\n\n for server_id, server_config in data.get(\"mcpServers\", {}).items():\n servers[server_id] = MCPServerConfig(\n type=server_config[\"type\"],\n url=server_config.get(\"url\"),\n command=server_config.get(\"command\"),\n args=server_config.get(\"args\", []),\n )\n return servers\n except Exception as e:\n raise ValueError(f\"Failed to load MCP server config: {e}\")\n\n\nclass AppConfig(BaseModel):\n llm: Dict[str, LLMSettings]\n sandbox: Optional[SandboxSettings] = Field(\n None, description=\"Sandbox configuration\"\n )\n browser_config: Optional[BrowserSettings] = Field(\n None, description=\"Browser configuration\"\n )\n search_config: Optional[SearchSettings] = Field(\n None, description=\"Search configuration\"\n )\n mcp_config: Optional[MCPSettings] = Field(None, description=\"MCP configuration\")\n run_flow_config: Optional[RunflowSettings] = Field(\n None, description=\"Run flow configuration\"\n )\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass Config:\n _instance = None\n _lock = threading.Lock()\n _initialized = False\n\n def __new__(cls):\n if cls._instance is None:\n with cls._lock:\n if cls._instance is 
None:\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n if not self._initialized:\n with self._lock:\n if not self._initialized:\n self._config = None\n self._load_initial_config()\n self._initialized = True\n\n @staticmethod\n def _get_config_path() -> Path:\n root = PROJECT_ROOT\n config_path = root / \"config\" / \"config.toml\"\n if config_path.exists():\n return config_path\n example_path = root / \"config\" / \"config.example.toml\"\n if example_path.exists():\n return example_path\n raise FileNotFoundError(\"No configuration file found in config directory\")\n\n def _load_config(self) -> dict:\n config_path = self._get_config_path()\n with config_path.open(\"rb\") as f:\n return tomllib.load(f)\n\n def _load_initial_config(self):\n raw_config = self._load_config()\n base_llm = raw_config.get(\"llm\", {})\n llm_overrides = {\n k: v for k, v in raw_config.get(\"llm\", {}).items() if isinstance(v, dict)\n }\n\n default_settings = {\n \"model\": base_llm.get(\"model\"),\n \"base_url\": base_llm.get(\"base_url\"),\n \"api_key\": base_llm.get(\"api_key\"),\n \"max_tokens\": base_llm.get(\"max_tokens\", 4096),\n \"max_input_tokens\": base_llm.get(\"max_input_tokens\"),\n \"temperature\": base_llm.get(\"temperature\", 1.0),\n \"api_type\": base_llm.get(\"api_type\", \"\"),\n \"api_version\": base_llm.get(\"api_version\", \"\"),\n }\n\n # handle browser config.\n browser_config = raw_config.get(\"browser\", {})\n browser_settings = None\n\n if browser_config:\n # handle proxy settings.\n proxy_config = browser_config.get(\"proxy\", {})\n proxy_settings = None\n\n if proxy_config and proxy_config.get(\"server\"):\n proxy_settings = ProxySettings(\n **{\n k: v\n for k, v in proxy_config.items()\n if k in [\"server\", \"username\", \"password\"] and v\n }\n )\n\n # filter valid browser config parameters.\n valid_browser_params = {\n k: v\n for k, v in browser_config.items()\n if k in BrowserSettings.__annotations__ and v is not 
None\n }\n\n # if there is proxy settings, add it to the parameters.\n if proxy_settings:\n valid_browser_params[\"proxy\"] = proxy_settings\n\n # only create BrowserSettings when there are valid parameters.\n if valid_browser_params:\n browser_settings = BrowserSettings(**valid_browser_params)\n\n search_config = raw_config.get(\"search\", {})\n search_settings = None\n if search_config:\n search_settings = SearchSettings(**search_config)\n sandbox_config = raw_config.get(\"sandbox\", {})\n if sandbox_config:\n sandbox_settings = SandboxSettings(**sandbox_config)\n else:\n sandbox_settings = SandboxSettings()\n\n mcp_config = raw_config.get(\"mcp\", {})\n mcp_settings = None\n if mcp_config:\n # Load server configurations from JSON\n mcp_config[\"servers\"] = MCPSettings.load_server_config()\n mcp_settings = MCPSettings(**mcp_config)\n else:\n mcp_settings = MCPSettings(servers=MCPSettings.load_server_config())\n\n run_flow_config = raw_config.get(\"runflow\")\n if run_flow_config:\n run_flow_settings = RunflowSettings(**run_flow_config)\n else:\n run_flow_settings = RunflowSettings()\n config_dict = {\n \"llm\": {\n \"default\": default_settings,\n **{\n name: {**default_settings, **override_config}\n for name, override_config in llm_overrides.items()\n },\n },\n \"sandbox\": sandbox_settings,\n \"browser_config\": browser_settings,\n \"search_config\": search_settings,\n \"mcp_config\": mcp_settings,\n \"run_flow_config\": run_flow_settings,\n }\n\n self._config = AppConfig(**config_dict)\n\n @property\n def llm(self) -> Dict[str, LLMSettings]:\n return self._config.llm\n\n @property\n def sandbox(self) -> SandboxSettings:\n return self._config.sandbox\n\n @property\n def browser_config(self) -> Optional[BrowserSettings]:\n return self._config.browser_config\n\n @property\n def search_config(self) -> Optional[SearchSettings]:\n return self._config.search_config\n\n @property\n def mcp_config(self) -> MCPSettings:\n \"\"\"Get the MCP configuration\"\"\"\n return 
self._config.mcp_config\n\n @property\n def run_flow_config(self) -> RunflowSettings:\n \"\"\"Get the Run Flow configuration\"\"\"\n return self._config.run_flow_config\n\n @property\n def workspace_root(self) -> Path:\n \"\"\"Get the workspace root directory\"\"\"\n return WORKSPACE_ROOT\n\n @property\n def root_path(self) -> Path:\n \"\"\"Get the root path of the application\"\"\"\n return PROJECT_ROOT\n\n\nconfig = Config()\n"], ["/OpenManus/app/tool/chart_visualization/data_visualization.py", "import asyncio\nimport json\nimport os\nfrom typing import Any, Hashable\n\nimport pandas as pd\nfrom pydantic import Field, model_validator\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\n\n\nclass DataVisualization(BaseTool):\n name: str = \"data_visualization\"\n description: str = \"\"\"Visualize statistical chart or Add insights in chart with JSON info from visualization_preparation tool. You can do steps as follows:\n1. Visualize statistical chart\n2. Choose insights into chart based on step 1 (Optional)\nOutputs:\n1. Charts (png/html)\n2. 
Charts Insights (.md)(Optional)\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"json_path\": {\n \"type\": \"string\",\n \"description\": \"\"\"file path of json info with \".json\" in the end\"\"\",\n },\n \"output_type\": {\n \"description\": \"Rendering format (html=interactive)\",\n \"type\": \"string\",\n \"default\": \"html\",\n \"enum\": [\"png\", \"html\"],\n },\n \"tool_type\": {\n \"description\": \"visualize chart or add insights\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"language\": {\n \"description\": \"english(en) / chinese(zh)\",\n \"type\": \"string\",\n \"default\": \"en\",\n \"enum\": [\"zh\", \"en\"],\n },\n },\n \"required\": [\"code\"],\n }\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n\n @model_validator(mode=\"after\")\n def initialize_llm(self):\n \"\"\"Initialize llm with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n return self\n\n def get_file_path(\n self,\n json_info: list[dict[str, str]],\n path_str: str,\n directory: str = None,\n ) -> list[str]:\n res = []\n for item in json_info:\n if os.path.exists(item[path_str]):\n res.append(item[path_str])\n elif os.path.exists(\n os.path.join(f\"{directory or config.workspace_root}\", item[path_str])\n ):\n res.append(\n os.path.join(\n f\"{directory or config.workspace_root}\", item[path_str]\n )\n )\n else:\n raise Exception(f\"No such file or directory: {item[path_str]}\")\n return res\n\n def success_output_template(self, result: list[dict[str, str]]) -> str:\n content = \"\"\n if len(result) == 0:\n return \"Is EMPTY!\"\n for item in result:\n content += f\"\"\"## {item['title']}\\nChart saved in: {item['chart_path']}\"\"\"\n if \"insight_path\" in item and item[\"insight_path\"] and \"insight_md\" in item:\n content += \"\\n\" + item[\"insight_md\"]\n 
else:\n content += \"\\n\"\n return f\"Chart Generated Successful!\\n{content}\"\n\n async def data_visualization(\n self, json_info: list[dict[str, str]], output_type: str, language: str\n ) -> str:\n data_list = []\n csv_file_path = self.get_file_path(json_info, \"csvFilePath\")\n for index, item in enumerate(json_info):\n df = pd.read_csv(csv_file_path[index], encoding=\"utf-8\")\n df = df.astype(object)\n df = df.where(pd.notnull(df), None)\n data_dict_list = df.to_json(orient=\"records\", force_ascii=False)\n\n data_list.append(\n {\n \"file_name\": os.path.basename(csv_file_path[index]).replace(\n \".csv\", \"\"\n ),\n \"dict_data\": data_dict_list,\n \"chartTitle\": item[\"chartTitle\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n dict_data=item[\"dict_data\"],\n chart_description=item[\"chartTitle\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"visualization\",\n language=language,\n )\n for item in data_list\n ]\n\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n csv_path = csv_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {csv_path}: {result['error']}\")\n else:\n success_list.append(\n {\n **result,\n \"title\": json_info[index][\"chartTitle\"],\n }\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error chart generated{'\\n'.join(error_list)}\\n{self.success_output_template(success_list)}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{self.success_output_template(success_list)}\"}\n\n async def add_insighs(\n self, json_info: list[dict[str, str]], output_type: str\n ) -> str:\n data_list = []\n chart_file_path = self.get_file_path(\n json_info, \"chartPath\", os.path.join(config.workspace_root, \"visualization\")\n )\n for index, item in enumerate(json_info):\n if \"insights_id\" in item:\n data_list.append(\n {\n \"file_name\": 
os.path.basename(chart_file_path[index]).replace(\n f\".{output_type}\", \"\"\n ),\n \"insights_id\": item[\"insights_id\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n insights_id=item[\"insights_id\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"insight\",\n )\n for item in data_list\n ]\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n chart_path = chart_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {chart_path}: {result['error']}\")\n else:\n success_list.append(chart_path)\n success_template = (\n f\"# Charts Update with Insights\\n{','.join(success_list)}\"\n if len(success_list) > 0\n else \"\"\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error in chart insights:{'\\n'.join(error_list)}\\n{success_template}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{success_template}\"}\n\n async def execute(\n self,\n json_path: str,\n output_type: str | None = \"html\",\n tool_type: str | None = \"visualization\",\n language: str | None = \"en\",\n ) -> str:\n try:\n logger.info(f\"📈 data_visualization with {json_path} in: {tool_type} \")\n with open(json_path, \"r\", encoding=\"utf-8\") as file:\n json_info = json.load(file)\n if tool_type == \"visualization\":\n return await self.data_visualization(json_info, output_type, language)\n else:\n return await self.add_insighs(json_info, output_type)\n except Exception as e:\n return {\n \"observation\": f\"Error: {e}\",\n \"success\": False,\n }\n\n async def invoke_vmind(\n self,\n file_name: str,\n output_type: str,\n task_type: str,\n insights_id: list[str] = None,\n dict_data: list[dict[Hashable, Any]] = None,\n chart_description: str = None,\n language: str = \"en\",\n ):\n llm_config = {\n \"base_url\": self.llm.base_url,\n \"model\": self.llm.model,\n \"api_key\": self.llm.api_key,\n }\n vmind_params = {\n 
\"llm_config\": llm_config,\n \"user_prompt\": chart_description,\n \"dataset\": dict_data,\n \"file_name\": file_name,\n \"output_type\": output_type,\n \"insights_id\": insights_id,\n \"task_type\": task_type,\n \"directory\": str(config.workspace_root),\n \"language\": language,\n }\n # build async sub process\n process = await asyncio.create_subprocess_exec(\n \"npx\",\n \"ts-node\",\n \"src/chartVisualize.ts\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=os.path.dirname(__file__),\n )\n input_json = json.dumps(vmind_params, ensure_ascii=False).encode(\"utf-8\")\n try:\n stdout, stderr = await process.communicate(input_json)\n stdout_str = stdout.decode(\"utf-8\")\n stderr_str = stderr.decode(\"utf-8\")\n if process.returncode == 0:\n return json.loads(stdout_str)\n else:\n return {\"error\": f\"Node.js Error: {stderr_str}\"}\n except Exception as e:\n return {\"error\": f\"Subprocess Error: {str(e)}\"}\n"], ["/OpenManus/app/schema.py", "from enum import Enum\nfrom typing import Any, List, Literal, Optional, Union\n\nfrom pydantic import BaseModel, Field\n\n\nclass Role(str, Enum):\n \"\"\"Message role options\"\"\"\n\n SYSTEM = \"system\"\n USER = \"user\"\n ASSISTANT = \"assistant\"\n TOOL = \"tool\"\n\n\nROLE_VALUES = tuple(role.value for role in Role)\nROLE_TYPE = Literal[ROLE_VALUES] # type: ignore\n\n\nclass ToolChoice(str, Enum):\n \"\"\"Tool choice options\"\"\"\n\n NONE = \"none\"\n AUTO = \"auto\"\n REQUIRED = \"required\"\n\n\nTOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)\nTOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore\n\n\nclass AgentState(str, Enum):\n \"\"\"Agent execution states\"\"\"\n\n IDLE = \"IDLE\"\n RUNNING = \"RUNNING\"\n FINISHED = \"FINISHED\"\n ERROR = \"ERROR\"\n\n\nclass Function(BaseModel):\n name: str\n arguments: str\n\n\nclass ToolCall(BaseModel):\n \"\"\"Represents a tool/function call in a message\"\"\"\n\n id: str\n type: str = 
class Message(BaseModel):
    """Represents a chat message in the conversation"""

    role: ROLE_TYPE = Field(...)  # type: ignore
    content: Optional[str] = Field(default=None)
    tool_calls: Optional[List[ToolCall]] = Field(default=None)
    name: Optional[str] = Field(default=None)
    tool_call_id: Optional[str] = Field(default=None)
    base64_image: Optional[str] = Field(default=None)

    def __add__(self, other) -> List["Message"]:
        """Support Message + list and Message + Message, returning a list."""
        if isinstance(other, list):
            return [self] + other
        elif isinstance(other, Message):
            return [self, other]
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'"
            )

    def __radd__(self, other) -> List["Message"]:
        """Support list + Message, returning a list."""
        if isinstance(other, list):
            return other + [self]
        else:
            raise TypeError(
                f"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'"
            )

    def to_dict(self) -> dict:
        """Convert message to dictionary format.

        Only fields that are set (not None) are included in the result.
        """
        message = {"role": self.role}
        if self.content is not None:
            message["content"] = self.content
        if self.tool_calls is not None:
            # FIX: use pydantic-v2 model_dump(); .dict() is the deprecated v1
            # API, and from_tool_calls below already uses model_dump().
            message["tool_calls"] = [
                tool_call.model_dump() for tool_call in self.tool_calls
            ]
        if self.name is not None:
            message["name"] = self.name
        if self.tool_call_id is not None:
            message["tool_call_id"] = self.tool_call_id
        if self.base64_image is not None:
            message["base64_image"] = self.base64_image
        return message

    @classmethod
    def user_message(
        cls, content: str, base64_image: Optional[str] = None
    ) -> "Message":
        """Create a user message"""
        return cls(role=Role.USER, content=content, base64_image=base64_image)

    @classmethod
    def system_message(cls, content: str) -> "Message":
        """Create a system message"""
        return cls(role=Role.SYSTEM, content=content)

    @classmethod
    def assistant_message(
        cls, content: Optional[str] = None, base64_image: Optional[str] = None
    ) -> "Message":
        """Create an assistant message"""
        return cls(role=Role.ASSISTANT, content=content, base64_image=base64_image)

    @classmethod
    def tool_message(
        cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None
    ) -> "Message":
        """Create a tool message"""
        return cls(
            role=Role.TOOL,
            content=content,
            name=name,
            tool_call_id=tool_call_id,
            base64_image=base64_image,
        )

    @classmethod
    def from_tool_calls(
        cls,
        tool_calls: List[Any],
        content: Union[str, List[str]] = "",
        base64_image: Optional[str] = None,
        **kwargs,
    ):
        """Create ToolCallsMessage from raw tool calls.

        Args:
            tool_calls: Raw tool calls from LLM
            content: Optional message content
            base64_image: Optional base64 encoded image
        """
        formatted_calls = [
            {"id": call.id, "function": call.function.model_dump(), "type": "function"}
            for call in tool_calls
        ]
        return cls(
            role=Role.ASSISTANT,
            content=content,
            tool_calls=formatted_calls,
            base64_image=base64_image,
            **kwargs,
        )
class Memory(BaseModel):
    """Bounded message store for an agent conversation."""

    messages: List[Message] = Field(default_factory=list)
    max_messages: int = Field(default=100)

    def _enforce_limit(self) -> None:
        """Keep only the newest max_messages entries (shared by both adders)."""
        if len(self.messages) > self.max_messages:
            self.messages = self.messages[-self.max_messages :]

    def add_message(self, message: Message) -> None:
        """Add a message to memory"""
        self.messages.append(message)
        self._enforce_limit()

    def add_messages(self, messages: List[Message]) -> None:
        """Add multiple messages to memory"""
        self.messages.extend(messages)
        self._enforce_limit()

    def clear(self) -> None:
        """Clear all messages"""
        self.messages.clear()

    def get_recent_messages(self, n: int) -> List[Message]:
        """Get n most recent messages"""
        return self.messages[-n:]

    def to_dict_list(self) -> List[dict]:
        """Convert messages to list of dicts"""
        return [msg.to_dict() for msg in self.messages]
class PlanningTool(BaseTool):
    """
    A planning tool that allows the agent to create and manage plans for solving complex tasks.
    The tool provides functionality for creating plans, updating plan steps, and tracking progress.
    """

    name: str = "planning"
    description: str = _PLANNING_TOOL_DESCRIPTION
    # JSON schema exposed to the LLM for function calling; `command` selects
    # which private handler runs, the rest are per-command parameters.
    parameters: dict = {
        "type": "object",
        "properties": {
            "command": {
                "description": "The command to execute. Available commands: create, update, list, get, set_active, mark_step, delete.",
                "enum": [
                    "create",
                    "update",
                    "list",
                    "get",
                    "set_active",
                    "mark_step",
                    "delete",
                ],
                "type": "string",
            },
            "plan_id": {
                "description": "Unique identifier for the plan. Required for create, update, set_active, and delete commands. Optional for get and mark_step (uses active plan if not specified).",
                "type": "string",
            },
            "title": {
                "description": "Title for the plan. Required for create command, optional for update command.",
                "type": "string",
            },
            "steps": {
                "description": "List of plan steps. Required for create command, optional for update command.",
                "type": "array",
                "items": {"type": "string"},
            },
            "step_index": {
                "description": "Index of the step to update (0-based). Required for mark_step command.",
                "type": "integer",
            },
            "step_status": {
                "description": "Status to set for a step. Used with mark_step command.",
                "enum": ["not_started", "in_progress", "completed", "blocked"],
                "type": "string",
            },
            "step_notes": {
                "description": "Additional notes for a step. Optional for mark_step command.",
                "type": "string",
            },
        },
        "required": ["command"],
        "additionalProperties": False,
    }

    plans: dict = {}  # Dictionary to store plans by plan_id
    _current_plan_id: Optional[str] = None  # Track the current active plan

    async def execute(
        self,
        *,
        command: Literal[
            "create", "update", "list", "get", "set_active", "mark_step", "delete"
        ],
        plan_id: Optional[str] = None,
        title: Optional[str] = None,
        steps: Optional[List[str]] = None,
        step_index: Optional[int] = None,
        step_status: Optional[
            Literal["not_started", "in_progress", "completed", "blocked"]
        ] = None,
        step_notes: Optional[str] = None,
        **kwargs,
    ):
        """
        Execute the planning tool with the given command and parameters.

        Parameters:
        - command: The operation to perform
        - plan_id: Unique identifier for the plan
        - title: Title for the plan (used with create command)
        - steps: List of steps for the plan (used with create command)
        - step_index: Index of the step to update (used with mark_step command)
        - step_status: Status to set for a step (used with mark_step command)
        - step_notes: Additional notes for a step (used with mark_step command)
        """

        # Dispatch to the matching private handler; each handler does its own
        # argument validation and raises ToolError on bad input.
        if command == "create":
            return self._create_plan(plan_id, title, steps)
        elif command == "update":
            return self._update_plan(plan_id, title, steps)
        elif command == "list":
            return self._list_plans()
        elif command == "get":
            return self._get_plan(plan_id)
        elif command == "set_active":
            return self._set_active_plan(plan_id)
        elif command == "mark_step":
            return self._mark_step(plan_id, step_index, step_status, step_notes)
        elif command == "delete":
            return self._delete_plan(plan_id)
        else:
            # Defensive: Literal is not enforced at runtime, so unknown
            # commands can still arrive here.
            raise ToolError(
                f"Unrecognized command: {command}. Allowed commands are: create, update, list, get, set_active, mark_step, delete"
            )

    def _create_plan(
        self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]
    ) -> ToolResult:
        """Create a new plan with the given ID, title, and steps."""
        if not plan_id:
            raise ToolError("Parameter `plan_id` is required for command: create")

        if plan_id in self.plans:
            raise ToolError(
                f"A plan with ID '{plan_id}' already exists. Use 'update' to modify existing plans."
            )

        if not title:
            raise ToolError("Parameter `title` is required for command: create")

        if (
            not steps
            or not isinstance(steps, list)
            or not all(isinstance(step, str) for step in steps)
        ):
            raise ToolError(
                "Parameter `steps` must be a non-empty list of strings for command: create"
            )

        # Create a new plan with initialized step statuses
        plan = {
            "plan_id": plan_id,
            "title": title,
            "steps": steps,
            "step_statuses": ["not_started"] * len(steps),
            "step_notes": [""] * len(steps),
        }

        self.plans[plan_id] = plan
        self._current_plan_id = plan_id  # Set as active plan

        return ToolResult(
            output=f"Plan created successfully with ID: {plan_id}\n\n{self._format_plan(plan)}"
        )

    def _update_plan(
        self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]
    ) -> ToolResult:
        """Update an existing plan with new title or steps."""
        if not plan_id:
            raise ToolError("Parameter `plan_id` is required for command: update")

        if plan_id not in self.plans:
            raise ToolError(f"No plan found with ID: {plan_id}")

        plan = self.plans[plan_id]

        if title:
            plan["title"] = title

        if steps:
            if not isinstance(steps, list) or not all(
                isinstance(step, str) for step in steps
            ):
                raise ToolError(
                    "Parameter `steps` must be a list of strings for command: update"
                )

            # Preserve existing step statuses for unchanged steps
            old_steps = plan["steps"]
            old_statuses = plan["step_statuses"]
            old_notes = plan["step_notes"]

            # Create new step statuses and notes
            new_statuses = []
            new_notes = []

            for i, step in enumerate(steps):
                # If the step exists at the same position in old steps, preserve status and notes
                if i < len(old_steps) and step == old_steps[i]:
                    new_statuses.append(old_statuses[i])
                    new_notes.append(old_notes[i])
                else:
                    new_statuses.append("not_started")
                    new_notes.append("")

            plan["steps"] = steps
            plan["step_statuses"] = new_statuses
            plan["step_notes"] = new_notes

        return ToolResult(
            output=f"Plan updated successfully: {plan_id}\n\n{self._format_plan(plan)}"
        )

    def _list_plans(self) -> ToolResult:
        """List all available plans."""
        if not self.plans:
            return ToolResult(
                output="No plans available. Create a plan with the 'create' command."
            )

        output = "Available plans:\n"
        for plan_id, plan in self.plans.items():
            current_marker = " (active)" if plan_id == self._current_plan_id else ""
            completed = sum(
                1 for status in plan["step_statuses"] if status == "completed"
            )
            total = len(plan["steps"])
            progress = f"{completed}/{total} steps completed"
            output += f"• {plan_id}{current_marker}: {plan['title']} - {progress}\n"

        return ToolResult(output=output)

    def _get_plan(self, plan_id: Optional[str]) -> ToolResult:
        """Get details of a specific plan."""
        if not plan_id:
            # If no plan_id is provided, use the current active plan
            if not self._current_plan_id:
                raise ToolError(
                    "No active plan. Please specify a plan_id or set an active plan."
                )
            plan_id = self._current_plan_id

        if plan_id not in self.plans:
            raise ToolError(f"No plan found with ID: {plan_id}")

        plan = self.plans[plan_id]
        return ToolResult(output=self._format_plan(plan))

    def _set_active_plan(self, plan_id: Optional[str]) -> ToolResult:
        """Set a plan as the active plan."""
        if not plan_id:
            raise ToolError("Parameter `plan_id` is required for command: set_active")

        if plan_id not in self.plans:
            raise ToolError(f"No plan found with ID: {plan_id}")

        self._current_plan_id = plan_id
        return ToolResult(
            output=f"Plan '{plan_id}' is now the active plan.\n\n{self._format_plan(self.plans[plan_id])}"
        )

    def _mark_step(
        self,
        plan_id: Optional[str],
        step_index: Optional[int],
        step_status: Optional[str],
        step_notes: Optional[str],
    ) -> ToolResult:
        """Mark a step with a specific status and optional notes."""
        if not plan_id:
            # If no plan_id is provided, use the current active plan
            if not self._current_plan_id:
                raise ToolError(
                    "No active plan. Please specify a plan_id or set an active plan."
                )
            plan_id = self._current_plan_id

        if plan_id not in self.plans:
            raise ToolError(f"No plan found with ID: {plan_id}")

        if step_index is None:
            raise ToolError("Parameter `step_index` is required for command: mark_step")

        plan = self.plans[plan_id]

        if step_index < 0 or step_index >= len(plan["steps"]):
            raise ToolError(
                f"Invalid step_index: {step_index}. Valid indices range from 0 to {len(plan['steps'])-1}."
            )

        if step_status and step_status not in [
            "not_started",
            "in_progress",
            "completed",
            "blocked",
        ]:
            raise ToolError(
                f"Invalid step_status: {step_status}. Valid statuses are: not_started, in_progress, completed, blocked"
            )

        # Status and notes updates are independent: either may be omitted.
        if step_status:
            plan["step_statuses"][step_index] = step_status

        if step_notes:
            plan["step_notes"][step_index] = step_notes

        return ToolResult(
            output=f"Step {step_index} updated in plan '{plan_id}'.\n\n{self._format_plan(plan)}"
        )

    def _delete_plan(self, plan_id: Optional[str]) -> ToolResult:
        """Delete a plan."""
        if not plan_id:
            raise ToolError("Parameter `plan_id` is required for command: delete")

        if plan_id not in self.plans:
            raise ToolError(f"No plan found with ID: {plan_id}")

        del self.plans[plan_id]

        # If the deleted plan was the active plan, clear the active plan
        if self._current_plan_id == plan_id:
            self._current_plan_id = None

        return ToolResult(output=f"Plan '{plan_id}' has been deleted.")

    def _format_plan(self, plan: Dict) -> str:
        """Format a plan for display."""
        output = f"Plan: {plan['title']} (ID: {plan['plan_id']})\n"
        output += "=" * len(output) + "\n\n"

        # Calculate progress statistics
        total_steps = len(plan["steps"])
        completed = sum(1 for status in plan["step_statuses"] if status == "completed")
        in_progress = sum(
            1 for status in plan["step_statuses"] if status == "in_progress"
        )
        blocked = sum(1 for status in plan["step_statuses"] if status == "blocked")
        not_started = sum(
            1 for status in plan["step_statuses"] if status == "not_started"
        )

        output += f"Progress: {completed}/{total_steps} steps completed "
        if total_steps > 0:
            percentage = (completed / total_steps) * 100
            output += f"({percentage:.1f}%)\n"
        else:
            output += "(0%)\n"

        output += f"Status: {completed} completed, {in_progress} in progress, {blocked} blocked, {not_started} not started\n\n"
        output += "Steps:\n"

        # Add each step with its status and notes
        for i, (step, status, notes) in enumerate(
            zip(plan["steps"], plan["step_statuses"], plan["step_notes"])
        ):
            status_symbol = {
                "not_started": "[ ]",
                "in_progress": "[→]",
                "completed": "[✓]",
                "blocked": "[!]",
            }.get(status, "[ ]")

            output += f"{i}. {status_symbol} {step}\n"
            if notes:
                output += f"   Notes: {notes}\n"

        return output
    def _get_type_info(self, type_hint: Type) -> dict:
        """Get type information for a single type."""
        # Pydantic models carry their own schema; everything else falls back
        # to the primitive type_mapping (defaulting to "string").
        if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):
            return type_hint.model_json_schema()

        return {
            "type": self.type_mapping.get(type_hint, "string"),
            "description": f"Value of type {getattr(type_hint, '__name__', 'any')}",
        }

    def _create_union_schema(self, types: tuple) -> dict:
        """Create schema for Union types."""
        return {
            "type": "object",
            "properties": {
                "response": {"anyOf": [self._get_type_info(t) for t in types]}
            },
            "required": self.required,
        }

    async def execute(self, required: list | None = None, **kwargs) -> Any:
        """Execute the chat completion with type conversion.

        Args:
            required: List of required field names or None
            **kwargs: Response data

        Returns:
            Converted response based on response_type
        """
        required = required or self.required

        # Handle case when required is a list
        if isinstance(required, list) and len(required) > 0:
            if len(required) == 1:
                required_field = required[0]
                result = kwargs.get(required_field, "")
            else:
                # Return multiple fields as a dictionary
                # NOTE: this path returns early and skips the type-conversion
                # logic below — multi-field responses are always plain dicts.
                return {field: kwargs.get(field, "") for field in required}
        else:
            required_field = "response"
            result = kwargs.get(required_field, "")

        # Type conversion logic
        if self.response_type == str:
            return result

        # BaseModel targets are reconstructed from the FULL kwargs, not just
        # the single extracted field.
        if isinstance(self.response_type, type) and issubclass(
            self.response_type, BaseModel
        ):
            return self.response_type(**kwargs)

        if get_origin(self.response_type) in (list, dict):
            return result  # Assuming result is already in correct format

        try:
            return self.response_type(result)
        except (ValueError, TypeError):
            # Conversion is best-effort: fall back to the raw value.
            return result
    @property
    def primary_agent(self) -> Optional[BaseAgent]:
        """Get the primary agent for the flow"""
        # Returns None when primary_agent_key is unset or absent from agents.
        return self.agents.get(self.primary_agent_key)

    def get_agent(self, key: str) -> Optional[BaseAgent]:
        """Get a specific agent by key"""
        return self.agents.get(key)

    def add_agent(self, key: str, agent: BaseAgent) -> None:
        """Add a new agent to the flow"""
        # Overwrites silently if the key already exists.
        self.agents[key] = agent

    @abstractmethod
    async def execute(self, input_text: str) -> str:
        """Execute the flow with given input"""
    # Core attributes
    name: str = Field(..., description="Unique name of the agent")
    description: Optional[str] = Field(None, description="Optional agent description")

    # Prompts
    system_prompt: Optional[str] = Field(
        None, description="System-level instruction prompt"
    )
    next_step_prompt: Optional[str] = Field(
        None, description="Prompt for determining next action"
    )

    # Dependencies
    llm: LLM = Field(default_factory=LLM, description="Language model instance")
    memory: Memory = Field(default_factory=Memory, description="Agent's memory store")
    state: AgentState = Field(
        default=AgentState.IDLE, description="Current agent state"
    )

    # Execution control
    max_steps: int = Field(default=10, description="Maximum steps before termination")
    current_step: int = Field(default=0, description="Current step in execution")

    # Number of repeated identical assistant replies before is_stuck() fires.
    duplicate_threshold: int = 2

    class Config:
        arbitrary_types_allowed = True
        extra = "allow"  # Allow extra fields for flexibility in subclasses

    @model_validator(mode="after")
    def initialize_agent(self) -> "BaseAgent":
        """Initialize agent with default settings if not provided."""
        # Fall back to a per-agent LLM config keyed by the lowercased name
        # when no valid LLM instance was supplied.
        if self.llm is None or not isinstance(self.llm, LLM):
            self.llm = LLM(config_name=self.name.lower())
        if not isinstance(self.memory, Memory):
            self.memory = Memory()
        return self
on failure\n raise e\n finally:\n self.state = previous_state # Revert to previous state\n\n def update_memory(\n self,\n role: ROLE_TYPE, # type: ignore\n content: str,\n base64_image: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Add a message to the agent's memory.\n\n Args:\n role: The role of the message sender (user, system, assistant, tool).\n content: The message content.\n base64_image: Optional base64 encoded image.\n **kwargs: Additional arguments (e.g., tool_call_id for tool messages).\n\n Raises:\n ValueError: If the role is unsupported.\n \"\"\"\n message_map = {\n \"user\": Message.user_message,\n \"system\": Message.system_message,\n \"assistant\": Message.assistant_message,\n \"tool\": lambda content, **kw: Message.tool_message(content, **kw),\n }\n\n if role not in message_map:\n raise ValueError(f\"Unsupported message role: {role}\")\n\n # Create message with appropriate parameters based on role\n kwargs = {\"base64_image\": base64_image, **(kwargs if role == \"tool\" else {})}\n self.memory.add_message(message_map[role](content, **kwargs))\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Execute the agent's main loop asynchronously.\n\n Args:\n request: Optional initial user request to process.\n\n Returns:\n A string summarizing the execution results.\n\n Raises:\n RuntimeError: If the agent is not in IDLE state at start.\n \"\"\"\n if self.state != AgentState.IDLE:\n raise RuntimeError(f\"Cannot run agent from state: {self.state}\")\n\n if request:\n self.update_memory(\"user\", request)\n\n results: List[str] = []\n async with self.state_context(AgentState.RUNNING):\n while (\n self.current_step < self.max_steps and self.state != AgentState.FINISHED\n ):\n self.current_step += 1\n logger.info(f\"Executing step {self.current_step}/{self.max_steps}\")\n step_result = await self.step()\n\n # Check for stuck state\n if self.is_stuck():\n self.handle_stuck_state()\n\n results.append(f\"Step {self.current_step}: 
    @abstractmethod
    async def step(self) -> str:
        """Execute a single step in the agent's workflow.

        Must be implemented by subclasses to define specific behavior.
        """

    def handle_stuck_state(self):
        """Handle stuck state by adding a prompt to change strategy"""
        # Prepends the nudge to next_step_prompt so it is seen first on the
        # following step.
        stuck_prompt = "\
        Observed duplicate responses. Consider new strategies and avoid repeating ineffective paths already attempted."
        self.next_step_prompt = f"{stuck_prompt}\n{self.next_step_prompt}"
        logger.warning(f"Agent detected stuck state. Added prompt: {stuck_prompt}")

    def is_stuck(self) -> bool:
        """Check if the agent is stuck in a loop by detecting duplicate content"""
        if len(self.memory.messages) < 2:
            return False

        last_message = self.memory.messages[-1]
        if not last_message.content:
            return False

        # Count identical content occurrences
        # NOTE: counts matches across the whole history, not only consecutive
        # messages, and compares against the last message regardless of its role.
        duplicate_count = sum(
            1
            for msg in reversed(self.memory.messages[:-1])
            if msg.role == "assistant" and msg.content == last_message.content
        )

        return duplicate_count >= self.duplicate_threshold

    @property
    def messages(self) -> List[Message]:
        """Retrieve a list of messages from the agent's memory."""
        return self.memory.messages

    @messages.setter
    def messages(self, value: List[Message]):
        """Set the list of messages in the agent's memory."""
        self.memory.messages = value
async def main(host: str = "localhost", port: int = 10000):
    """Build the A2A Starlette application exposing the Manus agent.

    Args:
        host: Interface name advertised in the agent card URL.
        port: Port advertised in the agent card URL.

    Returns:
        The ASGI app produced by A2AStarletteApplication.build().
    """
    try:
        capabilities = AgentCapabilities(streaming=False, pushNotifications=True)
        # Static advertisement of the agent's tools; descriptions are reused
        # from the tool modules where available.
        skills = [
            AgentSkill(
                id="Python Execute",
                name="Python Execute Tool",
                description="Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results.",
                tags=["Execute Python Code"],
                examples=[
                    "Execute Python code:'''python \n Print('Hello World') \n '''"
                ],
            ),
            AgentSkill(
                id="Browser use",
                name="Browser use Tool",
                description=_BROWSER_DESCRIPTION,
                tags=["Use Browser"],
                examples=["go_to 'https://www.google.com'"],
            ),
            AgentSkill(
                id="Replace String",
                name="Str_replace Tool",
                description=_STR_REPLACE_EDITOR_DESCRIPTION,
                tags=["Operate Files"],
                examples=["Replace 'old' with 'new' in 'file.txt'"],
            ),
            AgentSkill(
                id="Ask human",
                name="Ask human Tool",
                description="Use this tool to ask human for help.",
                tags=["Ask human for help"],
                examples=["Ask human: 'What time is it?'"],
            ),
            AgentSkill(
                id="terminate",
                name="terminate Tool",
                description=_TERMINATE_DESCRIPTION,
                tags=["terminate task"],
                examples=["terminate"],
            ),
            # Add more skills as needed
        ]

        agent_card = AgentCard(
            name="Manus Agent",
            description="A versatile agent that can solve various tasks using multiple tools including MCP-based tools",
            url=f"http://{host}:{port}/",
            version="1.0.0",
            defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
            defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
            capabilities=capabilities,
            skills=skills,
        )

        httpx_client = httpx.AsyncClient()
        request_handler = DefaultRequestHandler(
            # A fresh agent per request; capped at 3 steps for A2A usage.
            agent_executor=ManusExecutor(
                agent_factory=lambda: A2AManus.create(max_steps=3)
            ),
            task_store=InMemoryTaskStore(),
            push_notifier=InMemoryPushNotifier(httpx_client),
        )

        server = A2AStarletteApplication(
            agent_card=agent_card, http_handler=request_handler
        )

        logger.info(f"Starting server on {host}:{port}")
        return server.build()
    except Exception as e:
        logger.error(f"An error occurred during server startup: {e}")
        exit(1)


def run_server(host: Optional[str] = "localhost", port: Optional[int] = 10000):
    """Build the app and serve it with uvicorn (blocks until shutdown)."""
    try:
        import uvicorn

        app = asyncio.run(main(host, port))
        config = uvicorn.Config(
            app=app, host=host, port=port, loop="asyncio", proxy_headers=True
        )
        # FIX: log before the blocking run() call — the original emitted
        # "Server started" only after the server had already shut down.
        logger.info(f"Server started on {host}:{port}")
        uvicorn.Server(config=config).run()
    except Exception as e:
        logger.error(f"An error occurred while starting the server: {e}")


if __name__ == "__main__":
    # Parse command line arguments for host and port, with default values
    parser = argparse.ArgumentParser(description="Start Manus Agent service")
    parser.add_argument(
        "--host",
        type=str,
        default="localhost",
        help="Server host address, default is localhost",
    )
    parser.add_argument(
        "--port", type=int, default=10000, help="Server port, default is 10000"
    )
    args = parser.parse_args()
    # Start the server with the specified or default host and port
    run_server(args.host, args.port)
clean Markdown generation.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Union\nfrom urllib.parse import urlparse\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\n\n\nclass Crawl4aiTool(BaseTool):\n \"\"\"\n Web crawler tool powered by Crawl4AI.\n\n Provides clean markdown extraction optimized for AI processing.\n \"\"\"\n\n name: str = \"crawl4ai\"\n description: str = \"\"\"Web crawler that extracts clean, AI-ready content from web pages.\n\n Features:\n - Extracts clean markdown content optimized for LLMs\n - Handles JavaScript-heavy sites and dynamic content\n - Supports multiple URLs in a single request\n - Fast and reliable with built-in error handling\n\n Perfect for content analysis, research, and feeding web content to AI models.\"\"\"\n\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"urls\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"(required) List of URLs to crawl. Can be a single URL or multiple URLs.\",\n \"minItems\": 1,\n },\n \"timeout\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Timeout in seconds for each URL. Default is 30.\",\n \"default\": 30,\n \"minimum\": 5,\n \"maximum\": 120,\n },\n \"bypass_cache\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to bypass cache and fetch fresh content. Default is false.\",\n \"default\": False,\n },\n \"word_count_threshold\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Minimum word count for content blocks. 
Default is 10.\",\n \"default\": 10,\n \"minimum\": 1,\n },\n },\n \"required\": [\"urls\"],\n }\n\n async def execute(\n self,\n urls: Union[str, List[str]],\n timeout: int = 30,\n bypass_cache: bool = False,\n word_count_threshold: int = 10,\n ) -> ToolResult:\n \"\"\"\n Execute web crawling for the specified URLs.\n\n Args:\n urls: Single URL string or list of URLs to crawl\n timeout: Timeout in seconds for each URL\n bypass_cache: Whether to bypass cache\n word_count_threshold: Minimum word count for content blocks\n\n Returns:\n ToolResult with crawl results\n \"\"\"\n # Normalize URLs to list\n if isinstance(urls, str):\n url_list = [urls]\n else:\n url_list = urls\n\n # Validate URLs\n valid_urls = []\n for url in url_list:\n if self._is_valid_url(url):\n valid_urls.append(url)\n else:\n logger.warning(f\"Invalid URL skipped: {url}\")\n\n if not valid_urls:\n return ToolResult(error=\"No valid URLs provided\")\n\n try:\n # Import crawl4ai components\n from crawl4ai import (\n AsyncWebCrawler,\n BrowserConfig,\n CacheMode,\n CrawlerRunConfig,\n )\n\n # Configure browser settings\n browser_config = BrowserConfig(\n headless=True,\n verbose=False,\n browser_type=\"chromium\",\n ignore_https_errors=True,\n java_script_enabled=True,\n )\n\n # Configure crawler settings\n run_config = CrawlerRunConfig(\n cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,\n word_count_threshold=word_count_threshold,\n process_iframes=True,\n remove_overlay_elements=True,\n excluded_tags=[\"script\", \"style\"],\n page_timeout=timeout * 1000, # Convert to milliseconds\n verbose=False,\n wait_until=\"domcontentloaded\",\n )\n\n results = []\n successful_count = 0\n failed_count = 0\n\n # Process each URL\n async with AsyncWebCrawler(config=browser_config) as crawler:\n for url in valid_urls:\n try:\n logger.info(f\"🕷️ Crawling URL: {url}\")\n start_time = asyncio.get_event_loop().time()\n\n result = await crawler.arun(url=url, config=run_config)\n\n end_time = 
asyncio.get_event_loop().time()\n execution_time = end_time - start_time\n\n if result.success:\n # Count words in markdown\n word_count = 0\n if hasattr(result, \"markdown\") and result.markdown:\n word_count = len(result.markdown.split())\n\n # Count links\n links_count = 0\n if hasattr(result, \"links\") and result.links:\n internal_links = result.links.get(\"internal\", [])\n external_links = result.links.get(\"external\", [])\n links_count = len(internal_links) + len(external_links)\n\n # Count images\n images_count = 0\n if hasattr(result, \"media\") and result.media:\n images = result.media.get(\"images\", [])\n images_count = len(images)\n\n results.append(\n {\n \"url\": url,\n \"success\": True,\n \"status_code\": getattr(result, \"status_code\", 200),\n \"title\": result.metadata.get(\"title\")\n if result.metadata\n else None,\n \"markdown\": result.markdown\n if hasattr(result, \"markdown\")\n else None,\n \"word_count\": word_count,\n \"links_count\": links_count,\n \"images_count\": images_count,\n \"execution_time\": execution_time,\n }\n )\n successful_count += 1\n logger.info(\n f\"✅ Successfully crawled {url} in {execution_time:.2f}s\"\n )\n\n else:\n results.append(\n {\n \"url\": url,\n \"success\": False,\n \"error_message\": getattr(\n result, \"error_message\", \"Unknown error\"\n ),\n \"execution_time\": execution_time,\n }\n )\n failed_count += 1\n logger.warning(f\"❌ Failed to crawl {url}\")\n\n except Exception as e:\n error_msg = f\"Error crawling {url}: {str(e)}\"\n logger.error(error_msg)\n results.append(\n {\"url\": url, \"success\": False, \"error_message\": error_msg}\n )\n failed_count += 1\n\n # Format output\n output_lines = [f\"🕷️ Crawl4AI Results Summary:\"]\n output_lines.append(f\"📊 Total URLs: {len(valid_urls)}\")\n output_lines.append(f\"✅ Successful: {successful_count}\")\n output_lines.append(f\"❌ Failed: {failed_count}\")\n output_lines.append(\"\")\n\n for i, result in enumerate(results, 1):\n 
output_lines.append(f\"{i}. {result['url']}\")\n\n if result[\"success\"]:\n output_lines.append(\n f\" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})\"\n )\n if result.get(\"title\"):\n output_lines.append(f\" 📄 Title: {result['title']}\")\n\n if result.get(\"markdown\"):\n # Show first 300 characters of markdown content\n content_preview = result[\"markdown\"]\n if len(result[\"markdown\"]) > 300:\n content_preview += \"...\"\n output_lines.append(f\" 📝 Content: {content_preview}\")\n\n output_lines.append(\n f\" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images\"\n )\n\n if result.get(\"execution_time\"):\n output_lines.append(\n f\" ⏱️ Time: {result['execution_time']:.2f}s\"\n )\n else:\n output_lines.append(f\" ❌ Status: Failed\")\n if result.get(\"error_message\"):\n output_lines.append(f\" 🚫 Error: {result['error_message']}\")\n\n output_lines.append(\"\")\n\n return ToolResult(output=\"\\n\".join(output_lines))\n\n except ImportError as e:\n error_msg = \"Crawl4AI is not installed. 
Please install it with: pip install crawl4ai\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n except Exception as e:\n error_msg = f\"Crawl4AI execution failed: {str(e)}\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n\n def _is_valid_url(self, url: str) -> bool:\n \"\"\"Validate if a URL is properly formatted.\"\"\"\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc]) and result.scheme in [\n \"http\",\n \"https\",\n ]\n except Exception:\n return False\n"], ["/OpenManus/app/sandbox/core/manager.py", "import asyncio\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom typing import Dict, Optional, Set\n\nimport docker\nfrom docker.errors import APIError, ImageNotFound\n\nfrom app.config import SandboxSettings\nfrom app.logger import logger\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxManager:\n \"\"\"Docker sandbox manager.\n\n Manages multiple DockerSandbox instances lifecycle including creation,\n monitoring, and cleanup. 
Provides concurrent access control and automatic\n cleanup mechanisms for sandbox resources.\n\n Attributes:\n max_sandboxes: Maximum allowed number of sandboxes.\n idle_timeout: Sandbox idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n _sandboxes: Active sandbox instance mapping.\n _last_used: Last used time record for sandboxes.\n \"\"\"\n\n def __init__(\n self,\n max_sandboxes: int = 100,\n idle_timeout: int = 3600,\n cleanup_interval: int = 300,\n ):\n \"\"\"Initializes sandbox manager.\n\n Args:\n max_sandboxes: Maximum sandbox count limit.\n idle_timeout: Idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n \"\"\"\n self.max_sandboxes = max_sandboxes\n self.idle_timeout = idle_timeout\n self.cleanup_interval = cleanup_interval\n\n # Docker client\n self._client = docker.from_env()\n\n # Resource mappings\n self._sandboxes: Dict[str, DockerSandbox] = {}\n self._last_used: Dict[str, float] = {}\n\n # Concurrency control\n self._locks: Dict[str, asyncio.Lock] = {}\n self._global_lock = asyncio.Lock()\n self._active_operations: Set[str] = set()\n\n # Cleanup task\n self._cleanup_task: Optional[asyncio.Task] = None\n self._is_shutting_down = False\n\n # Start automatic cleanup\n self.start_cleanup_task()\n\n async def ensure_image(self, image: str) -> bool:\n \"\"\"Ensures Docker image is available.\n\n Args:\n image: Image name.\n\n Returns:\n bool: Whether image is available.\n \"\"\"\n try:\n self._client.images.get(image)\n return True\n except ImageNotFound:\n try:\n logger.info(f\"Pulling image {image}...\")\n await asyncio.get_event_loop().run_in_executor(\n None, self._client.images.pull, image\n )\n return True\n except (APIError, Exception) as e:\n logger.error(f\"Failed to pull image {image}: {e}\")\n return False\n\n @asynccontextmanager\n async def sandbox_operation(self, sandbox_id: str):\n \"\"\"Context manager for sandbox operations.\n\n Provides concurrency control and usage time 
updates.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Raises:\n KeyError: If sandbox not found.\n \"\"\"\n if sandbox_id not in self._locks:\n self._locks[sandbox_id] = asyncio.Lock()\n\n async with self._locks[sandbox_id]:\n if sandbox_id not in self._sandboxes:\n raise KeyError(f\"Sandbox {sandbox_id} not found\")\n\n self._active_operations.add(sandbox_id)\n try:\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n yield self._sandboxes[sandbox_id]\n finally:\n self._active_operations.remove(sandbox_id)\n\n async def create_sandbox(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> str:\n \"\"\"Creates a new sandbox instance.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n\n Returns:\n str: Sandbox ID.\n\n Raises:\n RuntimeError: If max sandbox count reached or creation fails.\n \"\"\"\n async with self._global_lock:\n if len(self._sandboxes) >= self.max_sandboxes:\n raise RuntimeError(\n f\"Maximum number of sandboxes ({self.max_sandboxes}) reached\"\n )\n\n config = config or SandboxSettings()\n if not await self.ensure_image(config.image):\n raise RuntimeError(f\"Failed to ensure Docker image: {config.image}\")\n\n sandbox_id = str(uuid.uuid4())\n try:\n sandbox = DockerSandbox(config, volume_bindings)\n await sandbox.create()\n\n self._sandboxes[sandbox_id] = sandbox\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n self._locks[sandbox_id] = asyncio.Lock()\n\n logger.info(f\"Created sandbox {sandbox_id}\")\n return sandbox_id\n\n except Exception as e:\n logger.error(f\"Failed to create sandbox: {e}\")\n if sandbox_id in self._sandboxes:\n await self.delete_sandbox(sandbox_id)\n raise RuntimeError(f\"Failed to create sandbox: {e}\")\n\n async def get_sandbox(self, sandbox_id: str) -> DockerSandbox:\n \"\"\"Gets a sandbox instance.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Returns:\n DockerSandbox: Sandbox instance.\n\n Raises:\n 
KeyError: If sandbox does not exist.\n \"\"\"\n async with self.sandbox_operation(sandbox_id) as sandbox:\n return sandbox\n\n def start_cleanup_task(self) -> None:\n \"\"\"Starts automatic cleanup task.\"\"\"\n\n async def cleanup_loop():\n while not self._is_shutting_down:\n try:\n await self._cleanup_idle_sandboxes()\n except Exception as e:\n logger.error(f\"Error in cleanup loop: {e}\")\n await asyncio.sleep(self.cleanup_interval)\n\n self._cleanup_task = asyncio.create_task(cleanup_loop())\n\n async def _cleanup_idle_sandboxes(self) -> None:\n \"\"\"Cleans up idle sandboxes.\"\"\"\n current_time = asyncio.get_event_loop().time()\n to_cleanup = []\n\n async with self._global_lock:\n for sandbox_id, last_used in self._last_used.items():\n if (\n sandbox_id not in self._active_operations\n and current_time - last_used > self.idle_timeout\n ):\n to_cleanup.append(sandbox_id)\n\n for sandbox_id in to_cleanup:\n try:\n await self.delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Error cleaning up sandbox {sandbox_id}: {e}\")\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up all resources.\"\"\"\n logger.info(\"Starting manager cleanup...\")\n self._is_shutting_down = True\n\n # Cancel cleanup task\n if self._cleanup_task:\n self._cleanup_task.cancel()\n try:\n await asyncio.wait_for(self._cleanup_task, timeout=1.0)\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n\n # Get all sandbox IDs to clean up\n async with self._global_lock:\n sandbox_ids = list(self._sandboxes.keys())\n\n # Concurrently clean up all sandboxes\n cleanup_tasks = []\n for sandbox_id in sandbox_ids:\n task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id))\n cleanup_tasks.append(task)\n\n if cleanup_tasks:\n # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting\n try:\n await asyncio.wait(cleanup_tasks, timeout=30.0)\n except asyncio.TimeoutError:\n logger.error(\"Sandbox cleanup timed out\")\n\n # Clean up remaining 
references\n self._sandboxes.clear()\n self._last_used.clear()\n self._locks.clear()\n self._active_operations.clear()\n\n logger.info(\"Manager cleanup completed\")\n\n async def _safe_delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Safely deletes a single sandbox.\n\n Args:\n sandbox_id: Sandbox ID to delete.\n \"\"\"\n try:\n if sandbox_id in self._active_operations:\n logger.warning(\n f\"Sandbox {sandbox_id} has active operations, waiting for completion\"\n )\n for _ in range(10): # Wait at most 10 times\n await asyncio.sleep(0.5)\n if sandbox_id not in self._active_operations:\n break\n else:\n logger.warning(\n f\"Timeout waiting for sandbox {sandbox_id} operations to complete\"\n )\n\n # Get reference to sandbox object\n sandbox = self._sandboxes.get(sandbox_id)\n if sandbox:\n await sandbox.cleanup()\n\n # Remove sandbox record from manager\n async with self._global_lock:\n self._sandboxes.pop(sandbox_id, None)\n self._last_used.pop(sandbox_id, None)\n self._locks.pop(sandbox_id, None)\n logger.info(f\"Deleted sandbox {sandbox_id}\")\n except Exception as e:\n logger.error(f\"Error during cleanup of sandbox {sandbox_id}: {e}\")\n\n async def delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Deletes specified sandbox.\n\n Args:\n sandbox_id: Sandbox ID.\n \"\"\"\n if sandbox_id not in self._sandboxes:\n return\n\n try:\n await self._safe_delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Failed to delete sandbox {sandbox_id}: {e}\")\n\n async def __aenter__(self) -> \"SandboxManager\":\n \"\"\"Async context manager entry.\"\"\"\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n\n def get_stats(self) -> Dict:\n \"\"\"Gets manager statistics.\n\n Returns:\n Dict: Statistics information.\n \"\"\"\n return {\n \"total_sandboxes\": len(self._sandboxes),\n \"active_operations\": len(self._active_operations),\n \"max_sandboxes\": 
self.max_sandboxes,\n \"idle_timeout\": self.idle_timeout,\n \"cleanup_interval\": self.cleanup_interval,\n \"is_shutting_down\": self._is_shutting_down,\n }\n"], ["/OpenManus/app/agent/manus.py", "from typing import Dict, List, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.browser import BrowserContextHelper\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.logger import logger\nfrom app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.ask_human import AskHuman\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.mcp import MCPClients, MCPClientTool\nfrom app.tool.python_execute import PythonExecute\nfrom app.tool.str_replace_editor import StrReplaceEditor\n\n\nclass Manus(ToolCallAgent):\n \"\"\"A versatile general-purpose agent with support for both local and MCP tools.\"\"\"\n\n name: str = \"Manus\"\n description: str = \"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # MCP clients for remote tool access\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n PythonExecute(),\n BrowserUseTool(),\n StrReplaceEditor(),\n AskHuman(),\n Terminate(),\n )\n )\n\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n # Track connected MCP servers\n connected_servers: Dict[str, str] = Field(\n default_factory=dict\n ) # server_id -> url/command\n _initialized: bool = False\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"Manus\":\n 
\"\"\"Initialize basic components synchronously.\"\"\"\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n @classmethod\n async def create(cls, **kwargs) -> \"Manus\":\n \"\"\"Factory method to create and properly initialize a Manus instance.\"\"\"\n instance = cls(**kwargs)\n await instance.initialize_mcp_servers()\n instance._initialized = True\n return instance\n\n async def initialize_mcp_servers(self) -> None:\n \"\"\"Initialize connections to configured MCP servers.\"\"\"\n for server_id, server_config in config.mcp_config.servers.items():\n try:\n if server_config.type == \"sse\":\n if server_config.url:\n await self.connect_mcp_server(server_config.url, server_id)\n logger.info(\n f\"Connected to MCP server {server_id} at {server_config.url}\"\n )\n elif server_config.type == \"stdio\":\n if server_config.command:\n await self.connect_mcp_server(\n server_config.command,\n server_id,\n use_stdio=True,\n stdio_args=server_config.args,\n )\n logger.info(\n f\"Connected to MCP server {server_id} using command {server_config.command}\"\n )\n except Exception as e:\n logger.error(f\"Failed to connect to MCP server {server_id}: {e}\")\n\n async def connect_mcp_server(\n self,\n server_url: str,\n server_id: str = \"\",\n use_stdio: bool = False,\n stdio_args: List[str] = None,\n ) -> None:\n \"\"\"Connect to an MCP server and add its tools.\"\"\"\n if use_stdio:\n await self.mcp_clients.connect_stdio(\n server_url, stdio_args or [], server_id\n )\n self.connected_servers[server_id or server_url] = server_url\n else:\n await self.mcp_clients.connect_sse(server_url, server_id)\n self.connected_servers[server_id or server_url] = server_url\n\n # Update available tools with only the new tools from this server\n new_tools = [\n tool for tool in self.mcp_clients.tools if tool.server_id == server_id\n ]\n self.available_tools.add_tools(*new_tools)\n\n async def disconnect_mcp_server(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from 
an MCP server and remove its tools.\"\"\"\n await self.mcp_clients.disconnect(server_id)\n if server_id:\n self.connected_servers.pop(server_id, None)\n else:\n self.connected_servers.clear()\n\n # Rebuild available tools without the disconnected server's tools\n base_tools = [\n tool\n for tool in self.available_tools.tools\n if not isinstance(tool, MCPClientTool)\n ]\n self.available_tools = ToolCollection(*base_tools)\n self.available_tools.add_tools(*self.mcp_clients.tools)\n\n async def cleanup(self):\n \"\"\"Clean up Manus agent resources.\"\"\"\n if self.browser_context_helper:\n await self.browser_context_helper.cleanup_browser()\n # Disconnect from all MCP servers only if we were initialized\n if self._initialized:\n await self.disconnect_mcp_server()\n self._initialized = False\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions with appropriate context.\"\"\"\n if not self._initialized:\n await self.initialize_mcp_servers()\n self._initialized = True\n\n original_prompt = self.next_step_prompt\n recent_messages = self.memory.messages[-3:] if self.memory.messages else []\n browser_in_use = any(\n tc.function.name == BrowserUseTool().name\n for msg in recent_messages\n if msg.tool_calls\n for tc in msg.tool_calls\n )\n\n if browser_in_use:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n\n result = await super().think()\n\n # Restore original prompt\n self.next_step_prompt = original_prompt\n\n return result\n"], ["/OpenManus/app/agent/mcp.py", "from typing import Any, Dict, List, Optional, Tuple\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.mcp import MULTIMEDIA_RESPONSE_PROMPT, NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import AgentState, Message\nfrom app.tool.base import ToolResult\nfrom app.tool.mcp import MCPClients\n\n\nclass MCPAgent(ToolCallAgent):\n \"\"\"Agent for interacting 
with MCP (Model Context Protocol) servers.\n\n This agent connects to an MCP server using either SSE or stdio transport\n and makes the server's tools available through the agent's tool interface.\n \"\"\"\n\n name: str = \"mcp_agent\"\n description: str = \"An agent that connects to an MCP server and uses its tools.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n # Initialize MCP tool collection\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n available_tools: MCPClients = None # Will be set in initialize()\n\n max_steps: int = 20\n connection_type: str = \"stdio\" # \"stdio\" or \"sse\"\n\n # Track tool schemas to detect changes\n tool_schemas: Dict[str, Dict[str, Any]] = Field(default_factory=dict)\n _refresh_tools_interval: int = 5 # Refresh tools every N steps\n\n # Special tool names that should trigger termination\n special_tool_names: List[str] = Field(default_factory=lambda: [\"terminate\"])\n\n async def initialize(\n self,\n connection_type: Optional[str] = None,\n server_url: Optional[str] = None,\n command: Optional[str] = None,\n args: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Initialize the MCP connection.\n\n Args:\n connection_type: Type of connection to use (\"stdio\" or \"sse\")\n server_url: URL of the MCP server (for SSE connection)\n command: Command to run (for stdio connection)\n args: Arguments for the command (for stdio connection)\n \"\"\"\n if connection_type:\n self.connection_type = connection_type\n\n # Connect to the MCP server based on connection type\n if self.connection_type == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL is required for SSE connection\")\n await self.mcp_clients.connect_sse(server_url=server_url)\n elif self.connection_type == \"stdio\":\n if not command:\n raise ValueError(\"Command is required for stdio connection\")\n await self.mcp_clients.connect_stdio(command=command, args=args or [])\n else:\n raise ValueError(f\"Unsupported 
connection type: {self.connection_type}\")\n\n # Set available_tools to our MCP instance\n self.available_tools = self.mcp_clients\n\n # Store initial tool schemas\n await self._refresh_tools()\n\n # Add system message about available tools\n tool_names = list(self.mcp_clients.tool_map.keys())\n tools_info = \", \".join(tool_names)\n\n # Add system prompt and available tools information\n self.memory.add_message(\n Message.system_message(\n f\"{self.system_prompt}\\n\\nAvailable MCP tools: {tools_info}\"\n )\n )\n\n async def _refresh_tools(self) -> Tuple[List[str], List[str]]:\n \"\"\"Refresh the list of available tools from the MCP server.\n\n Returns:\n A tuple of (added_tools, removed_tools)\n \"\"\"\n if not self.mcp_clients.sessions:\n return [], []\n\n # Get current tool schemas directly from the server\n response = await self.mcp_clients.list_tools()\n current_tools = {tool.name: tool.inputSchema for tool in response.tools}\n\n # Determine added, removed, and changed tools\n current_names = set(current_tools.keys())\n previous_names = set(self.tool_schemas.keys())\n\n added_tools = list(current_names - previous_names)\n removed_tools = list(previous_names - current_names)\n\n # Check for schema changes in existing tools\n changed_tools = []\n for name in current_names.intersection(previous_names):\n if current_tools[name] != self.tool_schemas.get(name):\n changed_tools.append(name)\n\n # Update stored schemas\n self.tool_schemas = current_tools\n\n # Log and notify about changes\n if added_tools:\n logger.info(f\"Added MCP tools: {added_tools}\")\n self.memory.add_message(\n Message.system_message(f\"New tools available: {', '.join(added_tools)}\")\n )\n if removed_tools:\n logger.info(f\"Removed MCP tools: {removed_tools}\")\n self.memory.add_message(\n Message.system_message(\n f\"Tools no longer available: {', '.join(removed_tools)}\"\n )\n )\n if changed_tools:\n logger.info(f\"Changed MCP tools: {changed_tools}\")\n\n return added_tools, 
removed_tools\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action.\"\"\"\n # Check MCP session and tools availability\n if not self.mcp_clients.sessions or not self.mcp_clients.tool_map:\n logger.info(\"MCP service is no longer available, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Refresh tools periodically\n if self.current_step % self._refresh_tools_interval == 0:\n await self._refresh_tools()\n # All tools removed indicates shutdown\n if not self.mcp_clients.tool_map:\n logger.info(\"MCP service has shut down, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Use the parent class's think method\n return await super().think()\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs) -> None:\n \"\"\"Handle special tool execution and state changes\"\"\"\n # First process with parent handler\n await super()._handle_special_tool(name, result, **kwargs)\n\n # Handle multimedia responses\n if isinstance(result, ToolResult) and result.base64_image:\n self.memory.add_message(\n Message.system_message(\n MULTIMEDIA_RESPONSE_PROMPT.format(tool_name=name)\n )\n )\n\n def _should_finish_execution(self, name: str, **kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n # Terminate if the tool name is 'terminate'\n return name.lower() == \"terminate\"\n\n async def cleanup(self) -> None:\n \"\"\"Clean up MCP connection when done.\"\"\"\n if self.mcp_clients.sessions:\n await self.mcp_clients.disconnect()\n logger.info(\"MCP connection closed\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n result = await super().run(request)\n return result\n finally:\n # Ensure cleanup happens even if there's an error\n await self.cleanup()\n"], ["/OpenManus/app/agent/toolcall.py", "import asyncio\nimport json\nfrom typing import Any, List, Optional, Union\n\nfrom 
pydantic import Field\n\nfrom app.agent.react import ReActAgent\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger\nfrom app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice\nfrom app.tool import CreateChatCompletion, Terminate, ToolCollection\n\n\nTOOL_CALL_REQUIRED = \"Tool calls required but none provided\"\n\n\nclass ToolCallAgent(ReActAgent):\n \"\"\"Base agent class for handling tool/function calls with enhanced abstraction\"\"\"\n\n name: str = \"toolcall\"\n description: str = \"an agent that can execute tool calls.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n available_tools: ToolCollection = ToolCollection(\n CreateChatCompletion(), Terminate()\n )\n tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n tool_calls: List[ToolCall] = Field(default_factory=list)\n _current_base64_image: Optional[str] = None\n\n max_steps: int = 30\n max_observe: Optional[Union[int, bool]] = None\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools\"\"\"\n if self.next_step_prompt:\n user_msg = Message.user_message(self.next_step_prompt)\n self.messages += [user_msg]\n\n try:\n # Get response with tool options\n response = await self.llm.ask_tool(\n messages=self.messages,\n system_msgs=(\n [Message.system_message(self.system_prompt)]\n if self.system_prompt\n else None\n ),\n tools=self.available_tools.to_params(),\n tool_choice=self.tool_choices,\n )\n except ValueError:\n raise\n except Exception as e:\n # Check if this is a RetryError containing TokenLimitExceeded\n if hasattr(e, \"__cause__\") and isinstance(e.__cause__, TokenLimitExceeded):\n token_limit_error = e.__cause__\n logger.error(\n f\"🚨 Token limit error (from RetryError): {token_limit_error}\"\n )\n 
self.memory.add_message(\n Message.assistant_message(\n f\"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}\"\n )\n )\n self.state = AgentState.FINISHED\n return False\n raise\n\n self.tool_calls = tool_calls = (\n response.tool_calls if response and response.tool_calls else []\n )\n content = response.content if response and response.content else \"\"\n\n # Log response info\n logger.info(f\"✨ {self.name}'s thoughts: {content}\")\n logger.info(\n f\"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use\"\n )\n if tool_calls:\n logger.info(\n f\"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}\"\n )\n logger.info(f\"🔧 Tool arguments: {tool_calls[0].function.arguments}\")\n\n try:\n if response is None:\n raise RuntimeError(\"No response received from the LLM\")\n\n # Handle different tool_choices modes\n if self.tool_choices == ToolChoice.NONE:\n if tool_calls:\n logger.warning(\n f\"🤔 Hmm, {self.name} tried to use tools when they weren't available!\"\n )\n if content:\n self.memory.add_message(Message.assistant_message(content))\n return True\n return False\n\n # Create and add assistant message\n assistant_msg = (\n Message.from_tool_calls(content=content, tool_calls=self.tool_calls)\n if self.tool_calls\n else Message.assistant_message(content)\n )\n self.memory.add_message(assistant_msg)\n\n if self.tool_choices == ToolChoice.REQUIRED and not self.tool_calls:\n return True # Will be handled in act()\n\n # For 'auto' mode, continue with content if no commands but content exists\n if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:\n return bool(content)\n\n return bool(self.tool_calls)\n except Exception as e:\n logger.error(f\"🚨 Oops! 
The {self.name}'s thinking process hit a snag: {e}\")\n self.memory.add_message(\n Message.assistant_message(\n f\"Error encountered while processing: {str(e)}\"\n )\n )\n return False\n\n async def act(self) -> str:\n \"\"\"Execute tool calls and handle their results\"\"\"\n if not self.tool_calls:\n if self.tool_choices == ToolChoice.REQUIRED:\n raise ValueError(TOOL_CALL_REQUIRED)\n\n # Return last message content if no tool calls\n return self.messages[-1].content or \"No content or commands to execute\"\n\n results = []\n for command in self.tool_calls:\n # Reset base64_image for each tool call\n self._current_base64_image = None\n\n result = await self.execute_tool(command)\n\n if self.max_observe:\n result = result[: self.max_observe]\n\n logger.info(\n f\"🎯 Tool '{command.function.name}' completed its mission! Result: {result}\"\n )\n\n # Add tool response to memory\n tool_msg = Message.tool_message(\n content=result,\n tool_call_id=command.id,\n name=command.function.name,\n base64_image=self._current_base64_image,\n )\n self.memory.add_message(tool_msg)\n results.append(result)\n\n return \"\\n\\n\".join(results)\n\n async def execute_tool(self, command: ToolCall) -> str:\n \"\"\"Execute a single tool call with robust error handling\"\"\"\n if not command or not command.function or not command.function.name:\n return \"Error: Invalid command format\"\n\n name = command.function.name\n if name not in self.available_tools.tool_map:\n return f\"Error: Unknown tool '{name}'\"\n\n try:\n # Parse arguments\n args = json.loads(command.function.arguments or \"{}\")\n\n # Execute the tool\n logger.info(f\"🔧 Activating tool: '{name}'...\")\n result = await self.available_tools.execute(name=name, tool_input=args)\n\n # Handle special tools\n await self._handle_special_tool(name=name, result=result)\n\n # Check if result is a ToolResult with base64_image\n if hasattr(result, \"base64_image\") and result.base64_image:\n # Store the base64_image for later use in 
tool_message\n self._current_base64_image = result.base64_image\n\n # Format result for display (standard case)\n observation = (\n f\"Observed output of cmd `{name}` executed:\\n{str(result)}\"\n if result\n else f\"Cmd `{name}` completed with no output\"\n )\n\n return observation\n except json.JSONDecodeError:\n error_msg = f\"Error parsing arguments for {name}: Invalid JSON format\"\n logger.error(\n f\"📝 Oops! The arguments for '{name}' don't make sense - invalid JSON, arguments:{command.function.arguments}\"\n )\n return f\"Error: {error_msg}\"\n except Exception as e:\n error_msg = f\"⚠️ Tool '{name}' encountered a problem: {str(e)}\"\n logger.exception(error_msg)\n return f\"Error: {error_msg}\"\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs):\n \"\"\"Handle special tool execution and state changes\"\"\"\n if not self._is_special_tool(name):\n return\n\n if self._should_finish_execution(name=name, result=result, **kwargs):\n # Set agent state to finished\n logger.info(f\"🏁 Special tool '{name}' has completed the task!\")\n self.state = AgentState.FINISHED\n\n @staticmethod\n def _should_finish_execution(**kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n return True\n\n def _is_special_tool(self, name: str) -> bool:\n \"\"\"Check if tool name is in special tools list\"\"\"\n return name.lower() in [n.lower() for n in self.special_tool_names]\n\n async def cleanup(self):\n \"\"\"Clean up resources used by the agent's tools.\"\"\"\n logger.info(f\"🧹 Cleaning up resources for agent '{self.name}'...\")\n for tool_name, tool_instance in self.available_tools.tool_map.items():\n if hasattr(tool_instance, \"cleanup\") and asyncio.iscoroutinefunction(\n tool_instance.cleanup\n ):\n try:\n logger.debug(f\"🧼 Cleaning up tool: {tool_name}\")\n await tool_instance.cleanup()\n except Exception as e:\n logger.error(\n f\"🚨 Error cleaning up tool '{tool_name}': {e}\", exc_info=True\n )\n logger.info(f\"✨ Cleanup 
complete for agent '{self.name}'.\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n return await super().run(request)\n finally:\n await self.cleanup()\n"], ["/OpenManus/app/tool/base.py", "from abc import ABC, abstractmethod\nfrom typing import Any, Dict, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass BaseTool(ABC, BaseModel):\n name: str\n description: str\n parameters: Optional[dict] = None\n\n class Config:\n arbitrary_types_allowed = True\n\n async def __call__(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n return await self.execute(**kwargs)\n\n @abstractmethod\n async def execute(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n\n def to_param(self) -> Dict:\n \"\"\"Convert tool to function call format.\"\"\"\n return {\n \"type\": \"function\",\n \"function\": {\n \"name\": self.name,\n \"description\": self.description,\n \"parameters\": self.parameters,\n },\n }\n\n\nclass ToolResult(BaseModel):\n \"\"\"Represents the result of a tool execution.\"\"\"\n\n output: Any = Field(default=None)\n error: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n system: Optional[str] = Field(default=None)\n\n class Config:\n arbitrary_types_allowed = True\n\n def __bool__(self):\n return any(getattr(self, field) for field in self.__fields__)\n\n def __add__(self, other: \"ToolResult\"):\n def combine_fields(\n field: Optional[str], other_field: Optional[str], concatenate: bool = True\n ):\n if field and other_field:\n if concatenate:\n return field + other_field\n raise ValueError(\"Cannot combine tool results\")\n return field or other_field\n\n return ToolResult(\n output=combine_fields(self.output, other.output),\n error=combine_fields(self.error, other.error),\n base64_image=combine_fields(self.base64_image, other.base64_image, False),\n system=combine_fields(self.system, 
other.system),\n )\n\n def __str__(self):\n return f\"Error: {self.error}\" if self.error else self.output\n\n def replace(self, **kwargs):\n \"\"\"Returns a new ToolResult with the given fields replaced.\"\"\"\n # return self.copy(update=kwargs)\n return type(self)(**{**self.dict(), **kwargs})\n\n\nclass CLIResult(ToolResult):\n \"\"\"A ToolResult that can be rendered as a CLI output.\"\"\"\n\n\nclass ToolFailure(ToolResult):\n \"\"\"A ToolResult that represents a failure.\"\"\"\n"], ["/OpenManus/app/mcp/server.py", "import logging\nimport sys\n\n\nlogging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])\n\nimport argparse\nimport asyncio\nimport atexit\nimport json\nfrom inspect import Parameter, Signature\nfrom typing import Any, Dict, Optional\n\nfrom mcp.server.fastmcp import FastMCP\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\n\n\nclass MCPServer:\n \"\"\"MCP Server implementation with tool registration and management.\"\"\"\n\n def __init__(self, name: str = \"openmanus\"):\n self.server = FastMCP(name)\n self.tools: Dict[str, BaseTool] = {}\n\n # Initialize standard tools\n self.tools[\"bash\"] = Bash()\n self.tools[\"browser\"] = BrowserUseTool()\n self.tools[\"editor\"] = StrReplaceEditor()\n self.tools[\"terminate\"] = Terminate()\n\n def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:\n \"\"\"Register a tool with parameter validation and documentation.\"\"\"\n tool_name = method_name or tool.name\n tool_param = tool.to_param()\n tool_function = tool_param[\"function\"]\n\n # Define the async function to be registered\n async def tool_method(**kwargs):\n logger.info(f\"Executing {tool_name}: {kwargs}\")\n result = await tool.execute(**kwargs)\n\n logger.info(f\"Result of {tool_name}: 
{result}\")\n\n # Handle different types of results (match original logic)\n if hasattr(result, \"model_dump\"):\n return json.dumps(result.model_dump())\n elif isinstance(result, dict):\n return json.dumps(result)\n return result\n\n # Set method metadata\n tool_method.__name__ = tool_name\n tool_method.__doc__ = self._build_docstring(tool_function)\n tool_method.__signature__ = self._build_signature(tool_function)\n\n # Store parameter schema (important for tools that access it programmatically)\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n tool_method._parameter_schema = {\n param_name: {\n \"description\": param_details.get(\"description\", \"\"),\n \"type\": param_details.get(\"type\", \"any\"),\n \"required\": param_name in required_params,\n }\n for param_name, param_details in param_props.items()\n }\n\n # Register with server\n self.server.tool()(tool_method)\n logger.info(f\"Registered tool: {tool_name}\")\n\n def _build_docstring(self, tool_function: dict) -> str:\n \"\"\"Build a formatted docstring from tool function metadata.\"\"\"\n description = tool_function.get(\"description\", \"\")\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n # Build docstring (match original format)\n docstring = description\n if param_props:\n docstring += \"\\n\\nParameters:\\n\"\n for param_name, param_details in param_props.items():\n required_str = (\n \"(required)\" if param_name in required_params else \"(optional)\"\n )\n param_type = param_details.get(\"type\", \"any\")\n param_desc = param_details.get(\"description\", \"\")\n docstring += (\n f\" {param_name} ({param_type}) {required_str}: {param_desc}\\n\"\n )\n\n return docstring\n\n def _build_signature(self, tool_function: dict) -> Signature:\n \"\"\"Build a function signature from tool 
function metadata.\"\"\"\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n parameters = []\n\n # Follow original type mapping\n for param_name, param_details in param_props.items():\n param_type = param_details.get(\"type\", \"\")\n default = Parameter.empty if param_name in required_params else None\n\n # Map JSON Schema types to Python types (same as original)\n annotation = Any\n if param_type == \"string\":\n annotation = str\n elif param_type == \"integer\":\n annotation = int\n elif param_type == \"number\":\n annotation = float\n elif param_type == \"boolean\":\n annotation = bool\n elif param_type == \"object\":\n annotation = dict\n elif param_type == \"array\":\n annotation = list\n\n # Create parameter with same structure as original\n param = Parameter(\n name=param_name,\n kind=Parameter.KEYWORD_ONLY,\n default=default,\n annotation=annotation,\n )\n parameters.append(param)\n\n return Signature(parameters=parameters)\n\n async def cleanup(self) -> None:\n \"\"\"Clean up server resources.\"\"\"\n logger.info(\"Cleaning up resources\")\n # Follow original cleanup logic - only clean browser tool\n if \"browser\" in self.tools and hasattr(self.tools[\"browser\"], \"cleanup\"):\n await self.tools[\"browser\"].cleanup()\n\n def register_all_tools(self) -> None:\n \"\"\"Register all tools with the server.\"\"\"\n for tool in self.tools.values():\n self.register_tool(tool)\n\n def run(self, transport: str = \"stdio\") -> None:\n \"\"\"Run the MCP server.\"\"\"\n # Register all tools\n self.register_all_tools()\n\n # Register cleanup function (match original behavior)\n atexit.register(lambda: asyncio.run(self.cleanup()))\n\n # Start server (with same logging as original)\n logger.info(f\"Starting OpenManus server ({transport} mode)\")\n self.server.run(transport=transport)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line 
arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"OpenManus MCP Server\")\n parser.add_argument(\n \"--transport\",\n choices=[\"stdio\"],\n default=\"stdio\",\n help=\"Communication method: stdio or http (default: stdio)\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/tool/str_replace_editor.py", "\"\"\"File and directory manipulation tool with sandbox support.\"\"\"\n\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, DefaultDict, List, Literal, Optional, get_args\n\nfrom app.config import config\nfrom app.exceptions import ToolError\nfrom app.tool import BaseTool\nfrom app.tool.base import CLIResult, ToolResult\nfrom app.tool.file_operators import (\n FileOperator,\n LocalFileOperator,\n PathLike,\n SandboxFileOperator,\n)\n\n\nCommand = Literal[\n \"view\",\n \"create\",\n \"str_replace\",\n \"insert\",\n \"undo_edit\",\n]\n\n# Constants\nSNIPPET_LINES: int = 4\nMAX_RESPONSE_LEN: int = 16000\nTRUNCATED_MESSAGE: str = (\n \"To save on context only part of this file has been shown to you. \"\n \"You should retry this tool after you have searched inside the file with `grep -n` \"\n \"in order to find the line numbers of what you are looking for.\"\n)\n\n# Tool description\n_STR_REPLACE_EDITOR_DESCRIPTION = \"\"\"Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. 
If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with ``\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n\"\"\"\n\n\ndef maybe_truncate(\n content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN\n) -> str:\n \"\"\"Truncate content and append a notice if content exceeds the specified length.\"\"\"\n if not truncate_after or len(content) <= truncate_after:\n return content\n return content[:truncate_after] + TRUNCATED_MESSAGE\n\n\nclass StrReplaceEditor(BaseTool):\n \"\"\"A tool for viewing, creating, and editing files with sandbox support.\"\"\"\n\n name: str = \"str_replace_editor\"\n description: str = _STR_REPLACE_EDITOR_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The commands to run. 
Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\",\n \"enum\": [\"view\", \"create\", \"str_replace\", \"insert\", \"undo_edit\"],\n \"type\": \"string\",\n },\n \"path\": {\n \"description\": \"Absolute path to file or directory.\",\n \"type\": \"string\",\n },\n \"file_text\": {\n \"description\": \"Required parameter of `create` command, with the content of the file to be created.\",\n \"type\": \"string\",\n },\n \"old_str\": {\n \"description\": \"Required parameter of `str_replace` command containing the string in `path` to replace.\",\n \"type\": \"string\",\n },\n \"new_str\": {\n \"description\": \"Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\",\n \"type\": \"string\",\n },\n \"insert_line\": {\n \"description\": \"Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\",\n \"type\": \"integer\",\n },\n \"view_range\": {\n \"description\": \"Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. 
Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\",\n \"items\": {\"type\": \"integer\"},\n \"type\": \"array\",\n },\n },\n \"required\": [\"command\", \"path\"],\n }\n _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list)\n _local_operator: LocalFileOperator = LocalFileOperator()\n _sandbox_operator: SandboxFileOperator = SandboxFileOperator()\n\n # def _get_operator(self, use_sandbox: bool) -> FileOperator:\n def _get_operator(self) -> FileOperator:\n \"\"\"Get the appropriate file operator based on execution mode.\"\"\"\n return (\n self._sandbox_operator\n if config.sandbox.use_sandbox\n else self._local_operator\n )\n\n async def execute(\n self,\n *,\n command: Command,\n path: str,\n file_text: str | None = None,\n view_range: list[int] | None = None,\n old_str: str | None = None,\n new_str: str | None = None,\n insert_line: int | None = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Execute a file operation command.\"\"\"\n # Get the appropriate file operator\n operator = self._get_operator()\n\n # Validate path and command combination\n await self.validate_path(command, Path(path), operator)\n\n # Execute the appropriate command\n if command == \"view\":\n result = await self.view(path, view_range, operator)\n elif command == \"create\":\n if file_text is None:\n raise ToolError(\"Parameter `file_text` is required for command: create\")\n await operator.write_file(path, file_text)\n self._file_history[path].append(file_text)\n result = ToolResult(output=f\"File created successfully at: {path}\")\n elif command == \"str_replace\":\n if old_str is None:\n raise ToolError(\n \"Parameter `old_str` is required for command: str_replace\"\n )\n result = await self.str_replace(path, old_str, new_str, operator)\n elif command == \"insert\":\n if insert_line is None:\n raise ToolError(\n \"Parameter `insert_line` is required for command: insert\"\n )\n if new_str is None:\n raise ToolError(\"Parameter `new_str` is 
required for command: insert\")\n result = await self.insert(path, insert_line, new_str, operator)\n elif command == \"undo_edit\":\n result = await self.undo_edit(path, operator)\n else:\n # This should be caught by type checking, but we include it for safety\n raise ToolError(\n f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {\", \".join(get_args(Command))}'\n )\n\n return str(result)\n\n async def validate_path(\n self, command: str, path: Path, operator: FileOperator\n ) -> None:\n \"\"\"Validate path and command combination based on execution environment.\"\"\"\n # Check if path is absolute\n if not path.is_absolute():\n raise ToolError(f\"The path {path} is not an absolute path\")\n\n # Only check if path exists for non-create commands\n if command != \"create\":\n if not await operator.exists(path):\n raise ToolError(\n f\"The path {path} does not exist. Please provide a valid path.\"\n )\n\n # Check if path is a directory\n is_dir = await operator.is_directory(path)\n if is_dir and command != \"view\":\n raise ToolError(\n f\"The path {path} is a directory and only the `view` command can be used on directories\"\n )\n\n # Check if file exists for create command\n elif command == \"create\":\n exists = await operator.exists(path)\n if exists:\n raise ToolError(\n f\"File already exists at: {path}. 
Cannot overwrite files using command `create`.\"\n )\n\n async def view(\n self,\n path: PathLike,\n view_range: Optional[List[int]] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Display file or directory content.\"\"\"\n # Determine if path is a directory\n is_dir = await operator.is_directory(path)\n\n if is_dir:\n # Directory handling\n if view_range:\n raise ToolError(\n \"The `view_range` parameter is not allowed when `path` points to a directory.\"\n )\n\n return await self._view_directory(path, operator)\n else:\n # File handling\n return await self._view_file(path, operator, view_range)\n\n @staticmethod\n async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult:\n \"\"\"Display directory contents.\"\"\"\n find_cmd = f\"find {path} -maxdepth 2 -not -path '*/\\\\.*'\"\n\n # Execute command using the operator\n returncode, stdout, stderr = await operator.run_command(find_cmd)\n\n if not stderr:\n stdout = (\n f\"Here's the files and directories up to 2 levels deep in {path}, \"\n f\"excluding hidden items:\\n{stdout}\\n\"\n )\n\n return CLIResult(output=stdout, error=stderr)\n\n async def _view_file(\n self,\n path: PathLike,\n operator: FileOperator,\n view_range: Optional[List[int]] = None,\n ) -> CLIResult:\n \"\"\"Display file content, optionally within a specified line range.\"\"\"\n # Read file content\n file_content = await operator.read_file(path)\n init_line = 1\n\n # Apply view range if specified\n if view_range:\n if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range):\n raise ToolError(\n \"Invalid `view_range`. It should be a list of two integers.\"\n )\n\n file_lines = file_content.split(\"\\n\")\n n_lines_file = len(file_lines)\n init_line, final_line = view_range\n\n # Validate view range\n if init_line < 1 or init_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. 
Its first element `{init_line}` should be \"\n f\"within the range of lines of the file: {[1, n_lines_file]}\"\n )\n if final_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"smaller than the number of lines in the file: `{n_lines_file}`\"\n )\n if final_line != -1 and final_line < init_line:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"larger or equal than its first `{init_line}`\"\n )\n\n # Apply range\n if final_line == -1:\n file_content = \"\\n\".join(file_lines[init_line - 1 :])\n else:\n file_content = \"\\n\".join(file_lines[init_line - 1 : final_line])\n\n # Format and return result\n return CLIResult(\n output=self._make_output(file_content, str(path), init_line=init_line)\n )\n\n async def str_replace(\n self,\n path: PathLike,\n old_str: str,\n new_str: Optional[str] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Replace a unique string in a file with a new string.\"\"\"\n # Read file content and expand tabs\n file_content = (await operator.read_file(path)).expandtabs()\n old_str = old_str.expandtabs()\n new_str = new_str.expandtabs() if new_str is not None else \"\"\n\n # Check if old_str is unique in the file\n occurrences = file_content.count(old_str)\n if occurrences == 0:\n raise ToolError(\n f\"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}.\"\n )\n elif occurrences > 1:\n # Find line numbers of occurrences\n file_content_lines = file_content.split(\"\\n\")\n lines = [\n idx + 1\n for idx, line in enumerate(file_content_lines)\n if old_str in line\n ]\n raise ToolError(\n f\"No replacement was performed. Multiple occurrences of old_str `{old_str}` \"\n f\"in lines {lines}. 
Please ensure it is unique\"\n )\n\n # Replace old_str with new_str\n new_file_content = file_content.replace(old_str, new_str)\n\n # Write the new content to the file\n await operator.write_file(path, new_file_content)\n\n # Save the original content to history\n self._file_history[path].append(file_content)\n\n # Create a snippet of the edited section\n replacement_line = file_content.split(old_str)[0].count(\"\\n\")\n start_line = max(0, replacement_line - SNIPPET_LINES)\n end_line = replacement_line + SNIPPET_LINES + new_str.count(\"\\n\")\n snippet = \"\\n\".join(new_file_content.split(\"\\n\")[start_line : end_line + 1])\n\n # Prepare the success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet, f\"a snippet of {path}\", start_line + 1\n )\n success_msg += \"Review the changes and make sure they are as expected. Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def insert(\n self,\n path: PathLike,\n insert_line: int,\n new_str: str,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Insert text at a specific line in a file.\"\"\"\n # Read and prepare content\n file_text = (await operator.read_file(path)).expandtabs()\n new_str = new_str.expandtabs()\n file_text_lines = file_text.split(\"\\n\")\n n_lines_file = len(file_text_lines)\n\n # Validate insert_line\n if insert_line < 0 or insert_line > n_lines_file:\n raise ToolError(\n f\"Invalid `insert_line` parameter: {insert_line}. 
It should be within \"\n f\"the range of lines of the file: {[0, n_lines_file]}\"\n )\n\n # Perform insertion\n new_str_lines = new_str.split(\"\\n\")\n new_file_text_lines = (\n file_text_lines[:insert_line]\n + new_str_lines\n + file_text_lines[insert_line:]\n )\n\n # Create a snippet for preview\n snippet_lines = (\n file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]\n + new_str_lines\n + file_text_lines[insert_line : insert_line + SNIPPET_LINES]\n )\n\n # Join lines and write to file\n new_file_text = \"\\n\".join(new_file_text_lines)\n snippet = \"\\n\".join(snippet_lines)\n\n await operator.write_file(path, new_file_text)\n self._file_history[path].append(file_text)\n\n # Prepare success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet,\n \"a snippet of the edited file\",\n max(1, insert_line - SNIPPET_LINES + 1),\n )\n success_msg += \"Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def undo_edit(\n self, path: PathLike, operator: FileOperator = None\n ) -> CLIResult:\n \"\"\"Revert the last edit made to a file.\"\"\"\n if not self._file_history[path]:\n raise ToolError(f\"No edit history found for {path}.\")\n\n old_text = self._file_history[path].pop()\n await operator.write_file(path, old_text)\n\n return CLIResult(\n output=f\"Last edit to {path} undone successfully. 
{self._make_output(old_text, str(path))}\"\n )\n\n def _make_output(\n self,\n file_content: str,\n file_descriptor: str,\n init_line: int = 1,\n expand_tabs: bool = True,\n ) -> str:\n \"\"\"Format file content for display with line numbers.\"\"\"\n file_content = maybe_truncate(file_content)\n if expand_tabs:\n file_content = file_content.expandtabs()\n\n # Add line numbers to each line\n file_content = \"\\n\".join(\n [\n f\"{i + init_line:6}\\t{line}\"\n for i, line in enumerate(file_content.split(\"\\n\"))\n ]\n )\n\n return (\n f\"Here's the result of running `cat -n` on {file_descriptor}:\\n\"\n + file_content\n + \"\\n\"\n )\n"], ["/OpenManus/app/tool/mcp.py", "from contextlib import AsyncExitStack\nfrom typing import Dict, List, Optional\n\nfrom mcp import ClientSession, StdioServerParameters\nfrom mcp.client.sse import sse_client\nfrom mcp.client.stdio import stdio_client\nfrom mcp.types import ListToolsResult, TextContent\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.tool_collection import ToolCollection\n\n\nclass MCPClientTool(BaseTool):\n \"\"\"Represents a tool proxy that can be called on the MCP server from the client side.\"\"\"\n\n session: Optional[ClientSession] = None\n server_id: str = \"\" # Add server identifier\n original_name: str = \"\"\n\n async def execute(self, **kwargs) -> ToolResult:\n \"\"\"Execute the tool by making a remote call to the MCP server.\"\"\"\n if not self.session:\n return ToolResult(error=\"Not connected to MCP server\")\n\n try:\n logger.info(f\"Executing tool: {self.original_name}\")\n result = await self.session.call_tool(self.original_name, kwargs)\n content_str = \", \".join(\n item.text for item in result.content if isinstance(item, TextContent)\n )\n return ToolResult(output=content_str or \"No output returned.\")\n except Exception as e:\n return ToolResult(error=f\"Error executing tool: {str(e)}\")\n\n\nclass MCPClients(ToolCollection):\n \"\"\"\n A collection 
of tools that connects to multiple MCP servers and manages available tools through the Model Context Protocol.\n \"\"\"\n\n sessions: Dict[str, ClientSession] = {}\n exit_stacks: Dict[str, AsyncExitStack] = {}\n description: str = \"MCP client tools for server interaction\"\n\n def __init__(self):\n super().__init__() # Initialize with empty tools list\n self.name = \"mcp\" # Keep name for backward compatibility\n\n async def connect_sse(self, server_url: str, server_id: str = \"\") -> None:\n \"\"\"Connect to an MCP server using SSE transport.\"\"\"\n if not server_url:\n raise ValueError(\"Server URL is required.\")\n\n server_id = server_id or server_url\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n streams_context = sse_client(url=server_url)\n streams = await exit_stack.enter_async_context(streams_context)\n session = await exit_stack.enter_async_context(ClientSession(*streams))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def connect_stdio(\n self, command: str, args: List[str], server_id: str = \"\"\n ) -> None:\n \"\"\"Connect to an MCP server using stdio transport.\"\"\"\n if not command:\n raise ValueError(\"Server command is required.\")\n\n server_id = server_id or command\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n server_params = StdioServerParameters(command=command, args=args)\n stdio_transport = await exit_stack.enter_async_context(\n stdio_client(server_params)\n )\n read, write = stdio_transport\n session = await exit_stack.enter_async_context(ClientSession(read, write))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n 
async def _initialize_and_list_tools(self, server_id: str) -> None:\n \"\"\"Initialize session and populate tool map.\"\"\"\n session = self.sessions.get(server_id)\n if not session:\n raise RuntimeError(f\"Session not initialized for server {server_id}\")\n\n await session.initialize()\n response = await session.list_tools()\n\n # Create proper tool objects for each server tool\n for tool in response.tools:\n original_name = tool.name\n tool_name = f\"mcp_{server_id}_{original_name}\"\n tool_name = self._sanitize_tool_name(tool_name)\n\n server_tool = MCPClientTool(\n name=tool_name,\n description=tool.description,\n parameters=tool.inputSchema,\n session=session,\n server_id=server_id,\n original_name=original_name,\n )\n self.tool_map[tool_name] = server_tool\n\n # Update tools tuple\n self.tools = tuple(self.tool_map.values())\n logger.info(\n f\"Connected to server {server_id} with tools: {[tool.name for tool in response.tools]}\"\n )\n\n def _sanitize_tool_name(self, name: str) -> str:\n \"\"\"Sanitize tool name to match MCPClientTool requirements.\"\"\"\n import re\n\n # Replace invalid characters with underscores\n sanitized = re.sub(r\"[^a-zA-Z0-9_-]\", \"_\", name)\n\n # Remove consecutive underscores\n sanitized = re.sub(r\"_+\", \"_\", sanitized)\n\n # Remove leading/trailing underscores\n sanitized = sanitized.strip(\"_\")\n\n # Truncate to 64 characters if needed\n if len(sanitized) > 64:\n sanitized = sanitized[:64]\n\n return sanitized\n\n async def list_tools(self) -> ListToolsResult:\n \"\"\"List all available tools.\"\"\"\n tools_result = ListToolsResult(tools=[])\n for session in self.sessions.values():\n response = await session.list_tools()\n tools_result.tools += response.tools\n return tools_result\n\n async def disconnect(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from a specific MCP server or all servers if no server_id provided.\"\"\"\n if server_id:\n if server_id in self.sessions:\n try:\n exit_stack = 
self.exit_stacks.get(server_id)\n\n # Close the exit stack which will handle session cleanup\n if exit_stack:\n try:\n await exit_stack.aclose()\n except RuntimeError as e:\n if \"cancel scope\" in str(e).lower():\n logger.warning(\n f\"Cancel scope error during disconnect from {server_id}, continuing with cleanup: {e}\"\n )\n else:\n raise\n\n # Clean up references\n self.sessions.pop(server_id, None)\n self.exit_stacks.pop(server_id, None)\n\n # Remove tools associated with this server\n self.tool_map = {\n k: v\n for k, v in self.tool_map.items()\n if v.server_id != server_id\n }\n self.tools = tuple(self.tool_map.values())\n logger.info(f\"Disconnected from MCP server {server_id}\")\n except Exception as e:\n logger.error(f\"Error disconnecting from server {server_id}: {e}\")\n else:\n # Disconnect from all servers in a deterministic order\n for sid in sorted(list(self.sessions.keys())):\n await self.disconnect(sid)\n self.tool_map = {}\n self.tools = tuple()\n logger.info(\"Disconnected from all MCP servers\")\n"], ["/OpenManus/run_mcp.py", "#!/usr/bin/env python\nimport argparse\nimport asyncio\nimport sys\n\nfrom app.agent.mcp import MCPAgent\nfrom app.config import config\nfrom app.logger import logger\n\n\nclass MCPRunner:\n \"\"\"Runner class for MCP Agent with proper path handling and configuration.\"\"\"\n\n def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()\n\n async def initialize(\n self,\n connection_type: str,\n server_url: str | None = None,\n ) -> None:\n \"\"\"Initialize the MCP agent with the appropriate connection.\"\"\"\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: # sse\n await self.agent.initialize(connection_type=\"sse\", 
server_url=server_url)\n\n logger.info(f\"Connected to MCP server via {connection_type}\")\n\n async def run_interactive(self) -> None:\n \"\"\"Run the agent in interactive mode.\"\"\"\n print(\"\\nMCP Agent Interactive Mode (type 'exit' to quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n print(f\"\\nAgent: {response}\")\n\n async def run_single_prompt(self, prompt: str) -> None:\n \"\"\"Run the agent with a single prompt.\"\"\"\n await self.agent.run(prompt)\n\n async def run_default(self) -> None:\n \"\"\"Run the agent in default mode.\"\"\"\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")\n\n async def cleanup(self) -> None:\n \"\"\"Clean up agent resources.\"\"\"\n await self.agent.cleanup()\n logger.info(\"Session ended\")\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Run the MCP Agent\")\n parser.add_argument(\n \"--connection\",\n \"-c\",\n choices=[\"stdio\", \"sse\"],\n default=\"stdio\",\n help=\"Connection type: stdio or sse\",\n )\n parser.add_argument(\n \"--server-url\",\n default=\"http://127.0.0.1:8000/sse\",\n help=\"URL for SSE connection\",\n )\n parser.add_argument(\n \"--interactive\", \"-i\", action=\"store_true\", help=\"Run in interactive mode\"\n )\n parser.add_argument(\"--prompt\", \"-p\", help=\"Single prompt to execute and exit\")\n return parser.parse_args()\n\n\nasync def run_mcp() -> None:\n \"\"\"Main entry point for the MCP runner.\"\"\"\n args = parse_args()\n runner = MCPRunner()\n\n try:\n await runner.initialize(args.connection, args.server_url)\n\n if args.prompt:\n await 
runner.run_single_prompt(args.prompt)\n elif args.interactive:\n await runner.run_interactive()\n else:\n await runner.run_default()\n\n except KeyboardInterrupt:\n logger.info(\"Program interrupted by user\")\n except Exception as e:\n logger.error(f\"Error running MCPAgent: {str(e)}\", exc_info=True)\n sys.exit(1)\n finally:\n await runner.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_mcp())\n"], ["/OpenManus/app/sandbox/core/sandbox.py", "import asyncio\nimport io\nimport os\nimport tarfile\nimport tempfile\nimport uuid\nfrom typing import Dict, Optional\n\nimport docker\nfrom docker.errors import NotFound\nfrom docker.models.containers import Container\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.exceptions import SandboxTimeoutError\nfrom app.sandbox.core.terminal import AsyncDockerizedTerminal\n\n\nclass DockerSandbox:\n \"\"\"Docker sandbox environment.\n\n Provides a containerized execution environment with resource limits,\n file operations, and command execution capabilities.\n\n Attributes:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n client: Docker client.\n container: Docker container instance.\n terminal: Container terminal interface.\n \"\"\"\n\n def __init__(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Initializes a sandbox instance.\n\n Args:\n config: Sandbox configuration. 
Default configuration used if None.\n volume_bindings: Volume mappings in {host_path: container_path} format.\n \"\"\"\n self.config = config or SandboxSettings()\n self.volume_bindings = volume_bindings or {}\n self.client = docker.from_env()\n self.container: Optional[Container] = None\n self.terminal: Optional[AsyncDockerizedTerminal] = None\n\n async def create(self) -> \"DockerSandbox\":\n \"\"\"Creates and starts the sandbox container.\n\n Returns:\n Current sandbox instance.\n\n Raises:\n docker.errors.APIError: If Docker API call fails.\n RuntimeError: If container creation or startup fails.\n \"\"\"\n try:\n # Prepare container config\n host_config = self.client.api.create_host_config(\n mem_limit=self.config.memory_limit,\n cpu_period=100000,\n cpu_quota=int(100000 * self.config.cpu_limit),\n network_mode=\"none\" if not self.config.network_enabled else \"bridge\",\n binds=self._prepare_volume_bindings(),\n )\n\n # Generate unique container name with sandbox_ prefix\n container_name = f\"sandbox_{uuid.uuid4().hex[:8]}\"\n\n # Create container\n container = await asyncio.to_thread(\n self.client.api.create_container,\n image=self.config.image,\n command=\"tail -f /dev/null\",\n hostname=\"sandbox\",\n working_dir=self.config.work_dir,\n host_config=host_config,\n name=container_name,\n tty=True,\n detach=True,\n )\n\n self.container = self.client.containers.get(container[\"Id\"])\n\n # Start container\n await asyncio.to_thread(self.container.start)\n\n # Initialize terminal\n self.terminal = AsyncDockerizedTerminal(\n container[\"Id\"],\n self.config.work_dir,\n env_vars={\"PYTHONUNBUFFERED\": \"1\"}\n # Ensure Python output is not buffered\n )\n await self.terminal.init()\n\n return self\n\n except Exception as e:\n await self.cleanup() # Ensure resources are cleaned up\n raise RuntimeError(f\"Failed to create sandbox: {e}\") from e\n\n def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]:\n \"\"\"Prepares volume binding configuration.\n\n 
Returns:\n Volume binding configuration dictionary.\n \"\"\"\n bindings = {}\n\n # Create and add working directory mapping\n work_dir = self._ensure_host_dir(self.config.work_dir)\n bindings[work_dir] = {\"bind\": self.config.work_dir, \"mode\": \"rw\"}\n\n # Add custom volume bindings\n for host_path, container_path in self.volume_bindings.items():\n bindings[host_path] = {\"bind\": container_path, \"mode\": \"rw\"}\n\n return bindings\n\n @staticmethod\n def _ensure_host_dir(path: str) -> str:\n \"\"\"Ensures directory exists on the host.\n\n Args:\n path: Directory path.\n\n Returns:\n Actual path on the host.\n \"\"\"\n host_path = os.path.join(\n tempfile.gettempdir(),\n f\"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}\",\n )\n os.makedirs(host_path, exist_ok=True)\n return host_path\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the sandbox.\n\n Args:\n cmd: Command to execute.\n timeout: Timeout in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If sandbox not initialized or command execution fails.\n TimeoutError: If command execution times out.\n \"\"\"\n if not self.terminal:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n return await self.terminal.run_command(\n cmd, timeout=timeout or self.config.timeout\n )\n except TimeoutError:\n raise SandboxTimeoutError(\n f\"Command execution timed out after {timeout or self.config.timeout} seconds\"\n )\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads a file from the container.\n\n Args:\n path: File path.\n\n Returns:\n File contents as string.\n\n Raises:\n FileNotFoundError: If file does not exist.\n RuntimeError: If read operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n # Get file archive\n resolved_path = self._safe_resolve_path(path)\n tar_stream, _ = await asyncio.to_thread(\n self.container.get_archive, resolved_path\n 
)\n\n # Read file content from tar stream\n content = await self._read_from_tar(tar_stream)\n return content.decode(\"utf-8\")\n\n except NotFound:\n raise FileNotFoundError(f\"File not found: {path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to read file: {e}\")\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to a file in the container.\n\n Args:\n path: Target path.\n content: File content.\n\n Raises:\n RuntimeError: If write operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n resolved_path = self._safe_resolve_path(path)\n parent_dir = os.path.dirname(resolved_path)\n\n # Create parent directory\n if parent_dir:\n await self.run_command(f\"mkdir -p {parent_dir}\")\n\n # Prepare file data\n tar_stream = await self._create_tar_stream(\n os.path.basename(path), content.encode(\"utf-8\")\n )\n\n # Write file\n await asyncio.to_thread(\n self.container.put_archive, parent_dir or \"/\", tar_stream\n )\n\n except Exception as e:\n raise RuntimeError(f\"Failed to write file: {e}\")\n\n def _safe_resolve_path(self, path: str) -> str:\n \"\"\"Safely resolves container path, preventing path traversal.\n\n Args:\n path: Original path.\n\n Returns:\n Resolved absolute path.\n\n Raises:\n ValueError: If path contains potentially unsafe patterns.\n \"\"\"\n # Check for path traversal attempts\n if \"..\" in path.split(\"/\"):\n raise ValueError(\"Path contains potentially unsafe patterns\")\n\n resolved = (\n os.path.join(self.config.work_dir, path)\n if not os.path.isabs(path)\n else path\n )\n return resolved\n\n async def copy_from(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file from the container.\n\n Args:\n src_path: Source file path (container).\n dst_path: Destination path (host).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n # Ensure destination file's parent 
directory exists\n parent_dir = os.path.dirname(dst_path)\n if parent_dir:\n os.makedirs(parent_dir, exist_ok=True)\n\n # Get file stream\n resolved_src = self._safe_resolve_path(src_path)\n stream, stat = await asyncio.to_thread(\n self.container.get_archive, resolved_src\n )\n\n # Create temporary directory to extract file\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Write stream to temporary file\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with open(tar_path, \"wb\") as f:\n for chunk in stream:\n f.write(chunk)\n\n # Extract file\n with tarfile.open(tar_path) as tar:\n members = tar.getmembers()\n if not members:\n raise FileNotFoundError(f\"Source file is empty: {src_path}\")\n\n # If destination is a directory, we should preserve relative path structure\n if os.path.isdir(dst_path):\n tar.extractall(dst_path)\n else:\n # If destination is a file, we only extract the source file's content\n if len(members) > 1:\n raise RuntimeError(\n f\"Source path is a directory but destination is a file: {src_path}\"\n )\n\n with open(dst_path, \"wb\") as dst:\n src_file = tar.extractfile(members[0])\n if src_file is None:\n raise RuntimeError(\n f\"Failed to extract file: {src_path}\"\n )\n dst.write(src_file.read())\n\n except docker.errors.NotFound:\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n async def copy_to(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file to the container.\n\n Args:\n src_path: Source file path (host).\n dst_path: Destination path (container).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n if not os.path.exists(src_path):\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n\n # Create destination directory in container\n resolved_dst = self._safe_resolve_path(dst_path)\n container_dir = os.path.dirname(resolved_dst)\n if 
container_dir:\n await self.run_command(f\"mkdir -p {container_dir}\")\n\n # Create tar file to upload\n with tempfile.TemporaryDirectory() as tmp_dir:\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with tarfile.open(tar_path, \"w\") as tar:\n # Handle directory source path\n if os.path.isdir(src_path):\n os.path.basename(src_path.rstrip(\"/\"))\n for root, _, files in os.walk(src_path):\n for file in files:\n file_path = os.path.join(root, file)\n arcname = os.path.join(\n os.path.basename(dst_path),\n os.path.relpath(file_path, src_path),\n )\n tar.add(file_path, arcname=arcname)\n else:\n # Add single file to tar\n tar.add(src_path, arcname=os.path.basename(dst_path))\n\n # Read tar file content\n with open(tar_path, \"rb\") as f:\n data = f.read()\n\n # Upload to container\n await asyncio.to_thread(\n self.container.put_archive,\n os.path.dirname(resolved_dst) or \"/\",\n data,\n )\n\n # Verify file was created successfully\n try:\n await self.run_command(f\"test -e {resolved_dst}\")\n except Exception:\n raise RuntimeError(f\"Failed to verify file creation: {dst_path}\")\n\n except FileNotFoundError:\n raise\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n @staticmethod\n async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO:\n \"\"\"Creates a tar file stream.\n\n Args:\n name: Filename.\n content: File content.\n\n Returns:\n Tar file stream.\n \"\"\"\n tar_stream = io.BytesIO()\n with tarfile.open(fileobj=tar_stream, mode=\"w\") as tar:\n tarinfo = tarfile.TarInfo(name=name)\n tarinfo.size = len(content)\n tar.addfile(tarinfo, io.BytesIO(content))\n tar_stream.seek(0)\n return tar_stream\n\n @staticmethod\n async def _read_from_tar(tar_stream) -> bytes:\n \"\"\"Reads file content from a tar stream.\n\n Args:\n tar_stream: Tar file stream.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If read operation fails.\n \"\"\"\n with tempfile.NamedTemporaryFile() as tmp:\n for chunk in tar_stream:\n 
tmp.write(chunk)\n tmp.seek(0)\n\n with tarfile.open(fileobj=tmp) as tar:\n member = tar.next()\n if not member:\n raise RuntimeError(\"Empty tar archive\")\n\n file_content = tar.extractfile(member)\n if not file_content:\n raise RuntimeError(\"Failed to extract file content\")\n\n return file_content.read()\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up sandbox resources.\"\"\"\n errors = []\n try:\n if self.terminal:\n try:\n await self.terminal.close()\n except Exception as e:\n errors.append(f\"Terminal cleanup error: {e}\")\n finally:\n self.terminal = None\n\n if self.container:\n try:\n await asyncio.to_thread(self.container.stop, timeout=5)\n except Exception as e:\n errors.append(f\"Container stop error: {e}\")\n\n try:\n await asyncio.to_thread(self.container.remove, force=True)\n except Exception as e:\n errors.append(f\"Container remove error: {e}\")\n finally:\n self.container = None\n\n except Exception as e:\n errors.append(f\"General cleanup error: {e}\")\n\n if errors:\n print(f\"Warning: Errors during cleanup: {', '.join(errors)}\")\n\n async def __aenter__(self) -> \"DockerSandbox\":\n \"\"\"Async context manager entry.\"\"\"\n return await self.create()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n"], ["/OpenManus/app/tool/bash.py", "import asyncio\nimport os\nfrom typing import Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, CLIResult\n\n\n_BASH_DESCRIPTION = \"\"\"Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. 
The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says \"Command timed out. Sending SIGINT to the process\", the assistant should retry running the command in the background.\n\"\"\"\n\n\nclass _BashSession:\n \"\"\"A session of a bash shell.\"\"\"\n\n _started: bool\n _process: asyncio.subprocess.Process\n\n command: str = \"/bin/bash\"\n _output_delay: float = 0.2 # seconds\n _timeout: float = 120.0 # seconds\n _sentinel: str = \"<>\"\n\n def __init__(self):\n self._started = False\n self._timed_out = False\n\n async def start(self):\n if self._started:\n return\n\n self._process = await asyncio.create_subprocess_shell(\n self.command,\n preexec_fn=os.setsid,\n shell=True,\n bufsize=0,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n self._started = True\n\n def stop(self):\n \"\"\"Terminate the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return\n self._process.terminate()\n\n async def run(self, command: str):\n \"\"\"Execute a command in the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return CLIResult(\n system=\"tool must be restarted\",\n error=f\"bash has exited with returncode {self._process.returncode}\",\n )\n if self._timed_out:\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n )\n\n # we know these are not None because we created the process with PIPEs\n assert self._process.stdin\n assert self._process.stdout\n assert self._process.stderr\n\n # send command to the process\n 
self._process.stdin.write(\n command.encode() + f\"; echo '{self._sentinel}'\\n\".encode()\n )\n await self._process.stdin.drain()\n\n # read output from the process, until the sentinel is found\n try:\n async with asyncio.timeout(self._timeout):\n while True:\n await asyncio.sleep(self._output_delay)\n # if we read directly from stdout/stderr, it will wait forever for\n # EOF. use the StreamReader buffer directly instead.\n output = (\n self._process.stdout._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if self._sentinel in output:\n # strip the sentinel and break\n output = output[: output.index(self._sentinel)]\n break\n except asyncio.TimeoutError:\n self._timed_out = True\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n ) from None\n\n if output.endswith(\"\\n\"):\n output = output[:-1]\n\n error = (\n self._process.stderr._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if error.endswith(\"\\n\"):\n error = error[:-1]\n\n # clear the buffers so that the next output can be read correctly\n self._process.stdout._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n self._process.stderr._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n\n return CLIResult(output=output, error=error)\n\n\nclass Bash(BaseTool):\n \"\"\"A tool for executing bash commands\"\"\"\n\n name: str = \"bash\"\n description: str = _BASH_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"description\": \"The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. 
Can be `ctrl+c` to interrupt the currently running process.\",\n },\n },\n \"required\": [\"command\"],\n }\n\n _session: Optional[_BashSession] = None\n\n async def execute(\n self, command: str | None = None, restart: bool = False, **kwargs\n ) -> CLIResult:\n if restart:\n if self._session:\n self._session.stop()\n self._session = _BashSession()\n await self._session.start()\n\n return CLIResult(system=\"tool has been restarted.\")\n\n if self._session is None:\n self._session = _BashSession()\n await self._session.start()\n\n if command is not None:\n return await self._session.run(command)\n\n raise ToolError(\"no command provided.\")\n\n\nif __name__ == \"__main__\":\n bash = Bash()\n rst = asyncio.run(bash.execute(\"ls -l\"))\n print(rst)\n"], ["/OpenManus/app/sandbox/core/terminal.py", "\"\"\"\nAsynchronous Docker Terminal\n\nThis module provides asynchronous terminal functionality for Docker containers,\nallowing interactive command execution with timeout control.\n\"\"\"\n\nimport asyncio\nimport re\nimport socket\nfrom typing import Dict, Optional, Tuple, Union\n\nimport docker\nfrom docker import APIClient\nfrom docker.errors import APIError\nfrom docker.models.containers import Container\n\n\nclass DockerSession:\n def __init__(self, container_id: str) -> None:\n \"\"\"Initializes a Docker session.\n\n Args:\n container_id: ID of the Docker container.\n \"\"\"\n self.api = APIClient()\n self.container_id = container_id\n self.exec_id = None\n self.socket = None\n\n async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None:\n \"\"\"Creates an interactive session with the container.\n\n Args:\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n\n Raises:\n RuntimeError: If socket connection fails.\n \"\"\"\n startup_command = [\n \"bash\",\n \"-c\",\n f\"cd {working_dir} && \"\n \"PROMPT_COMMAND='' \"\n \"PS1='$ ' \"\n \"exec bash --norc --noprofile\",\n ]\n\n exec_data = 
self.api.exec_create(\n self.container_id,\n startup_command,\n stdin=True,\n tty=True,\n stdout=True,\n stderr=True,\n privileged=True,\n user=\"root\",\n environment={**env_vars, \"TERM\": \"dumb\", \"PS1\": \"$ \", \"PROMPT_COMMAND\": \"\"},\n )\n self.exec_id = exec_data[\"Id\"]\n\n socket_data = self.api.exec_start(\n self.exec_id, socket=True, tty=True, stream=True, demux=True\n )\n\n if hasattr(socket_data, \"_sock\"):\n self.socket = socket_data._sock\n self.socket.setblocking(False)\n else:\n raise RuntimeError(\"Failed to get socket connection\")\n\n await self._read_until_prompt()\n\n async def close(self) -> None:\n \"\"\"Cleans up session resources.\n\n 1. Sends exit command\n 2. Closes socket connection\n 3. Checks and cleans up exec instance\n \"\"\"\n try:\n if self.socket:\n # Send exit command to close bash session\n try:\n self.socket.sendall(b\"exit\\n\")\n # Allow time for command execution\n await asyncio.sleep(0.1)\n except:\n pass # Ignore sending errors, continue cleanup\n\n # Close socket connection\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except:\n pass # Some platforms may not support shutdown\n\n self.socket.close()\n self.socket = None\n\n if self.exec_id:\n try:\n # Check exec instance status\n exec_inspect = self.api.exec_inspect(self.exec_id)\n if exec_inspect.get(\"Running\", False):\n # If still running, wait for it to complete\n await asyncio.sleep(0.5)\n except:\n pass # Ignore inspection errors, continue cleanup\n\n self.exec_id = None\n\n except Exception as e:\n # Log error but don't raise, ensure cleanup continues\n print(f\"Warning: Error during session cleanup: {e}\")\n\n async def _read_until_prompt(self) -> str:\n \"\"\"Reads output until prompt is found.\n\n Returns:\n String containing output up to the prompt.\n\n Raises:\n socket.error: If socket communication fails.\n \"\"\"\n buffer = b\"\"\n while b\"$ \" not in buffer:\n try:\n chunk = self.socket.recv(4096)\n if chunk:\n buffer += chunk\n except 
socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n return buffer.decode(\"utf-8\")\n\n async def execute(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes a command and returns cleaned output.\n\n Args:\n command: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string with prompt markers removed.\n\n Raises:\n RuntimeError: If session not initialized or execution fails.\n TimeoutError: If command execution exceeds timeout.\n \"\"\"\n if not self.socket:\n raise RuntimeError(\"Session not initialized\")\n\n try:\n # Sanitize command to prevent shell injection\n sanitized_command = self._sanitize_command(command)\n full_command = f\"{sanitized_command}\\necho $?\\n\"\n self.socket.sendall(full_command.encode())\n\n async def read_output() -> str:\n buffer = b\"\"\n result_lines = []\n command_sent = False\n\n while True:\n try:\n chunk = self.socket.recv(4096)\n if not chunk:\n break\n\n buffer += chunk\n lines = buffer.split(b\"\\n\")\n\n buffer = lines[-1]\n lines = lines[:-1]\n\n for line in lines:\n line = line.rstrip(b\"\\r\")\n\n if not command_sent:\n command_sent = True\n continue\n\n if line.strip() == b\"echo $?\" or line.strip().isdigit():\n continue\n\n if line.strip():\n result_lines.append(line)\n\n if buffer.endswith(b\"$ \"):\n break\n\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n\n output = b\"\\n\".join(result_lines).decode(\"utf-8\")\n output = re.sub(r\"\\n\\$ echo \\$\\$?.*$\", \"\", output)\n\n return output\n\n if timeout:\n result = await asyncio.wait_for(read_output(), timeout)\n else:\n result = await read_output()\n\n return result.strip()\n\n except asyncio.TimeoutError:\n raise TimeoutError(f\"Command execution timed out after {timeout} seconds\")\n except Exception as e:\n raise RuntimeError(f\"Failed to execute command: {e}\")\n\n def 
_sanitize_command(self, command: str) -> str:\n \"\"\"Sanitizes the command string to prevent shell injection.\n\n Args:\n command: Raw command string.\n\n Returns:\n Sanitized command string.\n\n Raises:\n ValueError: If command contains potentially dangerous patterns.\n \"\"\"\n\n # Additional checks for specific risky commands\n risky_commands = [\n \"rm -rf /\",\n \"rm -rf /*\",\n \"mkfs\",\n \"dd if=/dev/zero\",\n \":(){:|:&};:\",\n \"chmod -R 777 /\",\n \"chown -R\",\n ]\n\n for risky in risky_commands:\n if risky in command.lower():\n raise ValueError(\n f\"Command contains potentially dangerous operation: {risky}\"\n )\n\n return command\n\n\nclass AsyncDockerizedTerminal:\n def __init__(\n self,\n container: Union[str, Container],\n working_dir: str = \"/workspace\",\n env_vars: Optional[Dict[str, str]] = None,\n default_timeout: int = 60,\n ) -> None:\n \"\"\"Initializes an asynchronous terminal for Docker containers.\n\n Args:\n container: Docker container ID or Container object.\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n default_timeout: Default command execution timeout in seconds.\n \"\"\"\n self.client = docker.from_env()\n self.container = (\n container\n if isinstance(container, Container)\n else self.client.containers.get(container)\n )\n self.working_dir = working_dir\n self.env_vars = env_vars or {}\n self.default_timeout = default_timeout\n self.session = None\n\n async def init(self) -> None:\n \"\"\"Initializes the terminal environment.\n\n Ensures working directory exists and creates an interactive session.\n\n Raises:\n RuntimeError: If initialization fails.\n \"\"\"\n await self._ensure_workdir()\n\n self.session = DockerSession(self.container.id)\n await self.session.create(self.working_dir, self.env_vars)\n\n async def _ensure_workdir(self) -> None:\n \"\"\"Ensures working directory exists in container.\n\n Raises:\n RuntimeError: If directory creation fails.\n \"\"\"\n try:\n await 
self._exec_simple(f\"mkdir -p {self.working_dir}\")\n except APIError as e:\n raise RuntimeError(f\"Failed to create working directory: {e}\")\n\n async def _exec_simple(self, cmd: str) -> Tuple[int, str]:\n \"\"\"Executes a simple command using Docker's exec_run.\n\n Args:\n cmd: Command to execute.\n\n Returns:\n Tuple of (exit_code, output).\n \"\"\"\n result = await asyncio.to_thread(\n self.container.exec_run, cmd, environment=self.env_vars\n )\n return result.exit_code, result.output.decode(\"utf-8\")\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the container with timeout.\n\n Args:\n cmd: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If terminal not initialized.\n \"\"\"\n if not self.session:\n raise RuntimeError(\"Terminal not initialized\")\n\n return await self.session.execute(cmd, timeout=timeout or self.default_timeout)\n\n async def close(self) -> None:\n \"\"\"Closes the terminal session.\"\"\"\n if self.session:\n await self.session.close()\n\n async def __aenter__(self) -> \"AsyncDockerizedTerminal\":\n \"\"\"Async context manager entry.\"\"\"\n await self.init()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.close()\n"], ["/OpenManus/app/agent/browser.py", "import json\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import Message, ToolChoice\nfrom app.tool import BrowserUseTool, Terminate, ToolCollection\n\n\n# Avoid circular import if BrowserAgent needs BrowserContextHelper\nif TYPE_CHECKING:\n from app.agent.base import BaseAgent # Or wherever memory is defined\n\n\nclass BrowserContextHelper:\n def 
__init__(self, agent: \"BaseAgent\"):\n self.agent = agent\n self._current_base64_image: Optional[str] = None\n\n async def get_browser_state(self) -> Optional[dict]:\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if not browser_tool or not hasattr(browser_tool, \"get_current_state\"):\n logger.warning(\"BrowserUseTool not found or doesn't have get_current_state\")\n return None\n try:\n result = await browser_tool.get_current_state()\n if result.error:\n logger.debug(f\"Browser state error: {result.error}\")\n return None\n if hasattr(result, \"base64_image\") and result.base64_image:\n self._current_base64_image = result.base64_image\n else:\n self._current_base64_image = None\n return json.loads(result.output)\n except Exception as e:\n logger.debug(f\"Failed to get browser state: {str(e)}\")\n return None\n\n async def format_next_step_prompt(self) -> str:\n \"\"\"Gets browser state and formats the browser prompt.\"\"\"\n browser_state = await self.get_browser_state()\n url_info, tabs_info, content_above_info, content_below_info = \"\", \"\", \"\", \"\"\n results_info = \"\" # Or get from agent if needed elsewhere\n\n if browser_state and not browser_state.get(\"error\"):\n url_info = f\"\\n URL: {browser_state.get('url', 'N/A')}\\n Title: {browser_state.get('title', 'N/A')}\"\n tabs = browser_state.get(\"tabs\", [])\n if tabs:\n tabs_info = f\"\\n {len(tabs)} tab(s) available\"\n pixels_above = browser_state.get(\"pixels_above\", 0)\n pixels_below = browser_state.get(\"pixels_below\", 0)\n if pixels_above > 0:\n content_above_info = f\" ({pixels_above} pixels)\"\n if pixels_below > 0:\n content_below_info = f\" ({pixels_below} pixels)\"\n\n if self._current_base64_image:\n image_message = Message.user_message(\n content=\"Current browser screenshot:\",\n base64_image=self._current_base64_image,\n )\n self.agent.memory.add_message(image_message)\n self._current_base64_image = None # Consume the image after adding\n\n return 
NEXT_STEP_PROMPT.format(\n url_placeholder=url_info,\n tabs_placeholder=tabs_info,\n content_above_placeholder=content_above_info,\n content_below_placeholder=content_below_info,\n results_placeholder=results_info,\n )\n\n async def cleanup_browser(self):\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if browser_tool and hasattr(browser_tool, \"cleanup\"):\n await browser_tool.cleanup()\n\n\nclass BrowserAgent(ToolCallAgent):\n \"\"\"\n A browser agent that uses the browser_use library to control a browser.\n\n This agent can navigate web pages, interact with elements, fill forms,\n extract content, and perform other browser-based actions to accomplish tasks.\n \"\"\"\n\n name: str = \"browser\"\n description: str = \"A browser agent that can control a browser to accomplish tasks\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # Configure the available tools\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())\n )\n\n # Use Auto for tool choice to allow both tool usage and free-form responses\n tool_choices: ToolChoice = ToolChoice.AUTO\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"BrowserAgent\":\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools, with browser state info added\"\"\"\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n return await super().think()\n\n async def cleanup(self):\n \"\"\"Clean up browser agent resources by calling parent cleanup.\"\"\"\n await self.browser_context_helper.cleanup_browser()\n"], ["/OpenManus/run_flow.py", 
"import asyncio\nimport time\n\nfrom app.agent.data_analysis import DataAnalysis\nfrom app.agent.manus import Manus\nfrom app.config import config\nfrom app.flow.flow_factory import FlowFactory, FlowType\nfrom app.logger import logger\n\n\nasync def run_flow():\n agents = {\n \"manus\": Manus(),\n }\n if config.run_flow_config.use_data_analysis_agent:\n agents[\"data_analysis\"] = DataAnalysis()\n try:\n prompt = input(\"Enter your prompt: \")\n\n if prompt.strip().isspace() or not prompt:\n logger.warning(\"Empty prompt provided.\")\n return\n\n flow = FlowFactory.create_flow(\n flow_type=FlowType.PLANNING,\n agents=agents,\n )\n logger.warning(\"Processing your request...\")\n\n try:\n start_time = time.time()\n result = await asyncio.wait_for(\n flow.execute(prompt),\n timeout=3600, # 60 minute timeout for the entire execution\n )\n elapsed_time = time.time() - start_time\n logger.info(f\"Request processed in {elapsed_time:.2f} seconds\")\n logger.info(result)\n except asyncio.TimeoutError:\n logger.error(\"Request processing timed out after 1 hour\")\n logger.info(\n \"Operation terminated due to timeout. Please try a simpler request.\"\n )\n\n except KeyboardInterrupt:\n logger.info(\"Operation cancelled by user.\")\n except Exception as e:\n logger.error(f\"Error: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_flow())\n"], ["/OpenManus/app/tool/python_execute.py", "import multiprocessing\nimport sys\nfrom io import StringIO\nfrom typing import Dict\n\nfrom app.tool.base import BaseTool\n\n\nclass PythonExecute(BaseTool):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
Use print statements to see results.\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\",\n \"description\": \"The Python code to execute.\",\n },\n },\n \"required\": [\"code\"],\n }\n\n def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None:\n original_stdout = sys.stdout\n try:\n output_buffer = StringIO()\n sys.stdout = output_buffer\n exec(code, safe_globals, safe_globals)\n result_dict[\"observation\"] = output_buffer.getvalue()\n result_dict[\"success\"] = True\n except Exception as e:\n result_dict[\"observation\"] = str(e)\n result_dict[\"success\"] = False\n finally:\n sys.stdout = original_stdout\n\n async def execute(\n self,\n code: str,\n timeout: int = 5,\n ) -> Dict:\n \"\"\"\n Executes the provided Python code with a timeout.\n\n Args:\n code (str): The Python code to execute.\n timeout (int): Execution timeout in seconds.\n\n Returns:\n Dict: Contains 'output' with execution output or error message and 'success' status.\n \"\"\"\n\n with multiprocessing.Manager() as manager:\n result = manager.dict({\"observation\": \"\", \"success\": False})\n if isinstance(__builtins__, dict):\n safe_globals = {\"__builtins__\": __builtins__}\n else:\n safe_globals = {\"__builtins__\": __builtins__.__dict__.copy()}\n proc = multiprocessing.Process(\n target=self._run_code, args=(code, result, safe_globals)\n )\n proc.start()\n proc.join(timeout)\n\n # timeout process\n if proc.is_alive():\n proc.terminate()\n proc.join(1)\n return {\n \"observation\": f\"Execution timeout after {timeout} seconds\",\n \"success\": False,\n }\n return dict(result)\n"], ["/OpenManus/app/tool/search/bing_search.py", "from typing import List, Optional, Tuple\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom app.logger import logger\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nABSTRACT_MAX_LENGTH = 300\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3\",\n \"Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866\",\n]\n\nHEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": USER_AGENTS[0],\n \"Referer\": \"https://www.bing.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n}\n\nBING_HOST_URL = \"https://www.bing.com\"\nBING_SEARCH_URL = \"https://www.bing.com/search?q=\"\n\n\nclass BingSearchEngine(WebSearchEngine):\n session: Optional[requests.Session] = None\n\n def __init__(self, **data):\n \"\"\"Initialize the BingSearch tool with a requests session.\"\"\"\n super().__init__(**data)\n self.session = requests.Session()\n self.session.headers.update(HEADERS)\n\n def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:\n \"\"\"\n Synchronous Bing search implementation to retrieve search results.\n\n Args:\n 
query (str): The search query to submit to Bing.\n num_results (int, optional): Maximum number of results to return. Defaults to 10.\n\n Returns:\n List[SearchItem]: A list of search items with title, URL, and description.\n \"\"\"\n if not query:\n return []\n\n list_result = []\n first = 1\n next_url = BING_SEARCH_URL + query\n\n while len(list_result) < num_results:\n data, next_url = self._parse_html(\n next_url, rank_start=len(list_result), first=first\n )\n if data:\n list_result.extend(data)\n if not next_url:\n break\n first += 10\n\n return list_result[:num_results]\n\n def _parse_html(\n self, url: str, rank_start: int = 0, first: int = 1\n ) -> Tuple[List[SearchItem], str]:\n \"\"\"\n Parse Bing search result HTML to extract search results and the next page URL.\n\n Returns:\n tuple: (List of SearchItem objects, next page URL or None)\n \"\"\"\n try:\n res = self.session.get(url=url)\n res.encoding = \"utf-8\"\n root = BeautifulSoup(res.text, \"lxml\")\n\n list_data = []\n ol_results = root.find(\"ol\", id=\"b_results\")\n if not ol_results:\n return [], None\n\n for li in ol_results.find_all(\"li\", class_=\"b_algo\"):\n title = \"\"\n url = \"\"\n abstract = \"\"\n try:\n h2 = li.find(\"h2\")\n if h2:\n title = h2.text.strip()\n url = h2.a[\"href\"].strip()\n\n p = li.find(\"p\")\n if p:\n abstract = p.text.strip()\n\n if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:\n abstract = abstract[:ABSTRACT_MAX_LENGTH]\n\n rank_start += 1\n\n # Create a SearchItem object\n list_data.append(\n SearchItem(\n title=title or f\"Bing Result {rank_start}\",\n url=url,\n description=abstract,\n )\n )\n except Exception:\n continue\n\n next_btn = root.find(\"a\", title=\"Next page\")\n if not next_btn:\n return list_data, None\n\n next_url = BING_HOST_URL + next_btn[\"href\"]\n return list_data, next_url\n except Exception as e:\n logger.warning(f\"Error parsing HTML: {e}\")\n return [], None\n\n def perform_search(\n self, query: str, num_results: int = 
10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Bing search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n return self._search_sync(query, num_results=num_results)\n"], ["/OpenManus/app/tool/search/base.py", "from typing import List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass SearchItem(BaseModel):\n \"\"\"Represents a single search result item\"\"\"\n\n title: str = Field(description=\"The title of the search result\")\n url: str = Field(description=\"The URL of the search result\")\n description: Optional[str] = Field(\n default=None, description=\"A description or snippet of the search result\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result item.\"\"\"\n return f\"{self.title} - {self.url}\"\n\n\nclass WebSearchEngine(BaseModel):\n \"\"\"Base class for web search engines.\"\"\"\n\n model_config = {\"arbitrary_types_allowed\": True}\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Perform a web search and return a list of search items.\n\n Args:\n query (str): The search query to submit to the search engine.\n num_results (int, optional): The number of search results to return. 
Default is 10.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n List[SearchItem]: A list of SearchItem objects matching the search query.\n \"\"\"\n raise NotImplementedError\n"], ["/OpenManus/app/tool/tool_collection.py", "\"\"\"Collection classes for managing multiple tools.\"\"\"\nfrom typing import Any, Dict, List\n\nfrom app.exceptions import ToolError\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolFailure, ToolResult\n\n\nclass ToolCollection:\n \"\"\"A collection of defined tools.\"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, *tools: BaseTool):\n self.tools = tools\n self.tool_map = {tool.name: tool for tool in tools}\n\n def __iter__(self):\n return iter(self.tools)\n\n def to_params(self) -> List[Dict[str, Any]]:\n return [tool.to_param() for tool in self.tools]\n\n async def execute(\n self, *, name: str, tool_input: Dict[str, Any] = None\n ) -> ToolResult:\n tool = self.tool_map.get(name)\n if not tool:\n return ToolFailure(error=f\"Tool {name} is invalid\")\n try:\n result = await tool(**tool_input)\n return result\n except ToolError as e:\n return ToolFailure(error=e.message)\n\n async def execute_all(self) -> List[ToolResult]:\n \"\"\"Execute all tools in the collection sequentially.\"\"\"\n results = []\n for tool in self.tools:\n try:\n result = await tool()\n results.append(result)\n except ToolError as e:\n results.append(ToolFailure(error=e.message))\n return results\n\n def get_tool(self, name: str) -> BaseTool:\n return self.tool_map.get(name)\n\n def add_tool(self, tool: BaseTool):\n \"\"\"Add a single tool to the collection.\n\n If a tool with the same name already exists, it will be skipped and a warning will be logged.\n \"\"\"\n if tool.name in self.tool_map:\n logger.warning(f\"Tool {tool.name} already exists in collection, skipping\")\n return self\n\n self.tools += (tool,)\n self.tool_map[tool.name] = tool\n return self\n\n def add_tools(self, 
*tools: BaseTool):\n \"\"\"Add multiple tools to the collection.\n\n If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged.\n \"\"\"\n for tool in tools:\n self.add_tool(tool)\n return self\n"], ["/OpenManus/app/sandbox/client.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, Optional, Protocol\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxFileOperations(Protocol):\n \"\"\"Protocol for sandbox file operations.\"\"\"\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n \"\"\"\n ...\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n \"\"\"\n ...\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file content from container.\n\n Args:\n path: File path in container.\n\n Returns:\n str: File content.\n \"\"\"\n ...\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to file in container.\n\n Args:\n path: File path in container.\n content: Content to write.\n \"\"\"\n ...\n\n\nclass BaseSandboxClient(ABC):\n \"\"\"Base sandbox client interface.\"\"\"\n\n @abstractmethod\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates sandbox.\"\"\"\n\n @abstractmethod\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes command.\"\"\"\n\n @abstractmethod\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container.\"\"\"\n\n @abstractmethod\n async def copy_to(self, local_path: str, 
container_path: str) -> None:\n \"\"\"Copies file to container.\"\"\"\n\n @abstractmethod\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file.\"\"\"\n\n @abstractmethod\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file.\"\"\"\n\n @abstractmethod\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n\n\nclass LocalSandboxClient(BaseSandboxClient):\n \"\"\"Local sandbox client implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes local sandbox client.\"\"\"\n self.sandbox: Optional[DockerSandbox] = None\n\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates a sandbox.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mappings.\n\n Raises:\n RuntimeError: If sandbox creation fails.\n \"\"\"\n self.sandbox = DockerSandbox(config, volume_bindings)\n await self.sandbox.create()\n\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs command in sandbox.\n\n Args:\n command: Command to execute.\n timeout: Execution timeout in seconds.\n\n Returns:\n Command output.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.run_command(command, timeout)\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_from(container_path, local_path)\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: 
Destination path in container.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_to(local_path, container_path)\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file from container.\n\n Args:\n path: File path in container.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.read_file(path)\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file to container.\n\n Args:\n path: File path in container.\n content: File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.write_file(path, content)\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n if self.sandbox:\n await self.sandbox.cleanup()\n self.sandbox = None\n\n\ndef create_sandbox_client() -> LocalSandboxClient:\n \"\"\"Creates a sandbox client.\n\n Returns:\n LocalSandboxClient: Sandbox client instance.\n \"\"\"\n return LocalSandboxClient()\n\n\nSANDBOX_CLIENT = create_sandbox_client()\n"], ["/OpenManus/app/tool/file_operators.py", "\"\"\"File operation interfaces and implementations for local and sandbox environments.\"\"\"\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import Optional, Protocol, Tuple, Union, runtime_checkable\n\nfrom app.config import SandboxSettings\nfrom app.exceptions import ToolError\nfrom app.sandbox.client import SANDBOX_CLIENT\n\n\nPathLike = Union[str, Path]\n\n\n@runtime_checkable\nclass FileOperator(Protocol):\n \"\"\"Interface for file operations in different environments.\"\"\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file.\"\"\"\n ...\n\n async def write_file(self, path: PathLike, content: str) 
-> None:\n \"\"\"Write content to a file.\"\"\"\n ...\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n ...\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n ...\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command and return (return_code, stdout, stderr).\"\"\"\n ...\n\n\nclass LocalFileOperator(FileOperator):\n \"\"\"File operations implementation for local filesystem.\"\"\"\n\n encoding: str = \"utf-8\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a local file.\"\"\"\n try:\n return Path(path).read_text(encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to read {path}: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a local file.\"\"\"\n try:\n Path(path).write_text(content, encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path}: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n return Path(path).is_dir()\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n return Path(path).exists()\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command locally.\"\"\"\n process = await asyncio.create_subprocess_shell(\n cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n try:\n stdout, stderr = await asyncio.wait_for(\n process.communicate(), timeout=timeout\n )\n return (\n process.returncode or 0,\n stdout.decode(),\n stderr.decode(),\n )\n except asyncio.TimeoutError as exc:\n try:\n process.kill()\n except ProcessLookupError:\n pass\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} 
seconds\"\n ) from exc\n\n\nclass SandboxFileOperator(FileOperator):\n \"\"\"File operations implementation for sandbox environment.\"\"\"\n\n def __init__(self):\n self.sandbox_client = SANDBOX_CLIENT\n\n async def _ensure_sandbox_initialized(self):\n \"\"\"Ensure sandbox is initialized.\"\"\"\n if not self.sandbox_client.sandbox:\n await self.sandbox_client.create(config=SandboxSettings())\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n return await self.sandbox_client.read_file(str(path))\n except Exception as e:\n raise ToolError(f\"Failed to read {path} in sandbox: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n await self.sandbox_client.write_file(str(path), content)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path} in sandbox: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -d {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -e {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a command in sandbox environment.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n stdout = await self.sandbox_client.run_command(\n cmd, timeout=int(timeout) if timeout else None\n )\n return (\n 0, # Always return 0 since we don't have 
explicit return code from sandbox\n stdout,\n \"\", # No stderr capture in the current sandbox implementation\n )\n except TimeoutError as exc:\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds in sandbox\"\n ) from exc\n except Exception as exc:\n return 1, \"\", f\"Error executing command in sandbox: {str(exc)}\"\n"], ["/OpenManus/app/tool/terminate.py", "from app.tool.base import BaseTool\n\n\n_TERMINATE_DESCRIPTION = \"\"\"Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task.\nWhen you have finished all the tasks, call this tool to end the work.\"\"\"\n\n\nclass Terminate(BaseTool):\n name: str = \"terminate\"\n description: str = _TERMINATE_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"status\": {\n \"type\": \"string\",\n \"description\": \"The finish status of the interaction.\",\n \"enum\": [\"success\", \"failure\"],\n }\n },\n \"required\": [\"status\"],\n }\n\n async def execute(self, status: str) -> str:\n \"\"\"Finish the current execution\"\"\"\n return f\"The interaction has been completed with status: {status}\"\n"], ["/OpenManus/app/tool/chart_visualization/python_execute.py", "from app.config import config\nfrom app.tool.python_execute import PythonExecute\n\n\nclass NormalPythonExecute(PythonExecute):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"\"\"Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, data process / data report / others\",\n \"type\": \"string\",\n \"default\": \"process\",\n \"enum\": [\"process\", \"report\", \"others\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code to execute.\n# 
Note\n1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights.\n2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also\n3. Save any report / processed files / each analysis result in worksapce directory: {directory}\n4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization.\n5. You can invode this tool step-by-step to do data analysis from summary to in-depth with data report saved also\"\"\".format(\n directory=config.workspace_root\n ),\n },\n },\n \"required\": [\"code\"],\n }\n\n async def execute(self, code: str, code_type: str | None = None, timeout=5):\n return await super().execute(code, timeout)\n"], ["/OpenManus/main.py", "import argparse\nimport asyncio\n\nfrom app.agent.manus import Manus\nfrom app.logger import logger\n\n\nasync def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Run Manus agent with a prompt\")\n parser.add_argument(\n \"--prompt\", type=str, required=False, help=\"Input prompt for the agent\"\n )\n args = parser.parse_args()\n\n # Create and initialize Manus agent\n agent = await Manus.create()\n try:\n # Use command line prompt if provided, otherwise ask for input\n prompt = args.prompt if args.prompt else input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await agent.run(prompt)\n logger.info(\"Request processing completed.\")\n except KeyboardInterrupt:\n logger.warning(\"Operation interrupted.\")\n finally:\n # Ensure agent resources are cleaned up before exiting\n await agent.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n"], ["/OpenManus/app/prompt/browser.py", 
"SYSTEM_PROMPT = \"\"\"\\\nYou are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.\n\n# Input Format\nTask\nPrevious steps\nCurrent URL\nOpen Tabs\nInteractive Elements\n[index]text\n- index: Numeric identifier for interaction\n- type: HTML element type (button, input, etc.)\n- text: Element description\nExample:\n[33]\n\n- Only elements with numeric indexes in [] are interactive\n- elements without [] provide only context\n\n# Response Rules\n1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:\n{{\"current_state\": {{\"evaluation_previous_goal\": \"Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not\",\n\"memory\": \"Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz\",\n\"next_goal\": \"What needs to be done with the next immediate action\"}},\n\"action\":[{{\"one_action_name\": {{// action-specific parameter}}}}, // ... more actions in sequence]}}\n\n2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. 
Use maximum {{max_actions}} actions per sequence.\nCommon action sequences:\n- Form filling: [{{\"input_text\": {{\"index\": 1, \"text\": \"username\"}}}}, {{\"input_text\": {{\"index\": 2, \"text\": \"password\"}}}}, {{\"click_element\": {{\"index\": 3}}}}]\n- Navigation and extraction: [{{\"go_to_url\": {{\"url\": \"https://example.com\"}}}}, {{\"extract_content\": {{\"goal\": \"extract the names\"}}}}]\n- Actions are executed in the given order\n- If the page changes after an action, the sequence is interrupted and you get the new state.\n- Only provide the action sequence until an action which changes the page state significantly.\n- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page\n- only use multiple actions if it makes sense.\n\n3. ELEMENT INTERACTION:\n- Only use indexes of the interactive elements\n- Elements marked with \"[]Non-interactive text\" are non-interactive\n\n4. NAVIGATION & ERROR HANDLING:\n- If no suitable elements exist, use other functions to complete the task\n- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.\n- Handle popups/cookies by accepting or closing them\n- Use scroll to find elements you are looking for\n- If you want to research something, open a new tab instead of using the current tab\n- If captcha pops up, try to solve it - else try a different approach\n- If the page is not fully loaded, use wait action\n\n5. TASK COMPLETION:\n- Use the done action as the last action as soon as the ultimate task is complete\n- Dont use \"done\" before you are done with everything the user asked you, except you reach the last step of max_steps.\n- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. 
If not everything the user asked for is completed set success in done to false!\n- If you have to do something repeatedly for example the task says for \"each\", or \"for all\", or \"x times\", count always inside \"memory\" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.\n- Don't hallucinate actions\n- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.\n\n6. VISUAL CONTEXT:\n- When an image is provided, use it to understand the page layout\n- Bounding boxes with labels on their top right corner correspond to element indexes\n\n7. Form filling:\n- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.\n\n8. Long tasks:\n- Keep track of the status and subresults in the memory.\n\n9. 
Extraction:\n- If your task is to find information - call extract_content on the specific pages to get and store the information.\nYour responses must be always JSON with the specified format.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nWhat should I do next to achieve my goal?\n\nWhen you see [Current state starts here], focus on the following:\n- Current URL and page title{url_placeholder}\n- Available tabs{tabs_placeholder}\n- Interactive elements and their indices\n- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)\n- Any action results or errors{results_placeholder}\n\nFor browser interactions:\n- To navigate: browser_use with action=\"go_to_url\", url=\"...\"\n- To click: browser_use with action=\"click_element\", index=N\n- To type: browser_use with action=\"input_text\", index=N, text=\"...\"\n- To extract: browser_use with action=\"extract_content\", goal=\"...\"\n- To scroll: browser_use with action=\"scroll_down\" or \"scroll_up\"\n\nConsider both what's visible and what might be beyond the current viewport.\nBe methodical - remember your progress and what you've learned so far.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/tool/chart_visualization/chart_prepare.py", "from app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass VisualizationPrepare(NormalPythonExecute):\n \"\"\"A tool for Chart Generation Preparation\"\"\"\n\n name: str = \"visualization_preparation\"\n description: str = \"Using Python code to generates metadata of data_visualization tool. Outputs: 1) JSON Information. 
2) Cleaned CSV data files (Optional).\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, visualization: csv -> chart; insight: choose insight into chart\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code for data_visualization prepare.\n## Visualization Type\n1. Data loading logic\n2. Csv Data and chart description generate\n2.1 Csv data (The data you want to visulazation, cleaning / transform from origin data, saved in .csv)\n2.2 Chart description of csv data (The chart title or description should be concise and clear. Examples: 'Product sales distribution', 'Monthly revenue trend'.)\n3. Save information in json file.( format: {\"csvFilePath\": string, \"chartTitle\": string}[])\n## Insight Type\n1. Select the insights from the data_visualization results that you want to add to the chart.\n2. Save information in json file.( format: {\"chartPath\": string, \"insights_id\": number[]}[])\n# Note\n1. You can generate one or multiple csv data with different visualization needs.\n2. Make each chart data esay, clean and different.\n3. 
Json file saving in utf-8 with path print: print(json_path)\n\"\"\",\n },\n },\n \"required\": [\"code\", \"code_type\"],\n }\n"], ["/OpenManus/app/agent/react.py", "from abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.llm import LLM\nfrom app.schema import AgentState, Memory\n\n\nclass ReActAgent(BaseAgent, ABC):\n name: str\n description: Optional[str] = None\n\n system_prompt: Optional[str] = None\n next_step_prompt: Optional[str] = None\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n memory: Memory = Field(default_factory=Memory)\n state: AgentState = AgentState.IDLE\n\n max_steps: int = 10\n current_step: int = 0\n\n @abstractmethod\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action\"\"\"\n\n @abstractmethod\n async def act(self) -> str:\n \"\"\"Execute decided actions\"\"\"\n\n async def step(self) -> str:\n \"\"\"Execute a single step: think and act.\"\"\"\n should_act = await self.think()\n if not should_act:\n return \"Thinking complete - no action needed\"\n return await self.act()\n"], ["/OpenManus/app/tool/search/duckduckgo_search.py", "from typing import List\n\nfrom duckduckgo_search import DDGS\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass DuckDuckGoSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n DuckDuckGo search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = DDGS().text(query, max_results=num_results)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\", url=item, description=None\n )\n )\n elif isinstance(item, dict):\n # Extract data from the dictionary\n results.append(\n SearchItem(\n title=item.get(\"title\", 
f\"DuckDuckGo Result {i + 1}\"),\n url=item.get(\"href\", \"\"),\n description=item.get(\"body\", None),\n )\n )\n else:\n # Try to extract attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=getattr(item, \"href\", \"\"),\n description=getattr(item, \"body\", None),\n )\n )\n except Exception:\n # Fallback\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\",\n url=str(item),\n description=None,\n )\n )\n\n return results\n"], ["/OpenManus/app/tool/ask_human.py", "from app.tool import BaseTool\n\n\nclass AskHuman(BaseTool):\n \"\"\"Add a tool to ask human for help.\"\"\"\n\n name: str = \"ask_human\"\n description: str = \"Use this tool to ask human for help.\"\n parameters: str = {\n \"type\": \"object\",\n \"properties\": {\n \"inquire\": {\n \"type\": \"string\",\n \"description\": \"The question you want to ask human.\",\n }\n },\n \"required\": [\"inquire\"],\n }\n\n async def execute(self, inquire: str) -> str:\n return input(f\"\"\"Bot: {inquire}\\n\\nYou: \"\"\").strip()\n"], ["/OpenManus/app/tool/search/google_search.py", "from typing import List\n\nfrom googlesearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass GoogleSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Google search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results, advanced=True)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n {\"title\": f\"Google Result {i+1}\", \"url\": item, \"description\": \"\"}\n )\n else:\n results.append(\n SearchItem(\n title=item.title, url=item.url, description=item.description\n )\n )\n\n return results\n"], ["/OpenManus/app/tool/search/baidu_search.py", "from typing 
import List\n\nfrom baidusearch.baidusearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass BaiduSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Baidu search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results)\n\n # Convert raw results to SearchItem format\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(title=f\"Baidu Result {i+1}\", url=item, description=None)\n )\n elif isinstance(item, dict):\n # If it's a dictionary with details\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"Baidu Result {i+1}\"),\n url=item.get(\"url\", \"\"),\n description=item.get(\"abstract\", None),\n )\n )\n else:\n # Try to get attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"Baidu Result {i+1}\"),\n url=getattr(item, \"url\", \"\"),\n description=getattr(item, \"abstract\", None),\n )\n )\n except Exception:\n # Fallback to a basic result\n results.append(\n SearchItem(\n title=f\"Baidu Result {i+1}\", url=str(item), description=None\n )\n )\n\n return results\n"], ["/OpenManus/app/flow/flow_factory.py", "from enum import Enum\nfrom typing import Dict, List, Union\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.flow.planning import PlanningFlow\n\n\nclass FlowType(str, Enum):\n PLANNING = \"planning\"\n\n\nclass FlowFactory:\n \"\"\"Factory for creating different types of flows with support for multiple agents\"\"\"\n\n @staticmethod\n def create_flow(\n flow_type: FlowType,\n agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],\n **kwargs,\n ) -> BaseFlow:\n flows = {\n FlowType.PLANNING: PlanningFlow,\n }\n\n flow_class = flows.get(flow_type)\n if not flow_class:\n 
raise ValueError(f\"Unknown flow type: {flow_type}\")\n\n return flow_class(agents, **kwargs)\n"], ["/OpenManus/app/agent/data_analysis.py", "from pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.prompt.visualization import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass DataAnalysis(ToolCallAgent):\n \"\"\"\n A data analysis agent that uses planning to solve various data analysis tasks.\n\n This agent extends ToolCallAgent with a comprehensive set of tools and capabilities,\n including Data Analysis, Chart Visualization, Data Report.\n \"\"\"\n\n name: str = \"Data_Analysis\"\n description: str = \"An analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 15000\n max_steps: int = 20\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n NormalPythonExecute(),\n VisualizationPrepare(),\n DataVisualization(),\n Terminate(),\n )\n )\n"], ["/OpenManus/app/agent/swe.py", "from typing import List\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.prompt.swe import SYSTEM_PROMPT\nfrom app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection\n\n\nclass SWEAgent(ToolCallAgent):\n \"\"\"An agent that implements the SWEAgent paradigm for executing code and natural conversations.\"\"\"\n\n name: str = \"swe\"\n description: str = \"an autonomous AI programmer that interacts directly with the computer to solve tasks.\"\n\n 
system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = \"\"\n\n available_tools: ToolCollection = ToolCollection(\n Bash(), StrReplaceEditor(), Terminate()\n )\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n max_steps: int = 20\n"], ["/OpenManus/app/logger.py", "import sys\nfrom datetime import datetime\n\nfrom loguru import logger as _logger\n\nfrom app.config import PROJECT_ROOT\n\n\n_print_level = \"INFO\"\n\n\ndef define_log_level(print_level=\"INFO\", logfile_level=\"DEBUG\", name: str = None):\n \"\"\"Adjust the log level to above level\"\"\"\n global _print_level\n _print_level = print_level\n\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%Y%m%d%H%M%S\")\n log_name = (\n f\"{name}_{formatted_date}\" if name else formatted_date\n ) # name a log with prefix name\n\n _logger.remove()\n _logger.add(sys.stderr, level=print_level)\n _logger.add(PROJECT_ROOT / f\"logs/{log_name}.log\", level=logfile_level)\n return _logger\n\n\nlogger = define_log_level()\n\n\nif __name__ == \"__main__\":\n logger.info(\"Starting application\")\n logger.debug(\"Debug message\")\n logger.warning(\"Warning message\")\n logger.error(\"Error message\")\n logger.critical(\"Critical message\")\n\n try:\n raise ValueError(\"Test error\")\n except Exception as e:\n logger.exception(f\"An error occurred: {e}\")\n"], ["/OpenManus/app/exceptions.py", "class ToolError(Exception):\n \"\"\"Raised when a tool encounters an error.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n\nclass OpenManusError(Exception):\n \"\"\"Base exception for all OpenManus errors\"\"\"\n\n\nclass TokenLimitExceeded(OpenManusError):\n \"\"\"Exception raised when the token limit is exceeded\"\"\"\n"], ["/OpenManus/run_mcp_server.py", "# coding: utf-8\n# A shortcut to launch OpenManus MCP server, where its introduction also solves other import issues.\nfrom app.mcp.server import MCPServer, parse_args\n\n\nif __name__ == 
\"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/prompt/mcp.py", "\"\"\"Prompts for the MCP Agent.\"\"\"\n\nSYSTEM_PROMPT = \"\"\"You are an AI assistant with access to a Model Context Protocol (MCP) server.\nYou can use the tools provided by the MCP server to complete tasks.\nThe MCP server will dynamically expose tools that you can use - always check the available tools first.\n\nWhen using an MCP tool:\n1. Choose the appropriate tool based on your task requirements\n2. Provide properly formatted arguments as required by the tool\n3. Observe the results and use them to determine next steps\n4. Tools may change during operation - new tools might appear or existing ones might disappear\n\nFollow these guidelines:\n- Call tools with valid parameters as documented in their schemas\n- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters\n- For multimedia responses (like images), you'll receive a description of the content\n- Complete user requests step by step, using the most appropriate tools\n- If multiple tools need to be called in sequence, make one call at a time and wait for results\n\nRemember to clearly explain your reasoning and actions to the user.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on the current state and available tools, what should be done next?\nThink step by step about the problem and identify which MCP tool would be most helpful for the current stage.\nIf you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.\n\"\"\"\n\n# Additional specialized prompts\nTOOL_ERROR_PROMPT = \"\"\"You encountered an error with the tool '{tool_name}'.\nTry to understand what went wrong and correct your approach.\nCommon issues include:\n- Missing or incorrect parameters\n- Invalid parameter formats\n- Using a 
tool that's no longer available\n- Attempting an operation that's not supported\n\nPlease check the tool specifications and try again with corrected parameters.\n\"\"\"\n\nMULTIMEDIA_RESPONSE_PROMPT = \"\"\"You've received a multimedia response (image, audio, etc.) from the tool '{tool_name}'.\nThis content has been processed and described for you.\nUse this information to continue the task or provide insights to the user.\n\"\"\"\n"], ["/OpenManus/app/prompt/manus.py", "SYSTEM_PROMPT = (\n \"You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all.\"\n \"The initial directory is: {directory}\"\n)\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/prompt/swe.py", "SYSTEM_PROMPT = \"\"\"SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.\n\nThe special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time.\nIn addition to typical bash commands, you can also use specific commands to help you navigate and edit files.\nTo call a command, you need to invoke it with a function call/tool call.\n\nPlease note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.\nIf you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! 
Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.\n\nRESPONSE FORMAT:\nYour shell prompt is formatted as follows:\n(Open file: )\n(Current directory: )\nbash-$\n\nFirst, you should _always_ include a general thought about what you're going to do next.\nThen, for every response, you must include exactly _ONE_ tool call/function call.\n\nRemember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference.\nIf you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call.\nNote that the environment does NOT support interactive session commands (e.g. python, vim), so please do not invoke them.\n\"\"\"\n"], ["/OpenManus/app/__init__.py", "# Python version check: 3.11-3.13\nimport sys\n\n\nif sys.version_info < (3, 11) or sys.version_info > (3, 13):\n print(\n \"Warning: Unsupported Python version {ver}, please use 3.11-3.13\".format(\n ver=\".\".join(map(str, sys.version_info))\n )\n )\n"], ["/OpenManus/app/prompt/visualization.py", "SYSTEM_PROMPT = \"\"\"You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests.\n# Note:\n1. The workspace directory is: {directory}; Read / write file in workspace\n2. Generate analysis conclusion report in the end\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on user needs, break down the problem and use different tools step by step to solve it.\n# Note\n1. Each step select the most appropriate tool proactively (ONLY ONE).\n2. After using each tool, clearly explain the execution results and suggest the next steps.\n3. 
When observation with Error, review and fix it.\"\"\"\n"], ["/OpenManus/app/tool/chart_visualization/__init__.py", "from app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\n__all__ = [\"DataVisualization\", \"VisualizationPrepare\", \"NormalPythonExecute\"]\n"], ["/OpenManus/app/sandbox/__init__.py", "\"\"\"\nDocker Sandbox Module\n\nProvides secure containerized execution environment with resource limits\nand isolation for running untrusted code.\n\"\"\"\nfrom app.sandbox.client import (\n BaseSandboxClient,\n LocalSandboxClient,\n create_sandbox_client,\n)\nfrom app.sandbox.core.exceptions import (\n SandboxError,\n SandboxResourceError,\n SandboxTimeoutError,\n)\nfrom app.sandbox.core.manager import SandboxManager\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\n__all__ = [\n \"DockerSandbox\",\n \"SandboxManager\",\n \"BaseSandboxClient\",\n \"LocalSandboxClient\",\n \"create_sandbox_client\",\n \"SandboxError\",\n \"SandboxTimeoutError\",\n \"SandboxResourceError\",\n]\n"], ["/OpenManus/app/agent/__init__.py", "from app.agent.base import BaseAgent\nfrom app.agent.browser import BrowserAgent\nfrom app.agent.mcp import MCPAgent\nfrom app.agent.react import ReActAgent\nfrom app.agent.swe import SWEAgent\nfrom app.agent.toolcall import ToolCallAgent\n\n\n__all__ = [\n \"BaseAgent\",\n \"BrowserAgent\",\n \"ReActAgent\",\n \"SWEAgent\",\n \"ToolCallAgent\",\n \"MCPAgent\",\n]\n"], ["/OpenManus/app/prompt/toolcall.py", "SYSTEM_PROMPT = \"You are an agent that can execute tool calls\"\n\nNEXT_STEP_PROMPT = (\n \"If you want to stop interaction, use `terminate` tool/function call.\"\n)\n"], ["/OpenManus/app/prompt/planning.py", "PLANNING_SYSTEM_PROMPT = \"\"\"\nYou are an expert Planning Agent tasked with solving problems efficiently through structured plans.\nYour job is:\n1. 
Analyze requests to understand the task scope\n2. Create a clear, actionable plan that makes meaningful progress with the `planning` tool\n3. Execute steps using available tools as needed\n4. Track progress and adapt plans when necessary\n5. Use `finish` to conclude immediately when the task is complete\n\n\nAvailable tools will vary by task but may include:\n- `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.)\n- `finish`: End the task when complete\nBreak tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps.\nThink about dependencies and verification methods.\nKnow when to conclude - don't continue thinking once objectives are met.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on the current state, what's your next action?\nChoose the most efficient path forward:\n1. Is the plan sufficient, or does it need refinement?\n2. Can you execute the next step immediately?\n3. Is the task complete? If so, use `finish` right away.\n\nBe concise in your reasoning, then select the appropriate tool or action.\n\"\"\"\n"], ["/OpenManus/app/tool/__init__.py", "from app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.create_chat_completion import CreateChatCompletion\nfrom app.tool.planning import PlanningTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\nfrom app.tool.tool_collection import ToolCollection\nfrom app.tool.web_search import WebSearch\nfrom app.tool.crawl4ai import Crawl4aiTool\n\n\n__all__ = [\n \"BaseTool\",\n \"Bash\",\n \"BrowserUseTool\",\n \"Terminate\",\n \"StrReplaceEditor\",\n \"WebSearch\",\n \"ToolCollection\",\n \"CreateChatCompletion\",\n \"PlanningTool\",\n \"Crawl4aiTool\"\n]\n"], ["/OpenManus/app/tool/search/__init__.py", "from app.tool.search.baidu_search import BaiduSearchEngine\nfrom app.tool.search.base import WebSearchEngine\nfrom 
app.tool.search.bing_search import BingSearchEngine\nfrom app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine\nfrom app.tool.search.google_search import GoogleSearchEngine\n\n\n__all__ = [\n \"WebSearchEngine\",\n \"BaiduSearchEngine\",\n \"DuckDuckGoSearchEngine\",\n \"GoogleSearchEngine\",\n \"BingSearchEngine\",\n]\n"], ["/OpenManus/examples/benchmarks/__init__.py", "\"\"\"\nOpenManus benchmark system for standardized agent evaluation.\n\"\"\"\n"], ["/OpenManus/app/sandbox/core/exceptions.py", "\"\"\"Exception classes for the sandbox system.\n\nThis module defines custom exceptions used throughout the sandbox system to\nhandle various error conditions in a structured way.\n\"\"\"\n\n\nclass SandboxError(Exception):\n \"\"\"Base exception for sandbox-related errors.\"\"\"\n\n\nclass SandboxTimeoutError(SandboxError):\n \"\"\"Exception raised when a sandbox operation times out.\"\"\"\n\n\nclass SandboxResourceError(SandboxError):\n \"\"\"Exception raised for resource-related errors.\"\"\"\n"], ["/OpenManus/app/mcp/__init__.py", ""], ["/OpenManus/app/flow/__init__.py", ""], ["/OpenManus/app/prompt/__init__.py", ""], ["/OpenManus/protocol/a2a/__init__.py", ""], ["/OpenManus/protocol/a2a/app/__init__.py", ""]], "task_instance_info": {"created_time": "2025-08-20 15:01:10", "created_task_model": "DeepSeek-R1", "class_skeleton": "class A2AManus(Manus):\n def get_agent_response(self, config, agent_response):\n pass", "code_description": "This code segment functions to: Execute an agent-based processing workflow by handling task invocation with session identifiers, generating structured response payloads indicating operational status and output content, and enforcing compatibility constraints for input data formats."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass A2AManus(Manus):\n def get_agent_response(self, config, agent_response):\n return ResponseFormat(\n status=\"completed\" if 
agent_response else \"input_required\",\n message=agent_response.content if agent_response else \"Please provide your input to proceed\"\n )\n[TASK_END]\n```", "inference_time": "2025-08-20 15-01-29"}, "editdistance_info": {"edit_distance": 17.4825, "calculate_time": "2025-08-20 15:01:29", "true_code_clean": "class A2AManus(Manus):\n async def invoke(self, query, sessionId) -> str:\n config = {\"configurable\": {\"thread_id\": sessionId}}\n response = await self.run(query)\n return self.get_agent_response(config, response)\n async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:\n raise NotImplementedError(\"Streaming is not supported by Manus yet.\")\n def get_agent_response(self, config, agent_response):\n return {\n \"is_task_complete\": True,\n \"require_user_input\": False,\n \"content\": agent_response,\n }\n SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = [\"text\", \"text/plain\"]", "predict_code_clean": "class A2AManus(Manus):\n def get_agent_response(self, config, agent_response):\n return ResponseFormat(\n status=\"completed\" if agent_response else \"input_required\",\n message=agent_response.content if agent_response else \"Please provide your input to proceed\"\n )"}} {"repo_name": "OpenManus", "file_name": "/OpenManus/app/agent/browser.py", "inference_info": {"prefix_code": "import json\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import Message, ToolChoice\nfrom app.tool import BrowserUseTool, Terminate, ToolCollection\n\n\n# Avoid circular import if BrowserAgent needs BrowserContextHelper\nif TYPE_CHECKING:\n from app.agent.base import BaseAgent # Or wherever memory is defined\n\n\nclass BrowserContextHelper:\n def __init__(self, agent: \"BaseAgent\"):\n self.agent = agent\n self._current_base64_image: Optional[str] = None\n\n async 
def get_browser_state(self) -> Optional[dict]:\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if not browser_tool or not hasattr(browser_tool, \"get_current_state\"):\n logger.warning(\"BrowserUseTool not found or doesn't have get_current_state\")\n return None\n try:\n result = await browser_tool.get_current_state()\n if result.error:\n logger.debug(f\"Browser state error: {result.error}\")\n return None\n if hasattr(result, \"base64_image\") and result.base64_image:\n self._current_base64_image = result.base64_image\n else:\n self._current_base64_image = None\n return json.loads(result.output)\n except Exception as e:\n logger.debug(f\"Failed to get browser state: {str(e)}\")\n return None\n\n async def format_next_step_prompt(self) -> str:\n \"\"\"Gets browser state and formats the browser prompt.\"\"\"\n browser_state = await self.get_browser_state()\n url_info, tabs_info, content_above_info, content_below_info = \"\", \"\", \"\", \"\"\n results_info = \"\" # Or get from agent if needed elsewhere\n\n if browser_state and not browser_state.get(\"error\"):\n url_info = f\"\\n URL: {browser_state.get('url', 'N/A')}\\n Title: {browser_state.get('title', 'N/A')}\"\n tabs = browser_state.get(\"tabs\", [])\n if tabs:\n tabs_info = f\"\\n {len(tabs)} tab(s) available\"\n pixels_above = browser_state.get(\"pixels_above\", 0)\n pixels_below = browser_state.get(\"pixels_below\", 0)\n if pixels_above > 0:\n content_above_info = f\" ({pixels_above} pixels)\"\n if pixels_below > 0:\n content_below_info = f\" ({pixels_below} pixels)\"\n\n if self._current_base64_image:\n image_message = Message.user_message(\n content=\"Current browser screenshot:\",\n base64_image=self._current_base64_image,\n )\n self.agent.memory.add_message(image_message)\n self._current_base64_image = None # Consume the image after adding\n\n return NEXT_STEP_PROMPT.format(\n url_placeholder=url_info,\n tabs_placeholder=tabs_info,\n 
content_above_placeholder=content_above_info,\n content_below_placeholder=content_below_info,\n results_placeholder=results_info,\n )\n\n async def cleanup_browser(self):\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if browser_tool and hasattr(browser_tool, \"cleanup\"):\n await browser_tool.cleanup()\n\n\n", "suffix_code": "\n", "middle_code": "class BrowserAgent(ToolCallAgent):\n name: str = \"browser\"\n description: str = \"A browser agent that can control a browser to accomplish tasks\"\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n max_observe: int = 10000\n max_steps: int = 20\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())\n )\n tool_choices: ToolChoice = ToolChoice.AUTO\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"BrowserAgent\":\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n async def think(self) -> bool:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n return await super().think()\n async def cleanup(self):\n await self.browser_context_helper.cleanup_browser()", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/OpenManus/app/logger.py", "import sys\nfrom datetime import datetime\n\nfrom loguru import logger as _logger\n\nfrom app.config import PROJECT_ROOT\n\n\n_print_level = \"INFO\"\n\n\ndef define_log_level(print_level=\"INFO\", logfile_level=\"DEBUG\", name: str = None):\n \"\"\"Adjust the log level to above level\"\"\"\n global _print_level\n _print_level = print_level\n\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%Y%m%d%H%M%S\")\n log_name = (\n 
f\"{name}_{formatted_date}\" if name else formatted_date\n ) # name a log with prefix name\n\n _logger.remove()\n _logger.add(sys.stderr, level=print_level)\n _logger.add(PROJECT_ROOT / f\"logs/{log_name}.log\", level=logfile_level)\n return _logger\n\n\nlogger = define_log_level()\n\n\nif __name__ == \"__main__\":\n logger.info(\"Starting application\")\n logger.debug(\"Debug message\")\n logger.warning(\"Warning message\")\n logger.error(\"Error message\")\n logger.critical(\"Critical message\")\n\n try:\n raise ValueError(\"Test error\")\n except Exception as e:\n logger.exception(f\"An error occurred: {e}\")\n"], ["/OpenManus/app/agent/toolcall.py", "import asyncio\nimport json\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.react import ReActAgent\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger\nfrom app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice\nfrom app.tool import CreateChatCompletion, Terminate, ToolCollection\n\n\nTOOL_CALL_REQUIRED = \"Tool calls required but none provided\"\n\n\nclass ToolCallAgent(ReActAgent):\n \"\"\"Base agent class for handling tool/function calls with enhanced abstraction\"\"\"\n\n name: str = \"toolcall\"\n description: str = \"an agent that can execute tool calls.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n available_tools: ToolCollection = ToolCollection(\n CreateChatCompletion(), Terminate()\n )\n tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n tool_calls: List[ToolCall] = Field(default_factory=list)\n _current_base64_image: Optional[str] = None\n\n max_steps: int = 30\n max_observe: Optional[Union[int, bool]] = None\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using 
tools\"\"\"\n if self.next_step_prompt:\n user_msg = Message.user_message(self.next_step_prompt)\n self.messages += [user_msg]\n\n try:\n # Get response with tool options\n response = await self.llm.ask_tool(\n messages=self.messages,\n system_msgs=(\n [Message.system_message(self.system_prompt)]\n if self.system_prompt\n else None\n ),\n tools=self.available_tools.to_params(),\n tool_choice=self.tool_choices,\n )\n except ValueError:\n raise\n except Exception as e:\n # Check if this is a RetryError containing TokenLimitExceeded\n if hasattr(e, \"__cause__\") and isinstance(e.__cause__, TokenLimitExceeded):\n token_limit_error = e.__cause__\n logger.error(\n f\"🚨 Token limit error (from RetryError): {token_limit_error}\"\n )\n self.memory.add_message(\n Message.assistant_message(\n f\"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}\"\n )\n )\n self.state = AgentState.FINISHED\n return False\n raise\n\n self.tool_calls = tool_calls = (\n response.tool_calls if response and response.tool_calls else []\n )\n content = response.content if response and response.content else \"\"\n\n # Log response info\n logger.info(f\"✨ {self.name}'s thoughts: {content}\")\n logger.info(\n f\"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use\"\n )\n if tool_calls:\n logger.info(\n f\"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}\"\n )\n logger.info(f\"🔧 Tool arguments: {tool_calls[0].function.arguments}\")\n\n try:\n if response is None:\n raise RuntimeError(\"No response received from the LLM\")\n\n # Handle different tool_choices modes\n if self.tool_choices == ToolChoice.NONE:\n if tool_calls:\n logger.warning(\n f\"🤔 Hmm, {self.name} tried to use tools when they weren't available!\"\n )\n if content:\n self.memory.add_message(Message.assistant_message(content))\n return True\n return False\n\n # Create and add assistant message\n assistant_msg = (\n Message.from_tool_calls(content=content, 
tool_calls=self.tool_calls)\n if self.tool_calls\n else Message.assistant_message(content)\n )\n self.memory.add_message(assistant_msg)\n\n if self.tool_choices == ToolChoice.REQUIRED and not self.tool_calls:\n return True # Will be handled in act()\n\n # For 'auto' mode, continue with content if no commands but content exists\n if self.tool_choices == ToolChoice.AUTO and not self.tool_calls:\n return bool(content)\n\n return bool(self.tool_calls)\n except Exception as e:\n logger.error(f\"🚨 Oops! The {self.name}'s thinking process hit a snag: {e}\")\n self.memory.add_message(\n Message.assistant_message(\n f\"Error encountered while processing: {str(e)}\"\n )\n )\n return False\n\n async def act(self) -> str:\n \"\"\"Execute tool calls and handle their results\"\"\"\n if not self.tool_calls:\n if self.tool_choices == ToolChoice.REQUIRED:\n raise ValueError(TOOL_CALL_REQUIRED)\n\n # Return last message content if no tool calls\n return self.messages[-1].content or \"No content or commands to execute\"\n\n results = []\n for command in self.tool_calls:\n # Reset base64_image for each tool call\n self._current_base64_image = None\n\n result = await self.execute_tool(command)\n\n if self.max_observe:\n result = result[: self.max_observe]\n\n logger.info(\n f\"🎯 Tool '{command.function.name}' completed its mission! 
Result: {result}\"\n )\n\n # Add tool response to memory\n tool_msg = Message.tool_message(\n content=result,\n tool_call_id=command.id,\n name=command.function.name,\n base64_image=self._current_base64_image,\n )\n self.memory.add_message(tool_msg)\n results.append(result)\n\n return \"\\n\\n\".join(results)\n\n async def execute_tool(self, command: ToolCall) -> str:\n \"\"\"Execute a single tool call with robust error handling\"\"\"\n if not command or not command.function or not command.function.name:\n return \"Error: Invalid command format\"\n\n name = command.function.name\n if name not in self.available_tools.tool_map:\n return f\"Error: Unknown tool '{name}'\"\n\n try:\n # Parse arguments\n args = json.loads(command.function.arguments or \"{}\")\n\n # Execute the tool\n logger.info(f\"🔧 Activating tool: '{name}'...\")\n result = await self.available_tools.execute(name=name, tool_input=args)\n\n # Handle special tools\n await self._handle_special_tool(name=name, result=result)\n\n # Check if result is a ToolResult with base64_image\n if hasattr(result, \"base64_image\") and result.base64_image:\n # Store the base64_image for later use in tool_message\n self._current_base64_image = result.base64_image\n\n # Format result for display (standard case)\n observation = (\n f\"Observed output of cmd `{name}` executed:\\n{str(result)}\"\n if result\n else f\"Cmd `{name}` completed with no output\"\n )\n\n return observation\n except json.JSONDecodeError:\n error_msg = f\"Error parsing arguments for {name}: Invalid JSON format\"\n logger.error(\n f\"📝 Oops! 
The arguments for '{name}' don't make sense - invalid JSON, arguments:{command.function.arguments}\"\n )\n return f\"Error: {error_msg}\"\n except Exception as e:\n error_msg = f\"⚠️ Tool '{name}' encountered a problem: {str(e)}\"\n logger.exception(error_msg)\n return f\"Error: {error_msg}\"\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs):\n \"\"\"Handle special tool execution and state changes\"\"\"\n if not self._is_special_tool(name):\n return\n\n if self._should_finish_execution(name=name, result=result, **kwargs):\n # Set agent state to finished\n logger.info(f\"🏁 Special tool '{name}' has completed the task!\")\n self.state = AgentState.FINISHED\n\n @staticmethod\n def _should_finish_execution(**kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n return True\n\n def _is_special_tool(self, name: str) -> bool:\n \"\"\"Check if tool name is in special tools list\"\"\"\n return name.lower() in [n.lower() for n in self.special_tool_names]\n\n async def cleanup(self):\n \"\"\"Clean up resources used by the agent's tools.\"\"\"\n logger.info(f\"🧹 Cleaning up resources for agent '{self.name}'...\")\n for tool_name, tool_instance in self.available_tools.tool_map.items():\n if hasattr(tool_instance, \"cleanup\") and asyncio.iscoroutinefunction(\n tool_instance.cleanup\n ):\n try:\n logger.debug(f\"🧼 Cleaning up tool: {tool_name}\")\n await tool_instance.cleanup()\n except Exception as e:\n logger.error(\n f\"🚨 Error cleaning up tool '{tool_name}': {e}\", exc_info=True\n )\n logger.info(f\"✨ Cleanup complete for agent '{self.name}'.\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n return await super().run(request)\n finally:\n await self.cleanup()\n"], ["/OpenManus/app/tool/browser_use_tool.py", "import asyncio\nimport base64\nimport json\nfrom typing import Generic, Optional, TypeVar\n\nfrom browser_use import Browser as 
BrowserUseBrowser\nfrom browser_use import BrowserConfig\nfrom browser_use.browser.context import BrowserContext, BrowserContextConfig\nfrom browser_use.dom.service import DomService\nfrom pydantic import Field, field_validator\nfrom pydantic_core.core_schema import ValidationInfo\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.web_search import WebSearch\n\n\n_BROWSER_DESCRIPTION = \"\"\"\\\nA powerful browser automation tool that allows interaction with web pages through various actions.\n* This tool provides commands for controlling a browser session, navigating web pages, and extracting information\n* It maintains state across calls, keeping the browser session alive until explicitly closed\n* Use this when you need to browse websites, fill forms, click buttons, extract content, or perform web searches\n* Each action requires specific parameters as defined in the tool's dependencies\n\nKey capabilities include:\n* Navigation: Go to specific URLs, go back, search the web, or refresh pages\n* Interaction: Click elements, input text, select from dropdowns, send keyboard commands\n* Scrolling: Scroll up/down by pixel amount or scroll to specific text\n* Content extraction: Extract and analyze content from web pages based on specific goals\n* Tab management: Switch between tabs, open new tabs, or close tabs\n\nNote: When using element indices, refer to the numbered elements shown in the current browser state.\n\"\"\"\n\nContext = TypeVar(\"Context\")\n\n\nclass BrowserUseTool(BaseTool, Generic[Context]):\n name: str = \"browser_use\"\n description: str = _BROWSER_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"action\": {\n \"type\": \"string\",\n \"enum\": [\n \"go_to_url\",\n \"click_element\",\n \"input_text\",\n \"scroll_down\",\n \"scroll_up\",\n \"scroll_to_text\",\n \"send_keys\",\n \"get_dropdown_options\",\n \"select_dropdown_option\",\n 
\"go_back\",\n \"web_search\",\n \"wait\",\n \"extract_content\",\n \"switch_tab\",\n \"open_tab\",\n \"close_tab\",\n ],\n \"description\": \"The browser action to perform\",\n },\n \"url\": {\n \"type\": \"string\",\n \"description\": \"URL for 'go_to_url' or 'open_tab' actions\",\n },\n \"index\": {\n \"type\": \"integer\",\n \"description\": \"Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions\",\n },\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions\",\n },\n \"scroll_amount\": {\n \"type\": \"integer\",\n \"description\": \"Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions\",\n },\n \"tab_id\": {\n \"type\": \"integer\",\n \"description\": \"Tab ID for 'switch_tab' action\",\n },\n \"query\": {\n \"type\": \"string\",\n \"description\": \"Search query for 'web_search' action\",\n },\n \"goal\": {\n \"type\": \"string\",\n \"description\": \"Extraction goal for 'extract_content' action\",\n },\n \"keys\": {\n \"type\": \"string\",\n \"description\": \"Keys to send for 'send_keys' action\",\n },\n \"seconds\": {\n \"type\": \"integer\",\n \"description\": \"Seconds to wait for 'wait' action\",\n },\n },\n \"required\": [\"action\"],\n \"dependencies\": {\n \"go_to_url\": [\"url\"],\n \"click_element\": [\"index\"],\n \"input_text\": [\"index\", \"text\"],\n \"switch_tab\": [\"tab_id\"],\n \"open_tab\": [\"url\"],\n \"scroll_down\": [\"scroll_amount\"],\n \"scroll_up\": [\"scroll_amount\"],\n \"scroll_to_text\": [\"text\"],\n \"send_keys\": [\"keys\"],\n \"get_dropdown_options\": [\"index\"],\n \"select_dropdown_option\": [\"index\", \"text\"],\n \"go_back\": [],\n \"web_search\": [\"query\"],\n \"wait\": [\"seconds\"],\n \"extract_content\": [\"goal\"],\n },\n }\n\n lock: asyncio.Lock = Field(default_factory=asyncio.Lock)\n browser: Optional[BrowserUseBrowser] = 
Field(default=None, exclude=True)\n context: Optional[BrowserContext] = Field(default=None, exclude=True)\n dom_service: Optional[DomService] = Field(default=None, exclude=True)\n web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True)\n\n # Context for generic functionality\n tool_context: Optional[Context] = Field(default=None, exclude=True)\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n\n @field_validator(\"parameters\", mode=\"before\")\n def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict:\n if not v:\n raise ValueError(\"Parameters cannot be empty\")\n return v\n\n async def _ensure_browser_initialized(self) -> BrowserContext:\n \"\"\"Ensure browser and context are initialized.\"\"\"\n if self.browser is None:\n browser_config_kwargs = {\"headless\": False, \"disable_security\": True}\n\n if config.browser_config:\n from browser_use.browser.browser import ProxySettings\n\n # handle proxy settings.\n if config.browser_config.proxy and config.browser_config.proxy.server:\n browser_config_kwargs[\"proxy\"] = ProxySettings(\n server=config.browser_config.proxy.server,\n username=config.browser_config.proxy.username,\n password=config.browser_config.proxy.password,\n )\n\n browser_attrs = [\n \"headless\",\n \"disable_security\",\n \"extra_chromium_args\",\n \"chrome_instance_path\",\n \"wss_url\",\n \"cdp_url\",\n ]\n\n for attr in browser_attrs:\n value = getattr(config.browser_config, attr, None)\n if value is not None:\n if not isinstance(value, list) or value:\n browser_config_kwargs[attr] = value\n\n self.browser = BrowserUseBrowser(BrowserConfig(**browser_config_kwargs))\n\n if self.context is None:\n context_config = BrowserContextConfig()\n\n # if there is context config in the config, use it.\n if (\n config.browser_config\n and hasattr(config.browser_config, \"new_context_config\")\n and config.browser_config.new_context_config\n ):\n context_config = config.browser_config.new_context_config\n\n self.context = 
await self.browser.new_context(context_config)\n self.dom_service = DomService(await self.context.get_current_page())\n\n return self.context\n\n async def execute(\n self,\n action: str,\n url: Optional[str] = None,\n index: Optional[int] = None,\n text: Optional[str] = None,\n scroll_amount: Optional[int] = None,\n tab_id: Optional[int] = None,\n query: Optional[str] = None,\n goal: Optional[str] = None,\n keys: Optional[str] = None,\n seconds: Optional[int] = None,\n **kwargs,\n ) -> ToolResult:\n \"\"\"\n Execute a specified browser action.\n\n Args:\n action: The browser action to perform\n url: URL for navigation or new tab\n index: Element index for click or input actions\n text: Text for input action or search query\n scroll_amount: Pixels to scroll for scroll action\n tab_id: Tab ID for switch_tab action\n query: Search query for Google search\n goal: Extraction goal for content extraction\n keys: Keys to send for keyboard actions\n seconds: Seconds to wait\n **kwargs: Additional arguments\n\n Returns:\n ToolResult with the action's output or error\n \"\"\"\n async with self.lock:\n try:\n context = await self._ensure_browser_initialized()\n\n # Get max content length from config\n max_content_length = getattr(\n config.browser_config, \"max_content_length\", 2000\n )\n\n # Navigation actions\n if action == \"go_to_url\":\n if not url:\n return ToolResult(\n error=\"URL is required for 'go_to_url' action\"\n )\n page = await context.get_current_page()\n await page.goto(url)\n await page.wait_for_load_state()\n return ToolResult(output=f\"Navigated to {url}\")\n\n elif action == \"go_back\":\n await context.go_back()\n return ToolResult(output=\"Navigated back\")\n\n elif action == \"refresh\":\n await context.refresh_page()\n return ToolResult(output=\"Refreshed current page\")\n\n elif action == \"web_search\":\n if not query:\n return ToolResult(\n error=\"Query is required for 'web_search' action\"\n )\n # Execute the web search and return results 
directly without browser navigation\n search_response = await self.web_search_tool.execute(\n query=query, fetch_content=True, num_results=1\n )\n # Navigate to the first search result\n first_search_result = search_response.results[0]\n url_to_navigate = first_search_result.url\n\n page = await context.get_current_page()\n await page.goto(url_to_navigate)\n await page.wait_for_load_state()\n\n return search_response\n\n # Element interaction actions\n elif action == \"click_element\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'click_element' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n download_path = await context._click_element_node(element)\n output = f\"Clicked element at index {index}\"\n if download_path:\n output += f\" - Downloaded file to {download_path}\"\n return ToolResult(output=output)\n\n elif action == \"input_text\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'input_text' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n await context._input_text_element_node(element, text)\n return ToolResult(\n output=f\"Input '{text}' into element at index {index}\"\n )\n\n elif action == \"scroll_down\" or action == \"scroll_up\":\n direction = 1 if action == \"scroll_down\" else -1\n amount = (\n scroll_amount\n if scroll_amount is not None\n else context.config.browser_window_size[\"height\"]\n )\n await context.execute_javascript(\n f\"window.scrollBy(0, {direction * amount});\"\n )\n return ToolResult(\n output=f\"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels\"\n )\n\n elif action == \"scroll_to_text\":\n if not text:\n return ToolResult(\n error=\"Text is required for 'scroll_to_text' action\"\n )\n page = await 
context.get_current_page()\n try:\n locator = page.get_by_text(text, exact=False)\n await locator.scroll_into_view_if_needed()\n return ToolResult(output=f\"Scrolled to text: '{text}'\")\n except Exception as e:\n return ToolResult(error=f\"Failed to scroll to text: {str(e)}\")\n\n elif action == \"send_keys\":\n if not keys:\n return ToolResult(\n error=\"Keys are required for 'send_keys' action\"\n )\n page = await context.get_current_page()\n await page.keyboard.press(keys)\n return ToolResult(output=f\"Sent keys: {keys}\")\n\n elif action == \"get_dropdown_options\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'get_dropdown_options' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n options = await page.evaluate(\n \"\"\"\n (xpath) => {\n const select = document.evaluate(xpath, document, null,\n XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;\n if (!select) return null;\n return Array.from(select.options).map(opt => ({\n text: opt.text,\n value: opt.value,\n index: opt.index\n }));\n }\n \"\"\",\n element.xpath,\n )\n return ToolResult(output=f\"Dropdown options: {options}\")\n\n elif action == \"select_dropdown_option\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'select_dropdown_option' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n await page.select_option(element.xpath, label=text)\n return ToolResult(\n output=f\"Selected option '{text}' from dropdown at index {index}\"\n )\n\n # Content extraction actions\n elif action == \"extract_content\":\n if not goal:\n return ToolResult(\n error=\"Goal is required for 'extract_content' action\"\n )\n\n page = await 
context.get_current_page()\n import markdownify\n\n content = markdownify.markdownify(await page.content())\n\n prompt = f\"\"\"\\\nYour task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. If the goal is vague, summarize the page. Respond in json format.\nExtraction goal: {goal}\n\nPage content:\n{content[:max_content_length]}\n\"\"\"\n messages = [{\"role\": \"system\", \"content\": prompt}]\n\n # Define extraction function schema\n extraction_function = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"extract_content\",\n \"description\": \"Extract specific information from a webpage based on a goal\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"extracted_content\": {\n \"type\": \"object\",\n \"description\": \"The content extracted from the page according to the goal\",\n \"properties\": {\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text content extracted from the page\",\n },\n \"metadata\": {\n \"type\": \"object\",\n \"description\": \"Additional metadata about the extracted content\",\n \"properties\": {\n \"source\": {\n \"type\": \"string\",\n \"description\": \"Source of the extracted content\",\n }\n },\n },\n },\n }\n },\n \"required\": [\"extracted_content\"],\n },\n },\n }\n\n # Use LLM to extract content with required function calling\n response = await self.llm.ask_tool(\n messages,\n tools=[extraction_function],\n tool_choice=\"required\",\n )\n\n if response and response.tool_calls:\n args = json.loads(response.tool_calls[0].function.arguments)\n extracted_content = args.get(\"extracted_content\", {})\n return ToolResult(\n output=f\"Extracted from page:\\n{extracted_content}\\n\"\n )\n\n return ToolResult(output=\"No content was extracted from the page.\")\n\n # Tab management actions\n elif action == \"switch_tab\":\n if tab_id is None:\n return ToolResult(\n error=\"Tab ID is required 
for 'switch_tab' action\"\n )\n await context.switch_to_tab(tab_id)\n page = await context.get_current_page()\n await page.wait_for_load_state()\n return ToolResult(output=f\"Switched to tab {tab_id}\")\n\n elif action == \"open_tab\":\n if not url:\n return ToolResult(error=\"URL is required for 'open_tab' action\")\n await context.create_new_tab(url)\n return ToolResult(output=f\"Opened new tab with {url}\")\n\n elif action == \"close_tab\":\n await context.close_current_tab()\n return ToolResult(output=\"Closed current tab\")\n\n # Utility actions\n elif action == \"wait\":\n seconds_to_wait = seconds if seconds is not None else 3\n await asyncio.sleep(seconds_to_wait)\n return ToolResult(output=f\"Waited for {seconds_to_wait} seconds\")\n\n else:\n return ToolResult(error=f\"Unknown action: {action}\")\n\n except Exception as e:\n return ToolResult(error=f\"Browser action '{action}' failed: {str(e)}\")\n\n async def get_current_state(\n self, context: Optional[BrowserContext] = None\n ) -> ToolResult:\n \"\"\"\n Get the current browser state as a ToolResult.\n If context is not provided, uses self.context.\n \"\"\"\n try:\n # Use provided context or fall back to self.context\n ctx = context or self.context\n if not ctx:\n return ToolResult(error=\"Browser context not initialized\")\n\n state = await ctx.get_state()\n\n # Create a viewport_info dictionary if it doesn't exist\n viewport_height = 0\n if hasattr(state, \"viewport_info\") and state.viewport_info:\n viewport_height = state.viewport_info.height\n elif hasattr(ctx, \"config\") and hasattr(ctx.config, \"browser_window_size\"):\n viewport_height = ctx.config.browser_window_size.get(\"height\", 0)\n\n # Take a screenshot for the state\n page = await ctx.get_current_page()\n\n await page.bring_to_front()\n await page.wait_for_load_state()\n\n screenshot = await page.screenshot(\n full_page=True, animations=\"disabled\", type=\"jpeg\", quality=100\n )\n\n screenshot = 
base64.b64encode(screenshot).decode(\"utf-8\")\n\n # Build the state info with all required fields\n state_info = {\n \"url\": state.url,\n \"title\": state.title,\n \"tabs\": [tab.model_dump() for tab in state.tabs],\n \"help\": \"[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.\",\n \"interactive_elements\": (\n state.element_tree.clickable_elements_to_string()\n if state.element_tree\n else \"\"\n ),\n \"scroll_info\": {\n \"pixels_above\": getattr(state, \"pixels_above\", 0),\n \"pixels_below\": getattr(state, \"pixels_below\", 0),\n \"total_height\": getattr(state, \"pixels_above\", 0)\n + getattr(state, \"pixels_below\", 0)\n + viewport_height,\n },\n \"viewport_height\": viewport_height,\n }\n\n return ToolResult(\n output=json.dumps(state_info, indent=4, ensure_ascii=False),\n base64_image=screenshot,\n )\n except Exception as e:\n return ToolResult(error=f\"Failed to get browser state: {str(e)}\")\n\n async def cleanup(self):\n \"\"\"Clean up browser resources.\"\"\"\n async with self.lock:\n if self.context is not None:\n await self.context.close()\n self.context = None\n self.dom_service = None\n if self.browser is not None:\n await self.browser.close()\n self.browser = None\n\n def __del__(self):\n \"\"\"Ensure cleanup when object is destroyed.\"\"\"\n if self.browser is not None or self.context is not None:\n try:\n asyncio.run(self.cleanup())\n except RuntimeError:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.cleanup())\n loop.close()\n\n @classmethod\n def create_with_context(cls, context: Context) -> \"BrowserUseTool[Context]\":\n \"\"\"Factory method to create a BrowserUseTool with a specific context.\"\"\"\n tool = cls()\n tool.tool_context = context\n return tool\n"], ["/OpenManus/app/agent/manus.py", "from typing import Dict, List, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom 
app.agent.browser import BrowserContextHelper\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.logger import logger\nfrom app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.ask_human import AskHuman\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.mcp import MCPClients, MCPClientTool\nfrom app.tool.python_execute import PythonExecute\nfrom app.tool.str_replace_editor import StrReplaceEditor\n\n\nclass Manus(ToolCallAgent):\n \"\"\"A versatile general-purpose agent with support for both local and MCP tools.\"\"\"\n\n name: str = \"Manus\"\n description: str = \"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # MCP clients for remote tool access\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n PythonExecute(),\n BrowserUseTool(),\n StrReplaceEditor(),\n AskHuman(),\n Terminate(),\n )\n )\n\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n # Track connected MCP servers\n connected_servers: Dict[str, str] = Field(\n default_factory=dict\n ) # server_id -> url/command\n _initialized: bool = False\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"Manus\":\n \"\"\"Initialize basic components synchronously.\"\"\"\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n @classmethod\n async def create(cls, **kwargs) -> \"Manus\":\n \"\"\"Factory method to create and properly initialize a Manus instance.\"\"\"\n instance = 
cls(**kwargs)\n await instance.initialize_mcp_servers()\n instance._initialized = True\n return instance\n\n async def initialize_mcp_servers(self) -> None:\n \"\"\"Initialize connections to configured MCP servers.\"\"\"\n for server_id, server_config in config.mcp_config.servers.items():\n try:\n if server_config.type == \"sse\":\n if server_config.url:\n await self.connect_mcp_server(server_config.url, server_id)\n logger.info(\n f\"Connected to MCP server {server_id} at {server_config.url}\"\n )\n elif server_config.type == \"stdio\":\n if server_config.command:\n await self.connect_mcp_server(\n server_config.command,\n server_id,\n use_stdio=True,\n stdio_args=server_config.args,\n )\n logger.info(\n f\"Connected to MCP server {server_id} using command {server_config.command}\"\n )\n except Exception as e:\n logger.error(f\"Failed to connect to MCP server {server_id}: {e}\")\n\n async def connect_mcp_server(\n self,\n server_url: str,\n server_id: str = \"\",\n use_stdio: bool = False,\n stdio_args: List[str] = None,\n ) -> None:\n \"\"\"Connect to an MCP server and add its tools.\"\"\"\n if use_stdio:\n await self.mcp_clients.connect_stdio(\n server_url, stdio_args or [], server_id\n )\n self.connected_servers[server_id or server_url] = server_url\n else:\n await self.mcp_clients.connect_sse(server_url, server_id)\n self.connected_servers[server_id or server_url] = server_url\n\n # Update available tools with only the new tools from this server\n new_tools = [\n tool for tool in self.mcp_clients.tools if tool.server_id == server_id\n ]\n self.available_tools.add_tools(*new_tools)\n\n async def disconnect_mcp_server(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from an MCP server and remove its tools.\"\"\"\n await self.mcp_clients.disconnect(server_id)\n if server_id:\n self.connected_servers.pop(server_id, None)\n else:\n self.connected_servers.clear()\n\n # Rebuild available tools without the disconnected server's tools\n base_tools = [\n tool\n 
for tool in self.available_tools.tools\n if not isinstance(tool, MCPClientTool)\n ]\n self.available_tools = ToolCollection(*base_tools)\n self.available_tools.add_tools(*self.mcp_clients.tools)\n\n async def cleanup(self):\n \"\"\"Clean up Manus agent resources.\"\"\"\n if self.browser_context_helper:\n await self.browser_context_helper.cleanup_browser()\n # Disconnect from all MCP servers only if we were initialized\n if self._initialized:\n await self.disconnect_mcp_server()\n self._initialized = False\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions with appropriate context.\"\"\"\n if not self._initialized:\n await self.initialize_mcp_servers()\n self._initialized = True\n\n original_prompt = self.next_step_prompt\n recent_messages = self.memory.messages[-3:] if self.memory.messages else []\n browser_in_use = any(\n tc.function.name == BrowserUseTool().name\n for msg in recent_messages\n if msg.tool_calls\n for tc in msg.tool_calls\n )\n\n if browser_in_use:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n\n result = await super().think()\n\n # Restore original prompt\n self.next_step_prompt = original_prompt\n\n return result\n"], ["/OpenManus/app/agent/mcp.py", "from typing import Any, Dict, List, Optional, Tuple\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.mcp import MULTIMEDIA_RESPONSE_PROMPT, NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import AgentState, Message\nfrom app.tool.base import ToolResult\nfrom app.tool.mcp import MCPClients\n\n\nclass MCPAgent(ToolCallAgent):\n \"\"\"Agent for interacting with MCP (Model Context Protocol) servers.\n\n This agent connects to an MCP server using either SSE or stdio transport\n and makes the server's tools available through the agent's tool interface.\n \"\"\"\n\n name: str = \"mcp_agent\"\n description: str = \"An agent that connects to an 
MCP server and uses its tools.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n # Initialize MCP tool collection\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n available_tools: MCPClients = None # Will be set in initialize()\n\n max_steps: int = 20\n connection_type: str = \"stdio\" # \"stdio\" or \"sse\"\n\n # Track tool schemas to detect changes\n tool_schemas: Dict[str, Dict[str, Any]] = Field(default_factory=dict)\n _refresh_tools_interval: int = 5 # Refresh tools every N steps\n\n # Special tool names that should trigger termination\n special_tool_names: List[str] = Field(default_factory=lambda: [\"terminate\"])\n\n async def initialize(\n self,\n connection_type: Optional[str] = None,\n server_url: Optional[str] = None,\n command: Optional[str] = None,\n args: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Initialize the MCP connection.\n\n Args:\n connection_type: Type of connection to use (\"stdio\" or \"sse\")\n server_url: URL of the MCP server (for SSE connection)\n command: Command to run (for stdio connection)\n args: Arguments for the command (for stdio connection)\n \"\"\"\n if connection_type:\n self.connection_type = connection_type\n\n # Connect to the MCP server based on connection type\n if self.connection_type == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL is required for SSE connection\")\n await self.mcp_clients.connect_sse(server_url=server_url)\n elif self.connection_type == \"stdio\":\n if not command:\n raise ValueError(\"Command is required for stdio connection\")\n await self.mcp_clients.connect_stdio(command=command, args=args or [])\n else:\n raise ValueError(f\"Unsupported connection type: {self.connection_type}\")\n\n # Set available_tools to our MCP instance\n self.available_tools = self.mcp_clients\n\n # Store initial tool schemas\n await self._refresh_tools()\n\n # Add system message about available tools\n tool_names = 
list(self.mcp_clients.tool_map.keys())\n tools_info = \", \".join(tool_names)\n\n # Add system prompt and available tools information\n self.memory.add_message(\n Message.system_message(\n f\"{self.system_prompt}\\n\\nAvailable MCP tools: {tools_info}\"\n )\n )\n\n async def _refresh_tools(self) -> Tuple[List[str], List[str]]:\n \"\"\"Refresh the list of available tools from the MCP server.\n\n Returns:\n A tuple of (added_tools, removed_tools)\n \"\"\"\n if not self.mcp_clients.sessions:\n return [], []\n\n # Get current tool schemas directly from the server\n response = await self.mcp_clients.list_tools()\n current_tools = {tool.name: tool.inputSchema for tool in response.tools}\n\n # Determine added, removed, and changed tools\n current_names = set(current_tools.keys())\n previous_names = set(self.tool_schemas.keys())\n\n added_tools = list(current_names - previous_names)\n removed_tools = list(previous_names - current_names)\n\n # Check for schema changes in existing tools\n changed_tools = []\n for name in current_names.intersection(previous_names):\n if current_tools[name] != self.tool_schemas.get(name):\n changed_tools.append(name)\n\n # Update stored schemas\n self.tool_schemas = current_tools\n\n # Log and notify about changes\n if added_tools:\n logger.info(f\"Added MCP tools: {added_tools}\")\n self.memory.add_message(\n Message.system_message(f\"New tools available: {', '.join(added_tools)}\")\n )\n if removed_tools:\n logger.info(f\"Removed MCP tools: {removed_tools}\")\n self.memory.add_message(\n Message.system_message(\n f\"Tools no longer available: {', '.join(removed_tools)}\"\n )\n )\n if changed_tools:\n logger.info(f\"Changed MCP tools: {changed_tools}\")\n\n return added_tools, removed_tools\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action.\"\"\"\n # Check MCP session and tools availability\n if not self.mcp_clients.sessions or not self.mcp_clients.tool_map:\n logger.info(\"MCP service is no longer 
available, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Refresh tools periodically\n if self.current_step % self._refresh_tools_interval == 0:\n await self._refresh_tools()\n # All tools removed indicates shutdown\n if not self.mcp_clients.tool_map:\n logger.info(\"MCP service has shut down, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Use the parent class's think method\n return await super().think()\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs) -> None:\n \"\"\"Handle special tool execution and state changes\"\"\"\n # First process with parent handler\n await super()._handle_special_tool(name, result, **kwargs)\n\n # Handle multimedia responses\n if isinstance(result, ToolResult) and result.base64_image:\n self.memory.add_message(\n Message.system_message(\n MULTIMEDIA_RESPONSE_PROMPT.format(tool_name=name)\n )\n )\n\n def _should_finish_execution(self, name: str, **kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n # Terminate if the tool name is 'terminate'\n return name.lower() == \"terminate\"\n\n async def cleanup(self) -> None:\n \"\"\"Clean up MCP connection when done.\"\"\"\n if self.mcp_clients.sessions:\n await self.mcp_clients.disconnect()\n logger.info(\"MCP connection closed\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n result = await super().run(request)\n return result\n finally:\n # Ensure cleanup happens even if there's an error\n await self.cleanup()\n"], ["/OpenManus/app/agent/base.py", "from abc import ABC, abstractmethod\nfrom contextlib import asynccontextmanager\nfrom typing import List, Optional\n\nfrom pydantic import BaseModel, Field, model_validator\n\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.sandbox.client import SANDBOX_CLIENT\nfrom app.schema import ROLE_TYPE, AgentState, Memory, Message\n\n\nclass 
BaseAgent(BaseModel, ABC):\n \"\"\"Abstract base class for managing agent state and execution.\n\n Provides foundational functionality for state transitions, memory management,\n and a step-based execution loop. Subclasses must implement the `step` method.\n \"\"\"\n\n # Core attributes\n name: str = Field(..., description=\"Unique name of the agent\")\n description: Optional[str] = Field(None, description=\"Optional agent description\")\n\n # Prompts\n system_prompt: Optional[str] = Field(\n None, description=\"System-level instruction prompt\"\n )\n next_step_prompt: Optional[str] = Field(\n None, description=\"Prompt for determining next action\"\n )\n\n # Dependencies\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n memory: Memory = Field(default_factory=Memory, description=\"Agent's memory store\")\n state: AgentState = Field(\n default=AgentState.IDLE, description=\"Current agent state\"\n )\n\n # Execution control\n max_steps: int = Field(default=10, description=\"Maximum steps before termination\")\n current_step: int = Field(default=0, description=\"Current step in execution\")\n\n duplicate_threshold: int = 2\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\" # Allow extra fields for flexibility in subclasses\n\n @model_validator(mode=\"after\")\n def initialize_agent(self) -> \"BaseAgent\":\n \"\"\"Initialize agent with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n if not isinstance(self.memory, Memory):\n self.memory = Memory()\n return self\n\n @asynccontextmanager\n async def state_context(self, new_state: AgentState):\n \"\"\"Context manager for safe agent state transitions.\n\n Args:\n new_state: The state to transition to during the context.\n\n Yields:\n None: Allows execution within the new state.\n\n Raises:\n ValueError: If the new_state is invalid.\n \"\"\"\n if not isinstance(new_state, 
AgentState):\n raise ValueError(f\"Invalid state: {new_state}\")\n\n previous_state = self.state\n self.state = new_state\n try:\n yield\n except Exception as e:\n self.state = AgentState.ERROR # Transition to ERROR on failure\n raise e\n finally:\n self.state = previous_state # Revert to previous state\n\n def update_memory(\n self,\n role: ROLE_TYPE, # type: ignore\n content: str,\n base64_image: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Add a message to the agent's memory.\n\n Args:\n role: The role of the message sender (user, system, assistant, tool).\n content: The message content.\n base64_image: Optional base64 encoded image.\n **kwargs: Additional arguments (e.g., tool_call_id for tool messages).\n\n Raises:\n ValueError: If the role is unsupported.\n \"\"\"\n message_map = {\n \"user\": Message.user_message,\n \"system\": Message.system_message,\n \"assistant\": Message.assistant_message,\n \"tool\": lambda content, **kw: Message.tool_message(content, **kw),\n }\n\n if role not in message_map:\n raise ValueError(f\"Unsupported message role: {role}\")\n\n # Create message with appropriate parameters based on role\n kwargs = {\"base64_image\": base64_image, **(kwargs if role == \"tool\" else {})}\n self.memory.add_message(message_map[role](content, **kwargs))\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Execute the agent's main loop asynchronously.\n\n Args:\n request: Optional initial user request to process.\n\n Returns:\n A string summarizing the execution results.\n\n Raises:\n RuntimeError: If the agent is not in IDLE state at start.\n \"\"\"\n if self.state != AgentState.IDLE:\n raise RuntimeError(f\"Cannot run agent from state: {self.state}\")\n\n if request:\n self.update_memory(\"user\", request)\n\n results: List[str] = []\n async with self.state_context(AgentState.RUNNING):\n while (\n self.current_step < self.max_steps and self.state != AgentState.FINISHED\n ):\n self.current_step += 1\n 
logger.info(f\"Executing step {self.current_step}/{self.max_steps}\")\n step_result = await self.step()\n\n # Check for stuck state\n if self.is_stuck():\n self.handle_stuck_state()\n\n results.append(f\"Step {self.current_step}: {step_result}\")\n\n if self.current_step >= self.max_steps:\n self.current_step = 0\n self.state = AgentState.IDLE\n results.append(f\"Terminated: Reached max steps ({self.max_steps})\")\n await SANDBOX_CLIENT.cleanup()\n return \"\\n\".join(results) if results else \"No steps executed\"\n\n @abstractmethod\n async def step(self) -> str:\n \"\"\"Execute a single step in the agent's workflow.\n\n Must be implemented by subclasses to define specific behavior.\n \"\"\"\n\n def handle_stuck_state(self):\n \"\"\"Handle stuck state by adding a prompt to change strategy\"\"\"\n stuck_prompt = \"\\\n Observed duplicate responses. Consider new strategies and avoid repeating ineffective paths already attempted.\"\n self.next_step_prompt = f\"{stuck_prompt}\\n{self.next_step_prompt}\"\n logger.warning(f\"Agent detected stuck state. 
Added prompt: {stuck_prompt}\")\n\n def is_stuck(self) -> bool:\n \"\"\"Check if the agent is stuck in a loop by detecting duplicate content\"\"\"\n if len(self.memory.messages) < 2:\n return False\n\n last_message = self.memory.messages[-1]\n if not last_message.content:\n return False\n\n # Count identical content occurrences\n duplicate_count = sum(\n 1\n for msg in reversed(self.memory.messages[:-1])\n if msg.role == \"assistant\" and msg.content == last_message.content\n )\n\n return duplicate_count >= self.duplicate_threshold\n\n @property\n def messages(self) -> List[Message]:\n \"\"\"Retrieve a list of messages from the agent's memory.\"\"\"\n return self.memory.messages\n\n @messages.setter\n def messages(self, value: List[Message]):\n \"\"\"Set the list of messages in the agent's memory.\"\"\"\n self.memory.messages = value\n"], ["/OpenManus/app/flow/planning.py", "import json\nimport time\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.schema import AgentState, Message, ToolChoice\nfrom app.tool import PlanningTool\n\n\nclass PlanStepStatus(str, Enum):\n \"\"\"Enum class defining possible statuses of a plan step\"\"\"\n\n NOT_STARTED = \"not_started\"\n IN_PROGRESS = \"in_progress\"\n COMPLETED = \"completed\"\n BLOCKED = \"blocked\"\n\n @classmethod\n def get_all_statuses(cls) -> list[str]:\n \"\"\"Return a list of all possible step status values\"\"\"\n return [status.value for status in cls]\n\n @classmethod\n def get_active_statuses(cls) -> list[str]:\n \"\"\"Return a list of values representing active statuses (not started or in progress)\"\"\"\n return [cls.NOT_STARTED.value, cls.IN_PROGRESS.value]\n\n @classmethod\n def get_status_marks(cls) -> Dict[str, str]:\n \"\"\"Return a mapping of statuses to their marker symbols\"\"\"\n return {\n 
cls.COMPLETED.value: \"[✓]\",\n cls.IN_PROGRESS.value: \"[→]\",\n cls.BLOCKED.value: \"[!]\",\n cls.NOT_STARTED.value: \"[ ]\",\n }\n\n\nclass PlanningFlow(BaseFlow):\n \"\"\"A flow that manages planning and execution of tasks using agents.\"\"\"\n\n llm: LLM = Field(default_factory=lambda: LLM())\n planning_tool: PlanningTool = Field(default_factory=PlanningTool)\n executor_keys: List[str] = Field(default_factory=list)\n active_plan_id: str = Field(default_factory=lambda: f\"plan_{int(time.time())}\")\n current_step_index: Optional[int] = None\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Set executor keys before super().__init__\n if \"executors\" in data:\n data[\"executor_keys\"] = data.pop(\"executors\")\n\n # Set plan ID if provided\n if \"plan_id\" in data:\n data[\"active_plan_id\"] = data.pop(\"plan_id\")\n\n # Initialize the planning tool if not provided\n if \"planning_tool\" not in data:\n planning_tool = PlanningTool()\n data[\"planning_tool\"] = planning_tool\n\n # Call parent's init with the processed data\n super().__init__(agents, **data)\n\n # Set executor_keys to all agent keys if not specified\n if not self.executor_keys:\n self.executor_keys = list(self.agents.keys())\n\n def get_executor(self, step_type: Optional[str] = None) -> BaseAgent:\n \"\"\"\n Get an appropriate executor agent for the current step.\n Can be extended to select agents based on step type/requirements.\n \"\"\"\n # If step type is provided and matches an agent key, use that agent\n if step_type and step_type in self.agents:\n return self.agents[step_type]\n\n # Otherwise use the first available executor or fall back to primary agent\n for key in self.executor_keys:\n if key in self.agents:\n return self.agents[key]\n\n # Fallback to primary agent\n return self.primary_agent\n\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the planning flow with agents.\"\"\"\n try:\n if not 
self.primary_agent:\n raise ValueError(\"No primary agent available\")\n\n # Create initial plan if input provided\n if input_text:\n await self._create_initial_plan(input_text)\n\n # Verify plan was created successfully\n if self.active_plan_id not in self.planning_tool.plans:\n logger.error(\n f\"Plan creation failed. Plan ID {self.active_plan_id} not found in planning tool.\"\n )\n return f\"Failed to create plan for: {input_text}\"\n\n result = \"\"\n while True:\n # Get current step to execute\n self.current_step_index, step_info = await self._get_current_step_info()\n\n # Exit if no more steps or plan completed\n if self.current_step_index is None:\n result += await self._finalize_plan()\n break\n\n # Execute current step with appropriate agent\n step_type = step_info.get(\"type\") if step_info else None\n executor = self.get_executor(step_type)\n step_result = await self._execute_step(executor, step_info)\n result += step_result + \"\\n\"\n\n # Check if agent wants to terminate\n if hasattr(executor, \"state\") and executor.state == AgentState.FINISHED:\n break\n\n return result\n except Exception as e:\n logger.error(f\"Error in PlanningFlow: {str(e)}\")\n return f\"Execution failed: {str(e)}\"\n\n async def _create_initial_plan(self, request: str) -> None:\n \"\"\"Create an initial plan based on the request using the flow's LLM and PlanningTool.\"\"\"\n logger.info(f\"Creating initial plan with ID: {self.active_plan_id}\")\n\n system_message_content = (\n \"You are a planning assistant. Create a concise, actionable plan with clear steps. \"\n \"Focus on key milestones rather than detailed sub-steps. 
\"\n \"Optimize for clarity and efficiency.\"\n )\n agents_description = []\n for key in self.executor_keys:\n if key in self.agents:\n agents_description.append(\n {\n \"name\": key.upper(),\n \"description\": self.agents[key].description,\n }\n )\n if len(agents_description) > 1:\n # Add description of agents to select\n system_message_content += (\n f\"\\nNow we have {agents_description} agents. \"\n f\"The infomation of them are below: {json.dumps(agents_description)}\\n\"\n \"When creating steps in the planning tool, please specify the agent names using the format '[agent_name]'.\"\n )\n\n # Create a system message for plan creation\n system_message = Message.system_message(system_message_content)\n\n # Create a user message with the request\n user_message = Message.user_message(\n f\"Create a reasonable plan with clear steps to accomplish the task: {request}\"\n )\n\n # Call LLM with PlanningTool\n response = await self.llm.ask_tool(\n messages=[user_message],\n system_msgs=[system_message],\n tools=[self.planning_tool.to_param()],\n tool_choice=ToolChoice.AUTO,\n )\n\n # Process tool calls if present\n if response.tool_calls:\n for tool_call in response.tool_calls:\n if tool_call.function.name == \"planning\":\n # Parse the arguments\n args = tool_call.function.arguments\n if isinstance(args, str):\n try:\n args = json.loads(args)\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse tool arguments: {args}\")\n continue\n\n # Ensure plan_id is set correctly and execute the tool\n args[\"plan_id\"] = self.active_plan_id\n\n # Execute the tool via ToolCollection instead of directly\n result = await self.planning_tool.execute(**args)\n\n logger.info(f\"Plan creation result: {str(result)}\")\n return\n\n # If execution reached here, create a default plan\n logger.warning(\"Creating default plan\")\n\n # Create default plan using the ToolCollection\n await self.planning_tool.execute(\n **{\n \"command\": \"create\",\n \"plan_id\": 
self.active_plan_id,\n \"title\": f\"Plan for: {request[:50]}{'...' if len(request) > 50 else ''}\",\n \"steps\": [\"Analyze request\", \"Execute task\", \"Verify results\"],\n }\n )\n\n async def _get_current_step_info(self) -> tuple[Optional[int], Optional[dict]]:\n \"\"\"\n Parse the current plan to identify the first non-completed step's index and info.\n Returns (None, None) if no active step is found.\n \"\"\"\n if (\n not self.active_plan_id\n or self.active_plan_id not in self.planning_tool.plans\n ):\n logger.error(f\"Plan with ID {self.active_plan_id} not found\")\n return None, None\n\n try:\n # Direct access to plan data from planning tool storage\n plan_data = self.planning_tool.plans[self.active_plan_id]\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Find first non-completed step\n for i, step in enumerate(steps):\n if i >= len(step_statuses):\n status = PlanStepStatus.NOT_STARTED.value\n else:\n status = step_statuses[i]\n\n if status in PlanStepStatus.get_active_statuses():\n # Extract step type/category if available\n step_info = {\"text\": step}\n\n # Try to extract step type from the text (e.g., [SEARCH] or [CODE])\n import re\n\n type_match = re.search(r\"\\[([A-Z_]+)\\]\", step)\n if type_match:\n step_info[\"type\"] = type_match.group(1).lower()\n\n # Mark current step as in_progress\n try:\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=i,\n step_status=PlanStepStatus.IN_PROGRESS.value,\n )\n except Exception as e:\n logger.warning(f\"Error marking step as in_progress: {e}\")\n # Update step status directly if needed\n if i < len(step_statuses):\n step_statuses[i] = PlanStepStatus.IN_PROGRESS.value\n else:\n while len(step_statuses) < i:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n step_statuses.append(PlanStepStatus.IN_PROGRESS.value)\n\n plan_data[\"step_statuses\"] = step_statuses\n\n return i, step_info\n\n return 
None, None # No active step found\n\n except Exception as e:\n logger.warning(f\"Error finding current step index: {e}\")\n return None, None\n\n async def _execute_step(self, executor: BaseAgent, step_info: dict) -> str:\n \"\"\"Execute the current step with the specified agent using agent.run().\"\"\"\n # Prepare context for the agent with current plan status\n plan_status = await self._get_plan_text()\n step_text = step_info.get(\"text\", f\"Step {self.current_step_index}\")\n\n # Create a prompt for the agent to execute the current step\n step_prompt = f\"\"\"\n CURRENT PLAN STATUS:\n {plan_status}\n\n YOUR CURRENT TASK:\n You are now working on step {self.current_step_index}: \"{step_text}\"\n\n Please only execute this current step using the appropriate tools. When you're done, provide a summary of what you accomplished.\n \"\"\"\n\n # Use agent.run() to execute the step\n try:\n step_result = await executor.run(step_prompt)\n\n # Mark the step as completed after successful execution\n await self._mark_step_completed()\n\n return step_result\n except Exception as e:\n logger.error(f\"Error executing step {self.current_step_index}: {e}\")\n return f\"Error executing step {self.current_step_index}: {str(e)}\"\n\n async def _mark_step_completed(self) -> None:\n \"\"\"Mark the current step as completed.\"\"\"\n if self.current_step_index is None:\n return\n\n try:\n # Mark the step as completed\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=self.current_step_index,\n step_status=PlanStepStatus.COMPLETED.value,\n )\n logger.info(\n f\"Marked step {self.current_step_index} as completed in plan {self.active_plan_id}\"\n )\n except Exception as e:\n logger.warning(f\"Failed to update plan status: {e}\")\n # Update step status directly in planning tool storage\n if self.active_plan_id in self.planning_tool.plans:\n plan_data = self.planning_tool.plans[self.active_plan_id]\n step_statuses = 
plan_data.get(\"step_statuses\", [])\n\n # Ensure the step_statuses list is long enough\n while len(step_statuses) <= self.current_step_index:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n\n # Update the status\n step_statuses[self.current_step_index] = PlanStepStatus.COMPLETED.value\n plan_data[\"step_statuses\"] = step_statuses\n\n async def _get_plan_text(self) -> str:\n \"\"\"Get the current plan as formatted text.\"\"\"\n try:\n result = await self.planning_tool.execute(\n command=\"get\", plan_id=self.active_plan_id\n )\n return result.output if hasattr(result, \"output\") else str(result)\n except Exception as e:\n logger.error(f\"Error getting plan: {e}\")\n return self._generate_plan_text_from_storage()\n\n def _generate_plan_text_from_storage(self) -> str:\n \"\"\"Generate plan text directly from storage if the planning tool fails.\"\"\"\n try:\n if self.active_plan_id not in self.planning_tool.plans:\n return f\"Error: Plan with ID {self.active_plan_id} not found\"\n\n plan_data = self.planning_tool.plans[self.active_plan_id]\n title = plan_data.get(\"title\", \"Untitled Plan\")\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n step_notes = plan_data.get(\"step_notes\", [])\n\n # Ensure step_statuses and step_notes match the number of steps\n while len(step_statuses) < len(steps):\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n while len(step_notes) < len(steps):\n step_notes.append(\"\")\n\n # Count steps by status\n status_counts = {status: 0 for status in PlanStepStatus.get_all_statuses()}\n\n for status in step_statuses:\n if status in status_counts:\n status_counts[status] += 1\n\n completed = status_counts[PlanStepStatus.COMPLETED.value]\n total = len(steps)\n progress = (completed / total) * 100 if total > 0 else 0\n\n plan_text = f\"Plan: {title} (ID: {self.active_plan_id})\\n\"\n plan_text += \"=\" * len(plan_text) + \"\\n\\n\"\n\n plan_text += (\n f\"Progress: 
{completed}/{total} steps completed ({progress:.1f}%)\\n\"\n )\n plan_text += f\"Status: {status_counts[PlanStepStatus.COMPLETED.value]} completed, {status_counts[PlanStepStatus.IN_PROGRESS.value]} in progress, \"\n plan_text += f\"{status_counts[PlanStepStatus.BLOCKED.value]} blocked, {status_counts[PlanStepStatus.NOT_STARTED.value]} not started\\n\\n\"\n plan_text += \"Steps:\\n\"\n\n status_marks = PlanStepStatus.get_status_marks()\n\n for i, (step, status, notes) in enumerate(\n zip(steps, step_statuses, step_notes)\n ):\n # Use status marks to indicate step status\n status_mark = status_marks.get(\n status, status_marks[PlanStepStatus.NOT_STARTED.value]\n )\n\n plan_text += f\"{i}. {status_mark} {step}\\n\"\n if notes:\n plan_text += f\" Notes: {notes}\\n\"\n\n return plan_text\n except Exception as e:\n logger.error(f\"Error generating plan text from storage: {e}\")\n return f\"Error: Unable to retrieve plan with ID {self.active_plan_id}\"\n\n async def _finalize_plan(self) -> str:\n \"\"\"Finalize the plan and provide a summary using the flow's LLM directly.\"\"\"\n plan_text = await self._get_plan_text()\n\n # Create a summary using the flow's LLM directly\n try:\n system_message = Message.system_message(\n \"You are a planning assistant. Your task is to summarize the completed plan.\"\n )\n\n user_message = Message.user_message(\n f\"The plan has been completed. Here is the final plan status:\\n\\n{plan_text}\\n\\nPlease provide a summary of what was accomplished and any final thoughts.\"\n )\n\n response = await self.llm.ask(\n messages=[user_message], system_msgs=[system_message]\n )\n\n return f\"Plan completed:\\n\\n{response}\"\n except Exception as e:\n logger.error(f\"Error finalizing plan with LLM: {e}\")\n\n # Fallback to using an agent for the summary\n try:\n agent = self.primary_agent\n summary_prompt = f\"\"\"\n The plan has been completed. 
Here is the final plan status:\n\n {plan_text}\n\n Please provide a summary of what was accomplished and any final thoughts.\n \"\"\"\n summary = await agent.run(summary_prompt)\n return f\"Plan completed:\\n\\n{summary}\"\n except Exception as e2:\n logger.error(f\"Error finalizing plan with agent: {e2}\")\n return \"Plan completed. Error generating summary.\"\n"], ["/OpenManus/app/tool/crawl4ai.py", "\"\"\"\nCrawl4AI Web Crawler Tool for OpenManus\n\nThis tool integrates Crawl4AI, a high-performance web crawler designed for LLMs and AI agents,\nproviding fast, precise, and AI-ready data extraction with clean Markdown generation.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Union\nfrom urllib.parse import urlparse\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\n\n\nclass Crawl4aiTool(BaseTool):\n \"\"\"\n Web crawler tool powered by Crawl4AI.\n\n Provides clean markdown extraction optimized for AI processing.\n \"\"\"\n\n name: str = \"crawl4ai\"\n description: str = \"\"\"Web crawler that extracts clean, AI-ready content from web pages.\n\n Features:\n - Extracts clean markdown content optimized for LLMs\n - Handles JavaScript-heavy sites and dynamic content\n - Supports multiple URLs in a single request\n - Fast and reliable with built-in error handling\n\n Perfect for content analysis, research, and feeding web content to AI models.\"\"\"\n\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"urls\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"(required) List of URLs to crawl. Can be a single URL or multiple URLs.\",\n \"minItems\": 1,\n },\n \"timeout\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Timeout in seconds for each URL. Default is 30.\",\n \"default\": 30,\n \"minimum\": 5,\n \"maximum\": 120,\n },\n \"bypass_cache\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to bypass cache and fetch fresh content. 
Default is false.\",\n \"default\": False,\n },\n \"word_count_threshold\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Minimum word count for content blocks. Default is 10.\",\n \"default\": 10,\n \"minimum\": 1,\n },\n },\n \"required\": [\"urls\"],\n }\n\n async def execute(\n self,\n urls: Union[str, List[str]],\n timeout: int = 30,\n bypass_cache: bool = False,\n word_count_threshold: int = 10,\n ) -> ToolResult:\n \"\"\"\n Execute web crawling for the specified URLs.\n\n Args:\n urls: Single URL string or list of URLs to crawl\n timeout: Timeout in seconds for each URL\n bypass_cache: Whether to bypass cache\n word_count_threshold: Minimum word count for content blocks\n\n Returns:\n ToolResult with crawl results\n \"\"\"\n # Normalize URLs to list\n if isinstance(urls, str):\n url_list = [urls]\n else:\n url_list = urls\n\n # Validate URLs\n valid_urls = []\n for url in url_list:\n if self._is_valid_url(url):\n valid_urls.append(url)\n else:\n logger.warning(f\"Invalid URL skipped: {url}\")\n\n if not valid_urls:\n return ToolResult(error=\"No valid URLs provided\")\n\n try:\n # Import crawl4ai components\n from crawl4ai import (\n AsyncWebCrawler,\n BrowserConfig,\n CacheMode,\n CrawlerRunConfig,\n )\n\n # Configure browser settings\n browser_config = BrowserConfig(\n headless=True,\n verbose=False,\n browser_type=\"chromium\",\n ignore_https_errors=True,\n java_script_enabled=True,\n )\n\n # Configure crawler settings\n run_config = CrawlerRunConfig(\n cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,\n word_count_threshold=word_count_threshold,\n process_iframes=True,\n remove_overlay_elements=True,\n excluded_tags=[\"script\", \"style\"],\n page_timeout=timeout * 1000, # Convert to milliseconds\n verbose=False,\n wait_until=\"domcontentloaded\",\n )\n\n results = []\n successful_count = 0\n failed_count = 0\n\n # Process each URL\n async with AsyncWebCrawler(config=browser_config) as crawler:\n for url in valid_urls:\n 
try:\n logger.info(f\"🕷️ Crawling URL: {url}\")\n start_time = asyncio.get_event_loop().time()\n\n result = await crawler.arun(url=url, config=run_config)\n\n end_time = asyncio.get_event_loop().time()\n execution_time = end_time - start_time\n\n if result.success:\n # Count words in markdown\n word_count = 0\n if hasattr(result, \"markdown\") and result.markdown:\n word_count = len(result.markdown.split())\n\n # Count links\n links_count = 0\n if hasattr(result, \"links\") and result.links:\n internal_links = result.links.get(\"internal\", [])\n external_links = result.links.get(\"external\", [])\n links_count = len(internal_links) + len(external_links)\n\n # Count images\n images_count = 0\n if hasattr(result, \"media\") and result.media:\n images = result.media.get(\"images\", [])\n images_count = len(images)\n\n results.append(\n {\n \"url\": url,\n \"success\": True,\n \"status_code\": getattr(result, \"status_code\", 200),\n \"title\": result.metadata.get(\"title\")\n if result.metadata\n else None,\n \"markdown\": result.markdown\n if hasattr(result, \"markdown\")\n else None,\n \"word_count\": word_count,\n \"links_count\": links_count,\n \"images_count\": images_count,\n \"execution_time\": execution_time,\n }\n )\n successful_count += 1\n logger.info(\n f\"✅ Successfully crawled {url} in {execution_time:.2f}s\"\n )\n\n else:\n results.append(\n {\n \"url\": url,\n \"success\": False,\n \"error_message\": getattr(\n result, \"error_message\", \"Unknown error\"\n ),\n \"execution_time\": execution_time,\n }\n )\n failed_count += 1\n logger.warning(f\"❌ Failed to crawl {url}\")\n\n except Exception as e:\n error_msg = f\"Error crawling {url}: {str(e)}\"\n logger.error(error_msg)\n results.append(\n {\"url\": url, \"success\": False, \"error_message\": error_msg}\n )\n failed_count += 1\n\n # Format output\n output_lines = [f\"🕷️ Crawl4AI Results Summary:\"]\n output_lines.append(f\"📊 Total URLs: {len(valid_urls)}\")\n output_lines.append(f\"✅ Successful: 
{successful_count}\")\n output_lines.append(f\"❌ Failed: {failed_count}\")\n output_lines.append(\"\")\n\n for i, result in enumerate(results, 1):\n output_lines.append(f\"{i}. {result['url']}\")\n\n if result[\"success\"]:\n output_lines.append(\n f\" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})\"\n )\n if result.get(\"title\"):\n output_lines.append(f\" 📄 Title: {result['title']}\")\n\n if result.get(\"markdown\"):\n # Show first 300 characters of markdown content\n content_preview = result[\"markdown\"]\n if len(result[\"markdown\"]) > 300:\n content_preview += \"...\"\n output_lines.append(f\" 📝 Content: {content_preview}\")\n\n output_lines.append(\n f\" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images\"\n )\n\n if result.get(\"execution_time\"):\n output_lines.append(\n f\" ⏱️ Time: {result['execution_time']:.2f}s\"\n )\n else:\n output_lines.append(f\" ❌ Status: Failed\")\n if result.get(\"error_message\"):\n output_lines.append(f\" 🚫 Error: {result['error_message']}\")\n\n output_lines.append(\"\")\n\n return ToolResult(output=\"\\n\".join(output_lines))\n\n except ImportError as e:\n error_msg = \"Crawl4AI is not installed. 
Please install it with: pip install crawl4ai\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n except Exception as e:\n error_msg = f\"Crawl4AI execution failed: {str(e)}\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n\n def _is_valid_url(self, url: str) -> bool:\n \"\"\"Validate if a URL is properly formatted.\"\"\"\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc]) and result.scheme in [\n \"http\",\n \"https\",\n ]\n except Exception:\n return False\n"], ["/OpenManus/app/llm.py", "import math\nfrom typing import Dict, List, Optional, Union\n\nimport tiktoken\nfrom openai import (\n APIError,\n AsyncAzureOpenAI,\n AsyncOpenAI,\n AuthenticationError,\n OpenAIError,\n RateLimitError,\n)\nfrom openai.types.chat import ChatCompletion, ChatCompletionMessage\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_random_exponential,\n)\n\nfrom app.bedrock import BedrockClient\nfrom app.config import LLMSettings, config\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger # Assuming a logger is set up in your app\nfrom app.schema import (\n ROLE_VALUES,\n TOOL_CHOICE_TYPE,\n TOOL_CHOICE_VALUES,\n Message,\n ToolChoice,\n)\n\n\nREASONING_MODELS = [\"o1\", \"o3-mini\"]\nMULTIMODAL_MODELS = [\n \"gpt-4-vision-preview\",\n \"gpt-4o\",\n \"gpt-4o-mini\",\n \"claude-3-opus-20240229\",\n \"claude-3-sonnet-20240229\",\n \"claude-3-haiku-20240307\",\n]\n\n\nclass TokenCounter:\n # Token constants\n BASE_MESSAGE_TOKENS = 4\n FORMAT_TOKENS = 2\n LOW_DETAIL_IMAGE_TOKENS = 85\n HIGH_DETAIL_TILE_TOKENS = 170\n\n # Image processing constants\n MAX_SIZE = 2048\n HIGH_DETAIL_TARGET_SHORT_SIDE = 768\n TILE_SIZE = 512\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n def count_text(self, text: str) -> int:\n \"\"\"Calculate tokens for a text string\"\"\"\n return 0 if not text else len(self.tokenizer.encode(text))\n\n def count_image(self, image_item: 
dict) -> int:\n \"\"\"\n Calculate tokens for an image based on detail level and dimensions\n\n For \"low\" detail: fixed 85 tokens\n For \"high\" detail:\n 1. Scale to fit in 2048x2048 square\n 2. Scale shortest side to 768px\n 3. Count 512px tiles (170 tokens each)\n 4. Add 85 tokens\n \"\"\"\n detail = image_item.get(\"detail\", \"medium\")\n\n # For low detail, always return fixed token count\n if detail == \"low\":\n return self.LOW_DETAIL_IMAGE_TOKENS\n\n # For medium detail (default in OpenAI), use high detail calculation\n # OpenAI doesn't specify a separate calculation for medium\n\n # For high detail, calculate based on dimensions if available\n if detail == \"high\" or detail == \"medium\":\n # If dimensions are provided in the image_item\n if \"dimensions\" in image_item:\n width, height = image_item[\"dimensions\"]\n return self._calculate_high_detail_tokens(width, height)\n\n return (\n self._calculate_high_detail_tokens(1024, 1024) if detail == \"high\" else 1024\n )\n\n def _calculate_high_detail_tokens(self, width: int, height: int) -> int:\n \"\"\"Calculate tokens for high detail images based on dimensions\"\"\"\n # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square\n if width > self.MAX_SIZE or height > self.MAX_SIZE:\n scale = self.MAX_SIZE / max(width, height)\n width = int(width * scale)\n height = int(height * scale)\n\n # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE\n scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height)\n scaled_width = int(width * scale)\n scaled_height = int(height * scale)\n\n # Step 3: Count number of 512px tiles\n tiles_x = math.ceil(scaled_width / self.TILE_SIZE)\n tiles_y = math.ceil(scaled_height / self.TILE_SIZE)\n total_tiles = tiles_x * tiles_y\n\n # Step 4: Calculate final token count\n return (\n total_tiles * self.HIGH_DETAIL_TILE_TOKENS\n ) + self.LOW_DETAIL_IMAGE_TOKENS\n\n def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int:\n \"\"\"Calculate tokens for 
message content\"\"\"\n if not content:\n return 0\n\n if isinstance(content, str):\n return self.count_text(content)\n\n token_count = 0\n for item in content:\n if isinstance(item, str):\n token_count += self.count_text(item)\n elif isinstance(item, dict):\n if \"text\" in item:\n token_count += self.count_text(item[\"text\"])\n elif \"image_url\" in item:\n token_count += self.count_image(item)\n return token_count\n\n def count_tool_calls(self, tool_calls: List[dict]) -> int:\n \"\"\"Calculate tokens for tool calls\"\"\"\n token_count = 0\n for tool_call in tool_calls:\n if \"function\" in tool_call:\n function = tool_call[\"function\"]\n token_count += self.count_text(function.get(\"name\", \"\"))\n token_count += self.count_text(function.get(\"arguments\", \"\"))\n return token_count\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n \"\"\"Calculate the total number of tokens in a message list\"\"\"\n total_tokens = self.FORMAT_TOKENS # Base format tokens\n\n for message in messages:\n tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message\n\n # Add role tokens\n tokens += self.count_text(message.get(\"role\", \"\"))\n\n # Add content tokens\n if \"content\" in message:\n tokens += self.count_content(message[\"content\"])\n\n # Add tool calls tokens\n if \"tool_calls\" in message:\n tokens += self.count_tool_calls(message[\"tool_calls\"])\n\n # Add name and tool_call_id tokens\n tokens += self.count_text(message.get(\"name\", \"\"))\n tokens += self.count_text(message.get(\"tool_call_id\", \"\"))\n\n total_tokens += tokens\n\n return total_tokens\n\n\nclass LLM:\n _instances: Dict[str, \"LLM\"] = {}\n\n def __new__(\n cls, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if config_name not in cls._instances:\n instance = super().__new__(cls)\n instance.__init__(config_name, llm_config)\n cls._instances[config_name] = instance\n return cls._instances[config_name]\n\n def __init__(\n self, config_name: str = 
\"default\", llm_config: Optional[LLMSettings] = None\n ):\n if not hasattr(self, \"client\"): # Only initialize if not already initialized\n llm_config = llm_config or config.llm\n llm_config = llm_config.get(config_name, llm_config[\"default\"])\n self.model = llm_config.model\n self.max_tokens = llm_config.max_tokens\n self.temperature = llm_config.temperature\n self.api_type = llm_config.api_type\n self.api_key = llm_config.api_key\n self.api_version = llm_config.api_version\n self.base_url = llm_config.base_url\n\n # Add token counting related attributes\n self.total_input_tokens = 0\n self.total_completion_tokens = 0\n self.max_input_tokens = (\n llm_config.max_input_tokens\n if hasattr(llm_config, \"max_input_tokens\")\n else None\n )\n\n # Initialize tokenizer\n try:\n self.tokenizer = tiktoken.encoding_for_model(self.model)\n except KeyError:\n # If the model is not in tiktoken's presets, use cl100k_base as default\n self.tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n\n if self.api_type == \"azure\":\n self.client = AsyncAzureOpenAI(\n base_url=self.base_url,\n api_key=self.api_key,\n api_version=self.api_version,\n )\n elif self.api_type == \"aws\":\n self.client = BedrockClient()\n else:\n self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)\n\n self.token_counter = TokenCounter(self.tokenizer)\n\n def count_tokens(self, text: str) -> int:\n \"\"\"Calculate the number of tokens in a text\"\"\"\n if not text:\n return 0\n return len(self.tokenizer.encode(text))\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n return self.token_counter.count_message_tokens(messages)\n\n def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None:\n \"\"\"Update token counts\"\"\"\n # Only track tokens if max_input_tokens is set\n self.total_input_tokens += input_tokens\n self.total_completion_tokens += completion_tokens\n logger.info(\n f\"Token usage: Input={input_tokens}, Completion={completion_tokens}, 
\"\n f\"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, \"\n f\"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}\"\n )\n\n def check_token_limit(self, input_tokens: int) -> bool:\n \"\"\"Check if token limits are exceeded\"\"\"\n if self.max_input_tokens is not None:\n return (self.total_input_tokens + input_tokens) <= self.max_input_tokens\n # If max_input_tokens is not set, always return True\n return True\n\n def get_limit_error_message(self, input_tokens: int) -> str:\n \"\"\"Generate error message for token limit exceeded\"\"\"\n if (\n self.max_input_tokens is not None\n and (self.total_input_tokens + input_tokens) > self.max_input_tokens\n ):\n return f\"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})\"\n\n return \"Token limit exceeded\"\n\n @staticmethod\n def format_messages(\n messages: List[Union[dict, Message]], supports_images: bool = False\n ) -> List[dict]:\n \"\"\"\n Format messages for LLM by converting them to OpenAI message format.\n\n Args:\n messages: List of messages that can be either dict or Message objects\n supports_images: Flag indicating if the target model supports image inputs\n\n Returns:\n List[dict]: List of formatted messages in OpenAI format\n\n Raises:\n ValueError: If messages are invalid or missing required fields\n TypeError: If unsupported message types are provided\n\n Examples:\n >>> msgs = [\n ... Message.system_message(\"You are a helpful assistant\"),\n ... {\"role\": \"user\", \"content\": \"Hello\"},\n ... Message.user_message(\"How are you?\")\n ... 
]\n >>> formatted = LLM.format_messages(msgs)\n \"\"\"\n formatted_messages = []\n\n for message in messages:\n # Convert Message objects to dictionaries\n if isinstance(message, Message):\n message = message.to_dict()\n\n if isinstance(message, dict):\n # If message is a dict, ensure it has required fields\n if \"role\" not in message:\n raise ValueError(\"Message dict must contain 'role' field\")\n\n # Process base64 images if present and model supports images\n if supports_images and message.get(\"base64_image\"):\n # Initialize or convert content to appropriate format\n if not message.get(\"content\"):\n message[\"content\"] = []\n elif isinstance(message[\"content\"], str):\n message[\"content\"] = [\n {\"type\": \"text\", \"text\": message[\"content\"]}\n ]\n elif isinstance(message[\"content\"], list):\n # Convert string items to proper text objects\n message[\"content\"] = [\n (\n {\"type\": \"text\", \"text\": item}\n if isinstance(item, str)\n else item\n )\n for item in message[\"content\"]\n ]\n\n # Add the image to content\n message[\"content\"].append(\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": f\"data:image/jpeg;base64,{message['base64_image']}\"\n },\n }\n )\n\n # Remove the base64_image field\n del message[\"base64_image\"]\n # If model doesn't support images but message has base64_image, handle gracefully\n elif not supports_images and message.get(\"base64_image\"):\n # Just remove the base64_image field and keep the text content\n del message[\"base64_image\"]\n\n if \"content\" in message or \"tool_calls\" in message:\n formatted_messages.append(message)\n # else: do not include the message\n else:\n raise TypeError(f\"Unsupported message type: {type(message)}\")\n\n # Validate all messages have required fields\n for msg in formatted_messages:\n if msg[\"role\"] not in ROLE_VALUES:\n raise ValueError(f\"Invalid role: {msg['role']}\")\n\n return formatted_messages\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n 
stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = True,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format system and user messages with image support check\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n params = {\n \"model\": self.model,\n \"messages\": messages,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n if not stream:\n # Non-streaming request\n response = await 
self.client.chat.completions.create(\n **params, stream=False\n )\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message.content\n\n # Streaming request, For streaming, update estimated token count before making the request\n self.update_token_count(input_tokens)\n\n response = await self.client.chat.completions.create(**params, stream=True)\n\n collected_messages = []\n completion_text = \"\"\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n completion_text += chunk_message\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n # estimate completion tokens for streaming response\n completion_tokens = self.count_tokens(completion_text)\n logger.info(\n f\"Estimated completion tokens for streaming response: {completion_tokens}\"\n )\n self.total_completion_tokens += completion_tokens\n\n return full_response\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError:\n logger.exception(f\"Validation error\")\n raise\n except OpenAIError as oe:\n logger.exception(f\"OpenAI API error\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception:\n logger.exception(f\"Unexpected error in ask\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_with_images(\n self,\n messages: List[Union[dict, Message]],\n images: List[Union[str, dict]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = False,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt with images to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n images: List of image URLs or image data dictionaries\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # For ask_with_images, we always set supports_images to True because\n # this method should only be called with models that support images\n if self.model not in MULTIMODAL_MODELS:\n raise ValueError(\n f\"Model {self.model} does not support images. 
Use a model from {MULTIMODAL_MODELS}\"\n )\n\n # Format messages with image support\n formatted_messages = self.format_messages(messages, supports_images=True)\n\n # Ensure the last message is from the user to attach images\n if not formatted_messages or formatted_messages[-1][\"role\"] != \"user\":\n raise ValueError(\n \"The last message must be from the user to attach images\"\n )\n\n # Process the last user message to include images\n last_message = formatted_messages[-1]\n\n # Convert content to multimodal format if needed\n content = last_message[\"content\"]\n multimodal_content = (\n [{\"type\": \"text\", \"text\": content}]\n if isinstance(content, str)\n else content\n if isinstance(content, list)\n else []\n )\n\n # Add images to content\n for image in images:\n if isinstance(image, str):\n multimodal_content.append(\n {\"type\": \"image_url\", \"image_url\": {\"url\": image}}\n )\n elif isinstance(image, dict) and \"url\" in image:\n multimodal_content.append({\"type\": \"image_url\", \"image_url\": image})\n elif isinstance(image, dict) and \"image_url\" in image:\n multimodal_content.append(image)\n else:\n raise ValueError(f\"Unsupported image format: {image}\")\n\n # Update the message with multimodal content\n last_message[\"content\"] = multimodal_content\n\n # Add system messages if provided\n if system_msgs:\n all_messages = (\n self.format_messages(system_msgs, supports_images=True)\n + formatted_messages\n )\n else:\n all_messages = formatted_messages\n\n # Calculate tokens and check limits\n input_tokens = self.count_message_tokens(all_messages)\n if not self.check_token_limit(input_tokens):\n raise TokenLimitExceeded(self.get_limit_error_message(input_tokens))\n\n # Set up API parameters\n params = {\n \"model\": self.model,\n \"messages\": all_messages,\n \"stream\": stream,\n }\n\n # Add model-specific parameters\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = 
self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n # Handle non-streaming request\n if not stream:\n response = await self.client.chat.completions.create(**params)\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n self.update_token_count(response.usage.prompt_tokens)\n return response.choices[0].message.content\n\n # Handle streaming request\n self.update_token_count(input_tokens)\n response = await self.client.chat.completions.create(**params)\n\n collected_messages = []\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n return full_response\n\n except TokenLimitExceeded:\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_with_images: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_with_images: {e}\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_tool(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n timeout: int = 300,\n tools: Optional[List[dict]] = None,\n tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore\n temperature: Optional[float] = None,\n **kwargs,\n ) -> ChatCompletionMessage | None:\n \"\"\"\n Ask LLM using functions/tools and return the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n timeout: Request timeout in seconds\n tools: List of tools to use\n tool_choice: Tool choice strategy\n temperature: Sampling temperature for the response\n **kwargs: Additional completion arguments\n\n Returns:\n ChatCompletionMessage: The model's response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If tools, tool_choice, or messages are invalid\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Validate tool_choice\n if tool_choice not in TOOL_CHOICE_VALUES:\n raise ValueError(f\"Invalid tool_choice: {tool_choice}\")\n\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format messages\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # If there are tools, 
calculate token count for tool descriptions\n tools_tokens = 0\n if tools:\n for tool in tools:\n tools_tokens += self.count_tokens(str(tool))\n\n input_tokens += tools_tokens\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n # Validate tools if provided\n if tools:\n for tool in tools:\n if not isinstance(tool, dict) or \"type\" not in tool:\n raise ValueError(\"Each tool must be a dict with 'type' field\")\n\n # Set up the completion request\n params = {\n \"model\": self.model,\n \"messages\": messages,\n \"tools\": tools,\n \"tool_choice\": tool_choice,\n \"timeout\": timeout,\n **kwargs,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n params[\"stream\"] = False # Always use non-streaming for tool requests\n response: ChatCompletion = await self.client.chat.completions.create(\n **params\n )\n\n # Check if response is valid\n if not response.choices or not response.choices[0].message:\n print(response)\n # raise ValueError(\"Invalid or empty response from LLM\")\n return None\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_tool: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_tool: {e}\")\n raise\n"], ["/OpenManus/app/config.py", "import json\nimport threading\nimport tomllib\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\ndef get_project_root() -> Path:\n \"\"\"Get the project root directory\"\"\"\n return Path(__file__).resolve().parent.parent\n\n\nPROJECT_ROOT = get_project_root()\nWORKSPACE_ROOT = PROJECT_ROOT / \"workspace\"\n\n\nclass LLMSettings(BaseModel):\n model: str = Field(..., description=\"Model name\")\n base_url: str = Field(..., description=\"API base URL\")\n api_key: str = Field(..., description=\"API key\")\n max_tokens: int = Field(4096, description=\"Maximum number of tokens per request\")\n max_input_tokens: Optional[int] = Field(\n None,\n description=\"Maximum input tokens to use across all requests (None for unlimited)\",\n )\n temperature: float = Field(1.0, description=\"Sampling temperature\")\n api_type: str = Field(..., description=\"Azure, Openai, or Ollama\")\n api_version: str = Field(..., description=\"Azure Openai version if AzureOpenai\")\n\n\nclass ProxySettings(BaseModel):\n server: str = Field(None, description=\"Proxy server address\")\n username: Optional[str] = Field(None, description=\"Proxy username\")\n password: Optional[str] = Field(None, description=\"Proxy password\")\n\n\nclass SearchSettings(BaseModel):\n engine: str = Field(default=\"Google\", description=\"Search engine the llm to use\")\n fallback_engines: List[str] = Field(\n default_factory=lambda: [\"DuckDuckGo\", \"Baidu\", \"Bing\"],\n description=\"Fallback search engines to try if the primary engine fails\",\n )\n retry_delay: int = Field(\n default=60,\n description=\"Seconds to wait before retrying all engines again after they all fail\",\n )\n max_retries: int = Field(\n 
default=3,\n description=\"Maximum number of times to retry all engines when all fail\",\n )\n lang: str = Field(\n default=\"en\",\n description=\"Language code for search results (e.g., en, zh, fr)\",\n )\n country: str = Field(\n default=\"us\",\n description=\"Country code for search results (e.g., us, cn, uk)\",\n )\n\n\nclass RunflowSettings(BaseModel):\n use_data_analysis_agent: bool = Field(\n default=False, description=\"Enable data analysis agent in run flow\"\n )\n\n\nclass BrowserSettings(BaseModel):\n headless: bool = Field(False, description=\"Whether to run browser in headless mode\")\n disable_security: bool = Field(\n True, description=\"Disable browser security features\"\n )\n extra_chromium_args: List[str] = Field(\n default_factory=list, description=\"Extra arguments to pass to the browser\"\n )\n chrome_instance_path: Optional[str] = Field(\n None, description=\"Path to a Chrome instance to use\"\n )\n wss_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via WebSocket\"\n )\n cdp_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via CDP\"\n )\n proxy: Optional[ProxySettings] = Field(\n None, description=\"Proxy settings for the browser\"\n )\n max_content_length: int = Field(\n 2000, description=\"Maximum length for content retrieval operations\"\n )\n\n\nclass SandboxSettings(BaseModel):\n \"\"\"Configuration for the execution sandbox\"\"\"\n\n use_sandbox: bool = Field(False, description=\"Whether to use the sandbox\")\n image: str = Field(\"python:3.12-slim\", description=\"Base image\")\n work_dir: str = Field(\"/workspace\", description=\"Container working directory\")\n memory_limit: str = Field(\"512m\", description=\"Memory limit\")\n cpu_limit: float = Field(1.0, description=\"CPU limit\")\n timeout: int = Field(300, description=\"Default command timeout (seconds)\")\n network_enabled: bool = Field(\n False, description=\"Whether network access is allowed\"\n )\n\n\nclass 
MCPServerConfig(BaseModel):\n \"\"\"Configuration for a single MCP server\"\"\"\n\n type: str = Field(..., description=\"Server connection type (sse or stdio)\")\n url: Optional[str] = Field(None, description=\"Server URL for SSE connections\")\n command: Optional[str] = Field(None, description=\"Command for stdio connections\")\n args: List[str] = Field(\n default_factory=list, description=\"Arguments for stdio command\"\n )\n\n\nclass MCPSettings(BaseModel):\n \"\"\"Configuration for MCP (Model Context Protocol)\"\"\"\n\n server_reference: str = Field(\n \"app.mcp.server\", description=\"Module reference for the MCP server\"\n )\n servers: Dict[str, MCPServerConfig] = Field(\n default_factory=dict, description=\"MCP server configurations\"\n )\n\n @classmethod\n def load_server_config(cls) -> Dict[str, MCPServerConfig]:\n \"\"\"Load MCP server configuration from JSON file\"\"\"\n config_path = PROJECT_ROOT / \"config\" / \"mcp.json\"\n\n try:\n config_file = config_path if config_path.exists() else None\n if not config_file:\n return {}\n\n with config_file.open() as f:\n data = json.load(f)\n servers = {}\n\n for server_id, server_config in data.get(\"mcpServers\", {}).items():\n servers[server_id] = MCPServerConfig(\n type=server_config[\"type\"],\n url=server_config.get(\"url\"),\n command=server_config.get(\"command\"),\n args=server_config.get(\"args\", []),\n )\n return servers\n except Exception as e:\n raise ValueError(f\"Failed to load MCP server config: {e}\")\n\n\nclass AppConfig(BaseModel):\n llm: Dict[str, LLMSettings]\n sandbox: Optional[SandboxSettings] = Field(\n None, description=\"Sandbox configuration\"\n )\n browser_config: Optional[BrowserSettings] = Field(\n None, description=\"Browser configuration\"\n )\n search_config: Optional[SearchSettings] = Field(\n None, description=\"Search configuration\"\n )\n mcp_config: Optional[MCPSettings] = Field(None, description=\"MCP configuration\")\n run_flow_config: Optional[RunflowSettings] = Field(\n 
None, description=\"Run flow configuration\"\n )\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass Config:\n _instance = None\n _lock = threading.Lock()\n _initialized = False\n\n def __new__(cls):\n if cls._instance is None:\n with cls._lock:\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n if not self._initialized:\n with self._lock:\n if not self._initialized:\n self._config = None\n self._load_initial_config()\n self._initialized = True\n\n @staticmethod\n def _get_config_path() -> Path:\n root = PROJECT_ROOT\n config_path = root / \"config\" / \"config.toml\"\n if config_path.exists():\n return config_path\n example_path = root / \"config\" / \"config.example.toml\"\n if example_path.exists():\n return example_path\n raise FileNotFoundError(\"No configuration file found in config directory\")\n\n def _load_config(self) -> dict:\n config_path = self._get_config_path()\n with config_path.open(\"rb\") as f:\n return tomllib.load(f)\n\n def _load_initial_config(self):\n raw_config = self._load_config()\n base_llm = raw_config.get(\"llm\", {})\n llm_overrides = {\n k: v for k, v in raw_config.get(\"llm\", {}).items() if isinstance(v, dict)\n }\n\n default_settings = {\n \"model\": base_llm.get(\"model\"),\n \"base_url\": base_llm.get(\"base_url\"),\n \"api_key\": base_llm.get(\"api_key\"),\n \"max_tokens\": base_llm.get(\"max_tokens\", 4096),\n \"max_input_tokens\": base_llm.get(\"max_input_tokens\"),\n \"temperature\": base_llm.get(\"temperature\", 1.0),\n \"api_type\": base_llm.get(\"api_type\", \"\"),\n \"api_version\": base_llm.get(\"api_version\", \"\"),\n }\n\n # handle browser config.\n browser_config = raw_config.get(\"browser\", {})\n browser_settings = None\n\n if browser_config:\n # handle proxy settings.\n proxy_config = browser_config.get(\"proxy\", {})\n proxy_settings = None\n\n if proxy_config and proxy_config.get(\"server\"):\n proxy_settings = ProxySettings(\n **{\n k: 
v\n for k, v in proxy_config.items()\n if k in [\"server\", \"username\", \"password\"] and v\n }\n )\n\n # filter valid browser config parameters.\n valid_browser_params = {\n k: v\n for k, v in browser_config.items()\n if k in BrowserSettings.__annotations__ and v is not None\n }\n\n # if there is proxy settings, add it to the parameters.\n if proxy_settings:\n valid_browser_params[\"proxy\"] = proxy_settings\n\n # only create BrowserSettings when there are valid parameters.\n if valid_browser_params:\n browser_settings = BrowserSettings(**valid_browser_params)\n\n search_config = raw_config.get(\"search\", {})\n search_settings = None\n if search_config:\n search_settings = SearchSettings(**search_config)\n sandbox_config = raw_config.get(\"sandbox\", {})\n if sandbox_config:\n sandbox_settings = SandboxSettings(**sandbox_config)\n else:\n sandbox_settings = SandboxSettings()\n\n mcp_config = raw_config.get(\"mcp\", {})\n mcp_settings = None\n if mcp_config:\n # Load server configurations from JSON\n mcp_config[\"servers\"] = MCPSettings.load_server_config()\n mcp_settings = MCPSettings(**mcp_config)\n else:\n mcp_settings = MCPSettings(servers=MCPSettings.load_server_config())\n\n run_flow_config = raw_config.get(\"runflow\")\n if run_flow_config:\n run_flow_settings = RunflowSettings(**run_flow_config)\n else:\n run_flow_settings = RunflowSettings()\n config_dict = {\n \"llm\": {\n \"default\": default_settings,\n **{\n name: {**default_settings, **override_config}\n for name, override_config in llm_overrides.items()\n },\n },\n \"sandbox\": sandbox_settings,\n \"browser_config\": browser_settings,\n \"search_config\": search_settings,\n \"mcp_config\": mcp_settings,\n \"run_flow_config\": run_flow_settings,\n }\n\n self._config = AppConfig(**config_dict)\n\n @property\n def llm(self) -> Dict[str, LLMSettings]:\n return self._config.llm\n\n @property\n def sandbox(self) -> SandboxSettings:\n return self._config.sandbox\n\n @property\n def browser_config(self) 
-> Optional[BrowserSettings]:\n return self._config.browser_config\n\n @property\n def search_config(self) -> Optional[SearchSettings]:\n return self._config.search_config\n\n @property\n def mcp_config(self) -> MCPSettings:\n \"\"\"Get the MCP configuration\"\"\"\n return self._config.mcp_config\n\n @property\n def run_flow_config(self) -> RunflowSettings:\n \"\"\"Get the Run Flow configuration\"\"\"\n return self._config.run_flow_config\n\n @property\n def workspace_root(self) -> Path:\n \"\"\"Get the workspace root directory\"\"\"\n return WORKSPACE_ROOT\n\n @property\n def root_path(self) -> Path:\n \"\"\"Get the root path of the application\"\"\"\n return PROJECT_ROOT\n\n\nconfig = Config()\n"], ["/OpenManus/app/tool/web_search.py", "import asyncio\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pydantic import BaseModel, ConfigDict, Field, model_validator\nfrom tenacity import retry, stop_after_attempt, wait_exponential\n\nfrom app.config import config\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.search import (\n BaiduSearchEngine,\n BingSearchEngine,\n DuckDuckGoSearchEngine,\n GoogleSearchEngine,\n WebSearchEngine,\n)\nfrom app.tool.search.base import SearchItem\n\n\nclass SearchResult(BaseModel):\n \"\"\"Represents a single search result returned by a search engine.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n position: int = Field(description=\"Position in search results\")\n url: str = Field(description=\"URL of the search result\")\n title: str = Field(default=\"\", description=\"Title of the search result\")\n description: str = Field(\n default=\"\", description=\"Description or snippet of the search result\"\n )\n source: str = Field(description=\"The search engine that provided this result\")\n raw_content: Optional[str] = Field(\n default=None, description=\"Raw content from the search result page if available\"\n )\n\n 
def __str__(self) -> str:\n \"\"\"String representation of a search result.\"\"\"\n return f\"{self.title} ({self.url})\"\n\n\nclass SearchMetadata(BaseModel):\n \"\"\"Metadata about the search operation.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n total_results: int = Field(description=\"Total number of results found\")\n language: str = Field(description=\"Language code used for the search\")\n country: str = Field(description=\"Country code used for the search\")\n\n\nclass SearchResponse(ToolResult):\n \"\"\"Structured response from the web search tool, inheriting ToolResult.\"\"\"\n\n query: str = Field(description=\"The search query that was executed\")\n results: List[SearchResult] = Field(\n default_factory=list, description=\"List of search results\"\n )\n metadata: Optional[SearchMetadata] = Field(\n default=None, description=\"Metadata about the search\"\n )\n\n @model_validator(mode=\"after\")\n def populate_output(self) -> \"SearchResponse\":\n \"\"\"Populate output or error fields based on search results.\"\"\"\n if self.error:\n return self\n\n result_text = [f\"Search results for '{self.query}':\"]\n\n for i, result in enumerate(self.results, 1):\n # Add title with position number\n title = result.title.strip() or \"No title\"\n result_text.append(f\"\\n{i}. 
{title}\")\n\n # Add URL with proper indentation\n result_text.append(f\" URL: {result.url}\")\n\n # Add description if available\n if result.description.strip():\n result_text.append(f\" Description: {result.description}\")\n\n # Add content preview if available\n if result.raw_content:\n content_preview = result.raw_content[:1000].replace(\"\\n\", \" \").strip()\n if len(result.raw_content) > 1000:\n content_preview += \"...\"\n result_text.append(f\" Content: {content_preview}\")\n\n # Add metadata at the bottom if available\n if self.metadata:\n result_text.extend(\n [\n f\"\\nMetadata:\",\n f\"- Total results: {self.metadata.total_results}\",\n f\"- Language: {self.metadata.language}\",\n f\"- Country: {self.metadata.country}\",\n ]\n )\n\n self.output = \"\\n\".join(result_text)\n return self\n\n\nclass WebContentFetcher:\n \"\"\"Utility class for fetching web content.\"\"\"\n\n @staticmethod\n async def fetch_content(url: str, timeout: int = 10) -> Optional[str]:\n \"\"\"\n Fetch and extract the main content from a webpage.\n\n Args:\n url: The URL to fetch content from\n timeout: Request timeout in seconds\n\n Returns:\n Extracted text content or None if fetching fails\n \"\"\"\n headers = {\n \"WebSearch\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n\n try:\n # Use asyncio to run requests in a thread pool\n response = await asyncio.get_event_loop().run_in_executor(\n None, lambda: requests.get(url, headers=headers, timeout=timeout)\n )\n\n if response.status_code != 200:\n logger.warning(\n f\"Failed to fetch content from {url}: HTTP {response.status_code}\"\n )\n return None\n\n # Parse HTML with BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\", \"header\", \"footer\", \"nav\"]):\n script.extract()\n\n # Get text content\n text = soup.get_text(separator=\"\\n\", 
strip=True)\n\n # Clean up whitespace and limit size (100KB max)\n text = \" \".join(text.split())\n return text[:10000] if text else None\n\n except Exception as e:\n logger.warning(f\"Error fetching content from {url}: {e}\")\n return None\n\n\nclass WebSearch(BaseTool):\n \"\"\"Search the web for information using various search engines.\"\"\"\n\n name: str = \"web_search\"\n description: str = \"\"\"Search the web for real-time information about any topic.\n This tool returns comprehensive search results with relevant information, URLs, titles, and descriptions.\n If the primary search engine fails, it automatically falls back to alternative engines.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\",\n \"description\": \"(required) The search query to submit to the search engine.\",\n },\n \"num_results\": {\n \"type\": \"integer\",\n \"description\": \"(optional) The number of search results to return. Default is 5.\",\n \"default\": 5,\n },\n \"lang\": {\n \"type\": \"string\",\n \"description\": \"(optional) Language code for search results (default: en).\",\n \"default\": \"en\",\n },\n \"country\": {\n \"type\": \"string\",\n \"description\": \"(optional) Country code for search results (default: us).\",\n \"default\": \"us\",\n },\n \"fetch_content\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to fetch full content from result pages. 
Default is false.\",\n \"default\": False,\n },\n },\n \"required\": [\"query\"],\n }\n _search_engine: dict[str, WebSearchEngine] = {\n \"google\": GoogleSearchEngine(),\n \"baidu\": BaiduSearchEngine(),\n \"duckduckgo\": DuckDuckGoSearchEngine(),\n \"bing\": BingSearchEngine(),\n }\n content_fetcher: WebContentFetcher = WebContentFetcher()\n\n async def execute(\n self,\n query: str,\n num_results: int = 5,\n lang: Optional[str] = None,\n country: Optional[str] = None,\n fetch_content: bool = False,\n ) -> SearchResponse:\n \"\"\"\n Execute a Web search and return detailed search results.\n\n Args:\n query: The search query to submit to the search engine\n num_results: The number of search results to return (default: 5)\n lang: Language code for search results (default from config)\n country: Country code for search results (default from config)\n fetch_content: Whether to fetch content from result pages (default: False)\n\n Returns:\n A structured response containing search results and metadata\n \"\"\"\n # Get settings from config\n retry_delay = (\n getattr(config.search_config, \"retry_delay\", 60)\n if config.search_config\n else 60\n )\n max_retries = (\n getattr(config.search_config, \"max_retries\", 3)\n if config.search_config\n else 3\n )\n\n # Use config values for lang and country if not specified\n if lang is None:\n lang = (\n getattr(config.search_config, \"lang\", \"en\")\n if config.search_config\n else \"en\"\n )\n\n if country is None:\n country = (\n getattr(config.search_config, \"country\", \"us\")\n if config.search_config\n else \"us\"\n )\n\n search_params = {\"lang\": lang, \"country\": country}\n\n # Try searching with retries when all engines fail\n for retry_count in range(max_retries + 1):\n results = await self._try_all_engines(query, num_results, search_params)\n\n if results:\n # Fetch content if requested\n if fetch_content:\n results = await self._fetch_content_for_results(results)\n\n # Return a successful structured response\n 
return SearchResponse(\n status=\"success\",\n query=query,\n results=results,\n metadata=SearchMetadata(\n total_results=len(results),\n language=lang,\n country=country,\n ),\n )\n\n if retry_count < max_retries:\n # All engines failed, wait and retry\n logger.warning(\n f\"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}...\"\n )\n await asyncio.sleep(retry_delay)\n else:\n logger.error(\n f\"All search engines failed after {max_retries} retries. Giving up.\"\n )\n\n # Return an error response\n return SearchResponse(\n query=query,\n error=\"All search engines failed to return results after multiple retries.\",\n results=[],\n )\n\n async def _try_all_engines(\n self, query: str, num_results: int, search_params: Dict[str, Any]\n ) -> List[SearchResult]:\n \"\"\"Try all search engines in the configured order.\"\"\"\n engine_order = self._get_engine_order()\n failed_engines = []\n\n for engine_name in engine_order:\n engine = self._search_engine[engine_name]\n logger.info(f\"🔎 Attempting search with {engine_name.capitalize()}...\")\n search_items = await self._perform_search_with_engine(\n engine, query, num_results, search_params\n )\n\n if not search_items:\n continue\n\n if failed_engines:\n logger.info(\n f\"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}\"\n )\n\n # Transform search items into structured results\n return [\n SearchResult(\n position=i + 1,\n url=item.url,\n title=item.title\n or f\"Result {i+1}\", # Ensure we always have a title\n description=item.description or \"\",\n source=engine_name,\n )\n for i, item in enumerate(search_items)\n ]\n\n if failed_engines:\n logger.error(f\"All search engines failed: {', '.join(failed_engines)}\")\n return []\n\n async def _fetch_content_for_results(\n self, results: List[SearchResult]\n ) -> List[SearchResult]:\n \"\"\"Fetch and add web content to search results.\"\"\"\n if not results:\n return []\n\n # 
Create tasks for each result\n tasks = [self._fetch_single_result_content(result) for result in results]\n\n # Type annotation to help type checker\n fetched_results = await asyncio.gather(*tasks)\n\n # Explicit validation of return type\n return [\n (\n result\n if isinstance(result, SearchResult)\n else SearchResult(**result.dict())\n )\n for result in fetched_results\n ]\n\n async def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:\n \"\"\"Fetch content for a single search result.\"\"\"\n if result.url:\n content = await self.content_fetcher.fetch_content(result.url)\n if content:\n result.raw_content = content\n return result\n\n def _get_engine_order(self) -> List[str]:\n \"\"\"Determines the order in which to try search engines.\"\"\"\n preferred = (\n getattr(config.search_config, \"engine\", \"google\").lower()\n if config.search_config\n else \"google\"\n )\n fallbacks = (\n [engine.lower() for engine in config.search_config.fallback_engines]\n if config.search_config\n and hasattr(config.search_config, \"fallback_engines\")\n else []\n )\n\n # Start with preferred engine, then fallbacks, then remaining engines\n engine_order = [preferred] if preferred in self._search_engine else []\n engine_order.extend(\n [\n fb\n for fb in fallbacks\n if fb in self._search_engine and fb not in engine_order\n ]\n )\n engine_order.extend([e for e in self._search_engine if e not in engine_order])\n\n return engine_order\n\n @retry(\n stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10)\n )\n async def _perform_search_with_engine(\n self,\n engine: WebSearchEngine,\n query: str,\n num_results: int,\n search_params: Dict[str, Any],\n ) -> List[SearchItem]:\n \"\"\"Execute search with the given engine and parameters.\"\"\"\n return await asyncio.get_event_loop().run_in_executor(\n None,\n lambda: list(\n engine.perform_search(\n query,\n num_results=num_results,\n lang=search_params.get(\"lang\"),\n 
country=search_params.get(\"country\"),\n )\n ),\n )\n\n\nif __name__ == \"__main__\":\n web_search = WebSearch()\n search_response = asyncio.run(\n web_search.execute(\n query=\"Python programming\", fetch_content=True, num_results=1\n )\n )\n print(search_response.to_tool_result())\n"], ["/OpenManus/app/mcp/server.py", "import logging\nimport sys\n\n\nlogging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])\n\nimport argparse\nimport asyncio\nimport atexit\nimport json\nfrom inspect import Parameter, Signature\nfrom typing import Any, Dict, Optional\n\nfrom mcp.server.fastmcp import FastMCP\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\n\n\nclass MCPServer:\n \"\"\"MCP Server implementation with tool registration and management.\"\"\"\n\n def __init__(self, name: str = \"openmanus\"):\n self.server = FastMCP(name)\n self.tools: Dict[str, BaseTool] = {}\n\n # Initialize standard tools\n self.tools[\"bash\"] = Bash()\n self.tools[\"browser\"] = BrowserUseTool()\n self.tools[\"editor\"] = StrReplaceEditor()\n self.tools[\"terminate\"] = Terminate()\n\n def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:\n \"\"\"Register a tool with parameter validation and documentation.\"\"\"\n tool_name = method_name or tool.name\n tool_param = tool.to_param()\n tool_function = tool_param[\"function\"]\n\n # Define the async function to be registered\n async def tool_method(**kwargs):\n logger.info(f\"Executing {tool_name}: {kwargs}\")\n result = await tool.execute(**kwargs)\n\n logger.info(f\"Result of {tool_name}: {result}\")\n\n # Handle different types of results (match original logic)\n if hasattr(result, \"model_dump\"):\n return json.dumps(result.model_dump())\n elif isinstance(result, dict):\n return 
json.dumps(result)\n return result\n\n # Set method metadata\n tool_method.__name__ = tool_name\n tool_method.__doc__ = self._build_docstring(tool_function)\n tool_method.__signature__ = self._build_signature(tool_function)\n\n # Store parameter schema (important for tools that access it programmatically)\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n tool_method._parameter_schema = {\n param_name: {\n \"description\": param_details.get(\"description\", \"\"),\n \"type\": param_details.get(\"type\", \"any\"),\n \"required\": param_name in required_params,\n }\n for param_name, param_details in param_props.items()\n }\n\n # Register with server\n self.server.tool()(tool_method)\n logger.info(f\"Registered tool: {tool_name}\")\n\n def _build_docstring(self, tool_function: dict) -> str:\n \"\"\"Build a formatted docstring from tool function metadata.\"\"\"\n description = tool_function.get(\"description\", \"\")\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n # Build docstring (match original format)\n docstring = description\n if param_props:\n docstring += \"\\n\\nParameters:\\n\"\n for param_name, param_details in param_props.items():\n required_str = (\n \"(required)\" if param_name in required_params else \"(optional)\"\n )\n param_type = param_details.get(\"type\", \"any\")\n param_desc = param_details.get(\"description\", \"\")\n docstring += (\n f\" {param_name} ({param_type}) {required_str}: {param_desc}\\n\"\n )\n\n return docstring\n\n def _build_signature(self, tool_function: dict) -> Signature:\n \"\"\"Build a function signature from tool function metadata.\"\"\"\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n parameters 
= []\n\n # Follow original type mapping\n for param_name, param_details in param_props.items():\n param_type = param_details.get(\"type\", \"\")\n default = Parameter.empty if param_name in required_params else None\n\n # Map JSON Schema types to Python types (same as original)\n annotation = Any\n if param_type == \"string\":\n annotation = str\n elif param_type == \"integer\":\n annotation = int\n elif param_type == \"number\":\n annotation = float\n elif param_type == \"boolean\":\n annotation = bool\n elif param_type == \"object\":\n annotation = dict\n elif param_type == \"array\":\n annotation = list\n\n # Create parameter with same structure as original\n param = Parameter(\n name=param_name,\n kind=Parameter.KEYWORD_ONLY,\n default=default,\n annotation=annotation,\n )\n parameters.append(param)\n\n return Signature(parameters=parameters)\n\n async def cleanup(self) -> None:\n \"\"\"Clean up server resources.\"\"\"\n logger.info(\"Cleaning up resources\")\n # Follow original cleanup logic - only clean browser tool\n if \"browser\" in self.tools and hasattr(self.tools[\"browser\"], \"cleanup\"):\n await self.tools[\"browser\"].cleanup()\n\n def register_all_tools(self) -> None:\n \"\"\"Register all tools with the server.\"\"\"\n for tool in self.tools.values():\n self.register_tool(tool)\n\n def run(self, transport: str = \"stdio\") -> None:\n \"\"\"Run the MCP server.\"\"\"\n # Register all tools\n self.register_all_tools()\n\n # Register cleanup function (match original behavior)\n atexit.register(lambda: asyncio.run(self.cleanup()))\n\n # Start server (with same logging as original)\n logger.info(f\"Starting OpenManus server ({transport} mode)\")\n self.server.run(transport=transport)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"OpenManus MCP Server\")\n parser.add_argument(\n \"--transport\",\n choices=[\"stdio\"],\n default=\"stdio\",\n help=\"Communication 
method: stdio or http (default: stdio)\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/schema.py", "from enum import Enum\nfrom typing import Any, List, Literal, Optional, Union\n\nfrom pydantic import BaseModel, Field\n\n\nclass Role(str, Enum):\n \"\"\"Message role options\"\"\"\n\n SYSTEM = \"system\"\n USER = \"user\"\n ASSISTANT = \"assistant\"\n TOOL = \"tool\"\n\n\nROLE_VALUES = tuple(role.value for role in Role)\nROLE_TYPE = Literal[ROLE_VALUES] # type: ignore\n\n\nclass ToolChoice(str, Enum):\n \"\"\"Tool choice options\"\"\"\n\n NONE = \"none\"\n AUTO = \"auto\"\n REQUIRED = \"required\"\n\n\nTOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)\nTOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore\n\n\nclass AgentState(str, Enum):\n \"\"\"Agent execution states\"\"\"\n\n IDLE = \"IDLE\"\n RUNNING = \"RUNNING\"\n FINISHED = \"FINISHED\"\n ERROR = \"ERROR\"\n\n\nclass Function(BaseModel):\n name: str\n arguments: str\n\n\nclass ToolCall(BaseModel):\n \"\"\"Represents a tool/function call in a message\"\"\"\n\n id: str\n type: str = \"function\"\n function: Function\n\n\nclass Message(BaseModel):\n \"\"\"Represents a chat message in the conversation\"\"\"\n\n role: ROLE_TYPE = Field(...) 
# type: ignore\n content: Optional[str] = Field(default=None)\n tool_calls: Optional[List[ToolCall]] = Field(default=None)\n name: Optional[str] = Field(default=None)\n tool_call_id: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n\n def __add__(self, other) -> List[\"Message\"]:\n \"\"\"支持 Message + list 或 Message + Message 的操作\"\"\"\n if isinstance(other, list):\n return [self] + other\n elif isinstance(other, Message):\n return [self, other]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'\"\n )\n\n def __radd__(self, other) -> List[\"Message\"]:\n \"\"\"支持 list + Message 的操作\"\"\"\n if isinstance(other, list):\n return other + [self]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'\"\n )\n\n def to_dict(self) -> dict:\n \"\"\"Convert message to dictionary format\"\"\"\n message = {\"role\": self.role}\n if self.content is not None:\n message[\"content\"] = self.content\n if self.tool_calls is not None:\n message[\"tool_calls\"] = [tool_call.dict() for tool_call in self.tool_calls]\n if self.name is not None:\n message[\"name\"] = self.name\n if self.tool_call_id is not None:\n message[\"tool_call_id\"] = self.tool_call_id\n if self.base64_image is not None:\n message[\"base64_image\"] = self.base64_image\n return message\n\n @classmethod\n def user_message(\n cls, content: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a user message\"\"\"\n return cls(role=Role.USER, content=content, base64_image=base64_image)\n\n @classmethod\n def system_message(cls, content: str) -> \"Message\":\n \"\"\"Create a system message\"\"\"\n return cls(role=Role.SYSTEM, content=content)\n\n @classmethod\n def assistant_message(\n cls, content: Optional[str] = None, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create an assistant message\"\"\"\n return 
cls(role=Role.ASSISTANT, content=content, base64_image=base64_image)\n\n @classmethod\n def tool_message(\n cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a tool message\"\"\"\n return cls(\n role=Role.TOOL,\n content=content,\n name=name,\n tool_call_id=tool_call_id,\n base64_image=base64_image,\n )\n\n @classmethod\n def from_tool_calls(\n cls,\n tool_calls: List[Any],\n content: Union[str, List[str]] = \"\",\n base64_image: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Create ToolCallsMessage from raw tool calls.\n\n Args:\n tool_calls: Raw tool calls from LLM\n content: Optional message content\n base64_image: Optional base64 encoded image\n \"\"\"\n formatted_calls = [\n {\"id\": call.id, \"function\": call.function.model_dump(), \"type\": \"function\"}\n for call in tool_calls\n ]\n return cls(\n role=Role.ASSISTANT,\n content=content,\n tool_calls=formatted_calls,\n base64_image=base64_image,\n **kwargs,\n )\n\n\nclass Memory(BaseModel):\n messages: List[Message] = Field(default_factory=list)\n max_messages: int = Field(default=100)\n\n def add_message(self, message: Message) -> None:\n \"\"\"Add a message to memory\"\"\"\n self.messages.append(message)\n # Optional: Implement message limit\n if len(self.messages) > self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def add_messages(self, messages: List[Message]) -> None:\n \"\"\"Add multiple messages to memory\"\"\"\n self.messages.extend(messages)\n # Optional: Implement message limit\n if len(self.messages) > self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def clear(self) -> None:\n \"\"\"Clear all messages\"\"\"\n self.messages.clear()\n\n def get_recent_messages(self, n: int) -> List[Message]:\n \"\"\"Get n most recent messages\"\"\"\n return self.messages[-n:]\n\n def to_dict_list(self) -> List[dict]:\n \"\"\"Convert messages to list of dicts\"\"\"\n return [msg.to_dict() for msg 
in self.messages]\n"], ["/OpenManus/app/tool/mcp.py", "from contextlib import AsyncExitStack\nfrom typing import Dict, List, Optional\n\nfrom mcp import ClientSession, StdioServerParameters\nfrom mcp.client.sse import sse_client\nfrom mcp.client.stdio import stdio_client\nfrom mcp.types import ListToolsResult, TextContent\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.tool_collection import ToolCollection\n\n\nclass MCPClientTool(BaseTool):\n \"\"\"Represents a tool proxy that can be called on the MCP server from the client side.\"\"\"\n\n session: Optional[ClientSession] = None\n server_id: str = \"\" # Add server identifier\n original_name: str = \"\"\n\n async def execute(self, **kwargs) -> ToolResult:\n \"\"\"Execute the tool by making a remote call to the MCP server.\"\"\"\n if not self.session:\n return ToolResult(error=\"Not connected to MCP server\")\n\n try:\n logger.info(f\"Executing tool: {self.original_name}\")\n result = await self.session.call_tool(self.original_name, kwargs)\n content_str = \", \".join(\n item.text for item in result.content if isinstance(item, TextContent)\n )\n return ToolResult(output=content_str or \"No output returned.\")\n except Exception as e:\n return ToolResult(error=f\"Error executing tool: {str(e)}\")\n\n\nclass MCPClients(ToolCollection):\n \"\"\"\n A collection of tools that connects to multiple MCP servers and manages available tools through the Model Context Protocol.\n \"\"\"\n\n sessions: Dict[str, ClientSession] = {}\n exit_stacks: Dict[str, AsyncExitStack] = {}\n description: str = \"MCP client tools for server interaction\"\n\n def __init__(self):\n super().__init__() # Initialize with empty tools list\n self.name = \"mcp\" # Keep name for backward compatibility\n\n async def connect_sse(self, server_url: str, server_id: str = \"\") -> None:\n \"\"\"Connect to an MCP server using SSE transport.\"\"\"\n if not server_url:\n raise ValueError(\"Server URL is 
required.\")\n\n server_id = server_id or server_url\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n streams_context = sse_client(url=server_url)\n streams = await exit_stack.enter_async_context(streams_context)\n session = await exit_stack.enter_async_context(ClientSession(*streams))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def connect_stdio(\n self, command: str, args: List[str], server_id: str = \"\"\n ) -> None:\n \"\"\"Connect to an MCP server using stdio transport.\"\"\"\n if not command:\n raise ValueError(\"Server command is required.\")\n\n server_id = server_id or command\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n server_params = StdioServerParameters(command=command, args=args)\n stdio_transport = await exit_stack.enter_async_context(\n stdio_client(server_params)\n )\n read, write = stdio_transport\n session = await exit_stack.enter_async_context(ClientSession(read, write))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def _initialize_and_list_tools(self, server_id: str) -> None:\n \"\"\"Initialize session and populate tool map.\"\"\"\n session = self.sessions.get(server_id)\n if not session:\n raise RuntimeError(f\"Session not initialized for server {server_id}\")\n\n await session.initialize()\n response = await session.list_tools()\n\n # Create proper tool objects for each server tool\n for tool in response.tools:\n original_name = tool.name\n tool_name = f\"mcp_{server_id}_{original_name}\"\n tool_name = self._sanitize_tool_name(tool_name)\n\n server_tool = MCPClientTool(\n name=tool_name,\n 
description=tool.description,\n parameters=tool.inputSchema,\n session=session,\n server_id=server_id,\n original_name=original_name,\n )\n self.tool_map[tool_name] = server_tool\n\n # Update tools tuple\n self.tools = tuple(self.tool_map.values())\n logger.info(\n f\"Connected to server {server_id} with tools: {[tool.name for tool in response.tools]}\"\n )\n\n def _sanitize_tool_name(self, name: str) -> str:\n \"\"\"Sanitize tool name to match MCPClientTool requirements.\"\"\"\n import re\n\n # Replace invalid characters with underscores\n sanitized = re.sub(r\"[^a-zA-Z0-9_-]\", \"_\", name)\n\n # Remove consecutive underscores\n sanitized = re.sub(r\"_+\", \"_\", sanitized)\n\n # Remove leading/trailing underscores\n sanitized = sanitized.strip(\"_\")\n\n # Truncate to 64 characters if needed\n if len(sanitized) > 64:\n sanitized = sanitized[:64]\n\n return sanitized\n\n async def list_tools(self) -> ListToolsResult:\n \"\"\"List all available tools.\"\"\"\n tools_result = ListToolsResult(tools=[])\n for session in self.sessions.values():\n response = await session.list_tools()\n tools_result.tools += response.tools\n return tools_result\n\n async def disconnect(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from a specific MCP server or all servers if no server_id provided.\"\"\"\n if server_id:\n if server_id in self.sessions:\n try:\n exit_stack = self.exit_stacks.get(server_id)\n\n # Close the exit stack which will handle session cleanup\n if exit_stack:\n try:\n await exit_stack.aclose()\n except RuntimeError as e:\n if \"cancel scope\" in str(e).lower():\n logger.warning(\n f\"Cancel scope error during disconnect from {server_id}, continuing with cleanup: {e}\"\n )\n else:\n raise\n\n # Clean up references\n self.sessions.pop(server_id, None)\n self.exit_stacks.pop(server_id, None)\n\n # Remove tools associated with this server\n self.tool_map = {\n k: v\n for k, v in self.tool_map.items()\n if v.server_id != server_id\n }\n self.tools = 
tuple(self.tool_map.values())\n logger.info(f\"Disconnected from MCP server {server_id}\")\n except Exception as e:\n logger.error(f\"Error disconnecting from server {server_id}: {e}\")\n else:\n # Disconnect from all servers in a deterministic order\n for sid in sorted(list(self.sessions.keys())):\n await self.disconnect(sid)\n self.tool_map = {}\n self.tools = tuple()\n logger.info(\"Disconnected from all MCP servers\")\n"], ["/OpenManus/app/tool/chart_visualization/data_visualization.py", "import asyncio\nimport json\nimport os\nfrom typing import Any, Hashable\n\nimport pandas as pd\nfrom pydantic import Field, model_validator\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\n\n\nclass DataVisualization(BaseTool):\n name: str = \"data_visualization\"\n description: str = \"\"\"Visualize statistical chart or Add insights in chart with JSON info from visualization_preparation tool. You can do steps as follows:\n1. Visualize statistical chart\n2. Choose insights into chart based on step 1 (Optional)\nOutputs:\n1. Charts (png/html)\n2. 
Charts Insights (.md)(Optional)\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"json_path\": {\n \"type\": \"string\",\n \"description\": \"\"\"file path of json info with \".json\" in the end\"\"\",\n },\n \"output_type\": {\n \"description\": \"Rendering format (html=interactive)\",\n \"type\": \"string\",\n \"default\": \"html\",\n \"enum\": [\"png\", \"html\"],\n },\n \"tool_type\": {\n \"description\": \"visualize chart or add insights\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"language\": {\n \"description\": \"english(en) / chinese(zh)\",\n \"type\": \"string\",\n \"default\": \"en\",\n \"enum\": [\"zh\", \"en\"],\n },\n },\n \"required\": [\"code\"],\n }\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n\n @model_validator(mode=\"after\")\n def initialize_llm(self):\n \"\"\"Initialize llm with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n return self\n\n def get_file_path(\n self,\n json_info: list[dict[str, str]],\n path_str: str,\n directory: str = None,\n ) -> list[str]:\n res = []\n for item in json_info:\n if os.path.exists(item[path_str]):\n res.append(item[path_str])\n elif os.path.exists(\n os.path.join(f\"{directory or config.workspace_root}\", item[path_str])\n ):\n res.append(\n os.path.join(\n f\"{directory or config.workspace_root}\", item[path_str]\n )\n )\n else:\n raise Exception(f\"No such file or directory: {item[path_str]}\")\n return res\n\n def success_output_template(self, result: list[dict[str, str]]) -> str:\n content = \"\"\n if len(result) == 0:\n return \"Is EMPTY!\"\n for item in result:\n content += f\"\"\"## {item['title']}\\nChart saved in: {item['chart_path']}\"\"\"\n if \"insight_path\" in item and item[\"insight_path\"] and \"insight_md\" in item:\n content += \"\\n\" + item[\"insight_md\"]\n 
else:\n content += \"\\n\"\n return f\"Chart Generated Successful!\\n{content}\"\n\n async def data_visualization(\n self, json_info: list[dict[str, str]], output_type: str, language: str\n ) -> str:\n data_list = []\n csv_file_path = self.get_file_path(json_info, \"csvFilePath\")\n for index, item in enumerate(json_info):\n df = pd.read_csv(csv_file_path[index], encoding=\"utf-8\")\n df = df.astype(object)\n df = df.where(pd.notnull(df), None)\n data_dict_list = df.to_json(orient=\"records\", force_ascii=False)\n\n data_list.append(\n {\n \"file_name\": os.path.basename(csv_file_path[index]).replace(\n \".csv\", \"\"\n ),\n \"dict_data\": data_dict_list,\n \"chartTitle\": item[\"chartTitle\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n dict_data=item[\"dict_data\"],\n chart_description=item[\"chartTitle\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"visualization\",\n language=language,\n )\n for item in data_list\n ]\n\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n csv_path = csv_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {csv_path}: {result['error']}\")\n else:\n success_list.append(\n {\n **result,\n \"title\": json_info[index][\"chartTitle\"],\n }\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error chart generated{'\\n'.join(error_list)}\\n{self.success_output_template(success_list)}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{self.success_output_template(success_list)}\"}\n\n async def add_insighs(\n self, json_info: list[dict[str, str]], output_type: str\n ) -> str:\n data_list = []\n chart_file_path = self.get_file_path(\n json_info, \"chartPath\", os.path.join(config.workspace_root, \"visualization\")\n )\n for index, item in enumerate(json_info):\n if \"insights_id\" in item:\n data_list.append(\n {\n \"file_name\": 
os.path.basename(chart_file_path[index]).replace(\n f\".{output_type}\", \"\"\n ),\n \"insights_id\": item[\"insights_id\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n insights_id=item[\"insights_id\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"insight\",\n )\n for item in data_list\n ]\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n chart_path = chart_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {chart_path}: {result['error']}\")\n else:\n success_list.append(chart_path)\n success_template = (\n f\"# Charts Update with Insights\\n{','.join(success_list)}\"\n if len(success_list) > 0\n else \"\"\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error in chart insights:{'\\n'.join(error_list)}\\n{success_template}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{success_template}\"}\n\n async def execute(\n self,\n json_path: str,\n output_type: str | None = \"html\",\n tool_type: str | None = \"visualization\",\n language: str | None = \"en\",\n ) -> str:\n try:\n logger.info(f\"📈 data_visualization with {json_path} in: {tool_type} \")\n with open(json_path, \"r\", encoding=\"utf-8\") as file:\n json_info = json.load(file)\n if tool_type == \"visualization\":\n return await self.data_visualization(json_info, output_type, language)\n else:\n return await self.add_insighs(json_info, output_type)\n except Exception as e:\n return {\n \"observation\": f\"Error: {e}\",\n \"success\": False,\n }\n\n async def invoke_vmind(\n self,\n file_name: str,\n output_type: str,\n task_type: str,\n insights_id: list[str] = None,\n dict_data: list[dict[Hashable, Any]] = None,\n chart_description: str = None,\n language: str = \"en\",\n ):\n llm_config = {\n \"base_url\": self.llm.base_url,\n \"model\": self.llm.model,\n \"api_key\": self.llm.api_key,\n }\n vmind_params = {\n 
\"llm_config\": llm_config,\n \"user_prompt\": chart_description,\n \"dataset\": dict_data,\n \"file_name\": file_name,\n \"output_type\": output_type,\n \"insights_id\": insights_id,\n \"task_type\": task_type,\n \"directory\": str(config.workspace_root),\n \"language\": language,\n }\n # build async sub process\n process = await asyncio.create_subprocess_exec(\n \"npx\",\n \"ts-node\",\n \"src/chartVisualize.ts\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=os.path.dirname(__file__),\n )\n input_json = json.dumps(vmind_params, ensure_ascii=False).encode(\"utf-8\")\n try:\n stdout, stderr = await process.communicate(input_json)\n stdout_str = stdout.decode(\"utf-8\")\n stderr_str = stderr.decode(\"utf-8\")\n if process.returncode == 0:\n return json.loads(stdout_str)\n else:\n return {\"error\": f\"Node.js Error: {stderr_str}\"}\n except Exception as e:\n return {\"error\": f\"Subprocess Error: {str(e)}\"}\n"], ["/OpenManus/app/bedrock.py", "import json\nimport sys\nimport time\nimport uuid\nfrom datetime import datetime\nfrom typing import Dict, List, Literal, Optional\n\nimport boto3\n\n\n# Global variables to track the current tool use ID across function calls\n# Tmp solution\nCURRENT_TOOLUSE_ID = None\n\n\n# Class to handle OpenAI-style response formatting\nclass OpenAIResponse:\n def __init__(self, data):\n # Recursively convert nested dicts and lists to OpenAIResponse objects\n for key, value in data.items():\n if isinstance(value, dict):\n value = OpenAIResponse(value)\n elif isinstance(value, list):\n value = [\n OpenAIResponse(item) if isinstance(item, dict) else item\n for item in value\n ]\n setattr(self, key, value)\n\n def model_dump(self, *args, **kwargs):\n # Convert object to dict and add timestamp\n data = self.__dict__\n data[\"created_at\"] = datetime.now().isoformat()\n return data\n\n\n# Main client class for interacting with Amazon Bedrock\nclass BedrockClient:\n def 
__init__(self):\n # Initialize Bedrock client, you need to configure AWS env first\n try:\n self.client = boto3.client(\"bedrock-runtime\")\n self.chat = Chat(self.client)\n except Exception as e:\n print(f\"Error initializing Bedrock client: {e}\")\n sys.exit(1)\n\n\n# Chat interface class\nclass Chat:\n def __init__(self, client):\n self.completions = ChatCompletions(client)\n\n\n# Core class handling chat completions functionality\nclass ChatCompletions:\n def __init__(self, client):\n self.client = client\n\n def _convert_openai_tools_to_bedrock_format(self, tools):\n # Convert OpenAI function calling format to Bedrock tool format\n bedrock_tools = []\n for tool in tools:\n if tool.get(\"type\") == \"function\":\n function = tool.get(\"function\", {})\n bedrock_tool = {\n \"toolSpec\": {\n \"name\": function.get(\"name\", \"\"),\n \"description\": function.get(\"description\", \"\"),\n \"inputSchema\": {\n \"json\": {\n \"type\": \"object\",\n \"properties\": function.get(\"parameters\", {}).get(\n \"properties\", {}\n ),\n \"required\": function.get(\"parameters\", {}).get(\n \"required\", []\n ),\n }\n },\n }\n }\n bedrock_tools.append(bedrock_tool)\n return bedrock_tools\n\n def _convert_openai_messages_to_bedrock_format(self, messages):\n # Convert OpenAI message format to Bedrock message format\n bedrock_messages = []\n system_prompt = []\n for message in messages:\n if message.get(\"role\") == \"system\":\n system_prompt = [{\"text\": message.get(\"content\")}]\n elif message.get(\"role\") == \"user\":\n bedrock_message = {\n \"role\": message.get(\"role\", \"user\"),\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"assistant\":\n bedrock_message = {\n \"role\": \"assistant\",\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n openai_tool_calls = message.get(\"tool_calls\", [])\n if openai_tool_calls:\n bedrock_tool_use = {\n \"toolUseId\": 
openai_tool_calls[0][\"id\"],\n \"name\": openai_tool_calls[0][\"function\"][\"name\"],\n \"input\": json.loads(\n openai_tool_calls[0][\"function\"][\"arguments\"]\n ),\n }\n bedrock_message[\"content\"].append({\"toolUse\": bedrock_tool_use})\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = openai_tool_calls[0][\"id\"]\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"tool\":\n bedrock_message = {\n \"role\": \"user\",\n \"content\": [\n {\n \"toolResult\": {\n \"toolUseId\": CURRENT_TOOLUSE_ID,\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n }\n ],\n }\n bedrock_messages.append(bedrock_message)\n else:\n raise ValueError(f\"Invalid role: {message.get('role')}\")\n return system_prompt, bedrock_messages\n\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n # Convert Bedrock response format to OpenAI format\n content = \"\"\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n content_array = bedrock_response[\"output\"][\"message\"][\"content\"]\n content = \"\".join(item.get(\"text\", \"\") for item in content_array)\n if content == \"\":\n content = \".\"\n\n # Handle tool calls in response\n openai_tool_calls = []\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n for content_item in bedrock_response[\"output\"][\"message\"][\"content\"]:\n if content_item.get(\"toolUse\"):\n bedrock_tool_use = content_item[\"toolUse\"]\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n openai_tool_call = {\n \"id\": CURRENT_TOOLUSE_ID,\n \"type\": \"function\",\n \"function\": {\n \"name\": bedrock_tool_use[\"name\"],\n \"arguments\": json.dumps(bedrock_tool_use[\"input\"]),\n },\n }\n openai_tool_calls.append(openai_tool_call)\n\n # Construct final OpenAI format response\n openai_format = {\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"created\": int(time.time()),\n \"object\": \"chat.completion\",\n 
\"system_fingerprint\": None,\n \"choices\": [\n {\n \"finish_reason\": bedrock_response.get(\"stopReason\", \"end_turn\"),\n \"index\": 0,\n \"message\": {\n \"content\": content,\n \"role\": bedrock_response.get(\"output\", {})\n .get(\"message\", {})\n .get(\"role\", \"assistant\"),\n \"tool_calls\": openai_tool_calls\n if openai_tool_calls != []\n else None,\n \"function_call\": None,\n },\n }\n ],\n \"usage\": {\n \"completion_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"outputTokens\", 0\n ),\n \"prompt_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"inputTokens\", 0\n ),\n \"total_tokens\": bedrock_response.get(\"usage\", {}).get(\"totalTokens\", 0),\n },\n }\n return OpenAIResponse(openai_format)\n\n async def _invoke_bedrock(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Non-streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n openai_response = self._convert_bedrock_response_to_openai_format(response)\n return openai_response\n\n async def _invoke_bedrock_stream(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse_stream(\n modelId=model,\n 
system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n\n # Initialize response structure\n bedrock_response = {\n \"output\": {\"message\": {\"role\": \"\", \"content\": []}},\n \"stopReason\": \"\",\n \"usage\": {},\n \"metrics\": {},\n }\n bedrock_response_text = \"\"\n bedrock_response_tool_input = \"\"\n\n # Process streaming response\n stream = response.get(\"stream\")\n if stream:\n for event in stream:\n if event.get(\"messageStart\", {}).get(\"role\"):\n bedrock_response[\"output\"][\"message\"][\"role\"] = event[\n \"messageStart\"\n ][\"role\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"text\"):\n bedrock_response_text += event[\"contentBlockDelta\"][\"delta\"][\"text\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"text\"], end=\"\", flush=True\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 0:\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"text\": bedrock_response_text}\n )\n if event.get(\"contentBlockStart\", {}).get(\"start\", {}).get(\"toolUse\"):\n bedrock_tool_use = event[\"contentBlockStart\"][\"start\"][\"toolUse\"]\n tool_use = {\n \"toolUseId\": bedrock_tool_use[\"toolUseId\"],\n \"name\": bedrock_tool_use[\"name\"],\n }\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"toolUse\": tool_use}\n )\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"toolUse\"):\n bedrock_response_tool_input += event[\"contentBlockDelta\"][\"delta\"][\n \"toolUse\"\n ][\"input\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"toolUse\"][\"input\"],\n end=\"\",\n flush=True,\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 1:\n bedrock_response[\"output\"][\"message\"][\"content\"][1][\"toolUse\"][\n 
\"input\"\n ] = json.loads(bedrock_response_tool_input)\n print()\n openai_response = self._convert_bedrock_response_to_openai_format(\n bedrock_response\n )\n return openai_response\n\n def create(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n stream: Optional[bool] = True,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Main entry point for chat completion\n bedrock_tools = []\n if tools is not None:\n bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)\n if stream:\n return self._invoke_bedrock_stream(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n else:\n return self._invoke_bedrock(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n"], ["/OpenManus/app/sandbox/core/manager.py", "import asyncio\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom typing import Dict, Optional, Set\n\nimport docker\nfrom docker.errors import APIError, ImageNotFound\n\nfrom app.config import SandboxSettings\nfrom app.logger import logger\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxManager:\n \"\"\"Docker sandbox manager.\n\n Manages multiple DockerSandbox instances lifecycle including creation,\n monitoring, and cleanup. 
Provides concurrent access control and automatic\n cleanup mechanisms for sandbox resources.\n\n Attributes:\n max_sandboxes: Maximum allowed number of sandboxes.\n idle_timeout: Sandbox idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n _sandboxes: Active sandbox instance mapping.\n _last_used: Last used time record for sandboxes.\n \"\"\"\n\n def __init__(\n self,\n max_sandboxes: int = 100,\n idle_timeout: int = 3600,\n cleanup_interval: int = 300,\n ):\n \"\"\"Initializes sandbox manager.\n\n Args:\n max_sandboxes: Maximum sandbox count limit.\n idle_timeout: Idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n \"\"\"\n self.max_sandboxes = max_sandboxes\n self.idle_timeout = idle_timeout\n self.cleanup_interval = cleanup_interval\n\n # Docker client\n self._client = docker.from_env()\n\n # Resource mappings\n self._sandboxes: Dict[str, DockerSandbox] = {}\n self._last_used: Dict[str, float] = {}\n\n # Concurrency control\n self._locks: Dict[str, asyncio.Lock] = {}\n self._global_lock = asyncio.Lock()\n self._active_operations: Set[str] = set()\n\n # Cleanup task\n self._cleanup_task: Optional[asyncio.Task] = None\n self._is_shutting_down = False\n\n # Start automatic cleanup\n self.start_cleanup_task()\n\n async def ensure_image(self, image: str) -> bool:\n \"\"\"Ensures Docker image is available.\n\n Args:\n image: Image name.\n\n Returns:\n bool: Whether image is available.\n \"\"\"\n try:\n self._client.images.get(image)\n return True\n except ImageNotFound:\n try:\n logger.info(f\"Pulling image {image}...\")\n await asyncio.get_event_loop().run_in_executor(\n None, self._client.images.pull, image\n )\n return True\n except (APIError, Exception) as e:\n logger.error(f\"Failed to pull image {image}: {e}\")\n return False\n\n @asynccontextmanager\n async def sandbox_operation(self, sandbox_id: str):\n \"\"\"Context manager for sandbox operations.\n\n Provides concurrency control and usage time 
updates.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Raises:\n KeyError: If sandbox not found.\n \"\"\"\n if sandbox_id not in self._locks:\n self._locks[sandbox_id] = asyncio.Lock()\n\n async with self._locks[sandbox_id]:\n if sandbox_id not in self._sandboxes:\n raise KeyError(f\"Sandbox {sandbox_id} not found\")\n\n self._active_operations.add(sandbox_id)\n try:\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n yield self._sandboxes[sandbox_id]\n finally:\n self._active_operations.remove(sandbox_id)\n\n async def create_sandbox(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> str:\n \"\"\"Creates a new sandbox instance.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n\n Returns:\n str: Sandbox ID.\n\n Raises:\n RuntimeError: If max sandbox count reached or creation fails.\n \"\"\"\n async with self._global_lock:\n if len(self._sandboxes) >= self.max_sandboxes:\n raise RuntimeError(\n f\"Maximum number of sandboxes ({self.max_sandboxes}) reached\"\n )\n\n config = config or SandboxSettings()\n if not await self.ensure_image(config.image):\n raise RuntimeError(f\"Failed to ensure Docker image: {config.image}\")\n\n sandbox_id = str(uuid.uuid4())\n try:\n sandbox = DockerSandbox(config, volume_bindings)\n await sandbox.create()\n\n self._sandboxes[sandbox_id] = sandbox\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n self._locks[sandbox_id] = asyncio.Lock()\n\n logger.info(f\"Created sandbox {sandbox_id}\")\n return sandbox_id\n\n except Exception as e:\n logger.error(f\"Failed to create sandbox: {e}\")\n if sandbox_id in self._sandboxes:\n await self.delete_sandbox(sandbox_id)\n raise RuntimeError(f\"Failed to create sandbox: {e}\")\n\n async def get_sandbox(self, sandbox_id: str) -> DockerSandbox:\n \"\"\"Gets a sandbox instance.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Returns:\n DockerSandbox: Sandbox instance.\n\n Raises:\n 
KeyError: If sandbox does not exist.\n \"\"\"\n async with self.sandbox_operation(sandbox_id) as sandbox:\n return sandbox\n\n def start_cleanup_task(self) -> None:\n \"\"\"Starts automatic cleanup task.\"\"\"\n\n async def cleanup_loop():\n while not self._is_shutting_down:\n try:\n await self._cleanup_idle_sandboxes()\n except Exception as e:\n logger.error(f\"Error in cleanup loop: {e}\")\n await asyncio.sleep(self.cleanup_interval)\n\n self._cleanup_task = asyncio.create_task(cleanup_loop())\n\n async def _cleanup_idle_sandboxes(self) -> None:\n \"\"\"Cleans up idle sandboxes.\"\"\"\n current_time = asyncio.get_event_loop().time()\n to_cleanup = []\n\n async with self._global_lock:\n for sandbox_id, last_used in self._last_used.items():\n if (\n sandbox_id not in self._active_operations\n and current_time - last_used > self.idle_timeout\n ):\n to_cleanup.append(sandbox_id)\n\n for sandbox_id in to_cleanup:\n try:\n await self.delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Error cleaning up sandbox {sandbox_id}: {e}\")\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up all resources.\"\"\"\n logger.info(\"Starting manager cleanup...\")\n self._is_shutting_down = True\n\n # Cancel cleanup task\n if self._cleanup_task:\n self._cleanup_task.cancel()\n try:\n await asyncio.wait_for(self._cleanup_task, timeout=1.0)\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n\n # Get all sandbox IDs to clean up\n async with self._global_lock:\n sandbox_ids = list(self._sandboxes.keys())\n\n # Concurrently clean up all sandboxes\n cleanup_tasks = []\n for sandbox_id in sandbox_ids:\n task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id))\n cleanup_tasks.append(task)\n\n if cleanup_tasks:\n # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting\n try:\n await asyncio.wait(cleanup_tasks, timeout=30.0)\n except asyncio.TimeoutError:\n logger.error(\"Sandbox cleanup timed out\")\n\n # Clean up remaining 
references\n self._sandboxes.clear()\n self._last_used.clear()\n self._locks.clear()\n self._active_operations.clear()\n\n logger.info(\"Manager cleanup completed\")\n\n async def _safe_delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Safely deletes a single sandbox.\n\n Args:\n sandbox_id: Sandbox ID to delete.\n \"\"\"\n try:\n if sandbox_id in self._active_operations:\n logger.warning(\n f\"Sandbox {sandbox_id} has active operations, waiting for completion\"\n )\n for _ in range(10): # Wait at most 10 times\n await asyncio.sleep(0.5)\n if sandbox_id not in self._active_operations:\n break\n else:\n logger.warning(\n f\"Timeout waiting for sandbox {sandbox_id} operations to complete\"\n )\n\n # Get reference to sandbox object\n sandbox = self._sandboxes.get(sandbox_id)\n if sandbox:\n await sandbox.cleanup()\n\n # Remove sandbox record from manager\n async with self._global_lock:\n self._sandboxes.pop(sandbox_id, None)\n self._last_used.pop(sandbox_id, None)\n self._locks.pop(sandbox_id, None)\n logger.info(f\"Deleted sandbox {sandbox_id}\")\n except Exception as e:\n logger.error(f\"Error during cleanup of sandbox {sandbox_id}: {e}\")\n\n async def delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Deletes specified sandbox.\n\n Args:\n sandbox_id: Sandbox ID.\n \"\"\"\n if sandbox_id not in self._sandboxes:\n return\n\n try:\n await self._safe_delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Failed to delete sandbox {sandbox_id}: {e}\")\n\n async def __aenter__(self) -> \"SandboxManager\":\n \"\"\"Async context manager entry.\"\"\"\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n\n def get_stats(self) -> Dict:\n \"\"\"Gets manager statistics.\n\n Returns:\n Dict: Statistics information.\n \"\"\"\n return {\n \"total_sandboxes\": len(self._sandboxes),\n \"active_operations\": len(self._active_operations),\n \"max_sandboxes\": 
self.max_sandboxes,\n \"idle_timeout\": self.idle_timeout,\n \"cleanup_interval\": self.cleanup_interval,\n \"is_shutting_down\": self._is_shutting_down,\n }\n"], ["/OpenManus/app/sandbox/core/terminal.py", "\"\"\"\nAsynchronous Docker Terminal\n\nThis module provides asynchronous terminal functionality for Docker containers,\nallowing interactive command execution with timeout control.\n\"\"\"\n\nimport asyncio\nimport re\nimport socket\nfrom typing import Dict, Optional, Tuple, Union\n\nimport docker\nfrom docker import APIClient\nfrom docker.errors import APIError\nfrom docker.models.containers import Container\n\n\nclass DockerSession:\n def __init__(self, container_id: str) -> None:\n \"\"\"Initializes a Docker session.\n\n Args:\n container_id: ID of the Docker container.\n \"\"\"\n self.api = APIClient()\n self.container_id = container_id\n self.exec_id = None\n self.socket = None\n\n async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None:\n \"\"\"Creates an interactive session with the container.\n\n Args:\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n\n Raises:\n RuntimeError: If socket connection fails.\n \"\"\"\n startup_command = [\n \"bash\",\n \"-c\",\n f\"cd {working_dir} && \"\n \"PROMPT_COMMAND='' \"\n \"PS1='$ ' \"\n \"exec bash --norc --noprofile\",\n ]\n\n exec_data = self.api.exec_create(\n self.container_id,\n startup_command,\n stdin=True,\n tty=True,\n stdout=True,\n stderr=True,\n privileged=True,\n user=\"root\",\n environment={**env_vars, \"TERM\": \"dumb\", \"PS1\": \"$ \", \"PROMPT_COMMAND\": \"\"},\n )\n self.exec_id = exec_data[\"Id\"]\n\n socket_data = self.api.exec_start(\n self.exec_id, socket=True, tty=True, stream=True, demux=True\n )\n\n if hasattr(socket_data, \"_sock\"):\n self.socket = socket_data._sock\n self.socket.setblocking(False)\n else:\n raise RuntimeError(\"Failed to get socket connection\")\n\n await self._read_until_prompt()\n\n async def 
close(self) -> None:\n \"\"\"Cleans up session resources.\n\n 1. Sends exit command\n 2. Closes socket connection\n 3. Checks and cleans up exec instance\n \"\"\"\n try:\n if self.socket:\n # Send exit command to close bash session\n try:\n self.socket.sendall(b\"exit\\n\")\n # Allow time for command execution\n await asyncio.sleep(0.1)\n except:\n pass # Ignore sending errors, continue cleanup\n\n # Close socket connection\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except:\n pass # Some platforms may not support shutdown\n\n self.socket.close()\n self.socket = None\n\n if self.exec_id:\n try:\n # Check exec instance status\n exec_inspect = self.api.exec_inspect(self.exec_id)\n if exec_inspect.get(\"Running\", False):\n # If still running, wait for it to complete\n await asyncio.sleep(0.5)\n except:\n pass # Ignore inspection errors, continue cleanup\n\n self.exec_id = None\n\n except Exception as e:\n # Log error but don't raise, ensure cleanup continues\n print(f\"Warning: Error during session cleanup: {e}\")\n\n async def _read_until_prompt(self) -> str:\n \"\"\"Reads output until prompt is found.\n\n Returns:\n String containing output up to the prompt.\n\n Raises:\n socket.error: If socket communication fails.\n \"\"\"\n buffer = b\"\"\n while b\"$ \" not in buffer:\n try:\n chunk = self.socket.recv(4096)\n if chunk:\n buffer += chunk\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n return buffer.decode(\"utf-8\")\n\n async def execute(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes a command and returns cleaned output.\n\n Args:\n command: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string with prompt markers removed.\n\n Raises:\n RuntimeError: If session not initialized or execution fails.\n TimeoutError: If command execution exceeds timeout.\n \"\"\"\n if not self.socket:\n raise 
RuntimeError(\"Session not initialized\")\n\n try:\n # Sanitize command to prevent shell injection\n sanitized_command = self._sanitize_command(command)\n full_command = f\"{sanitized_command}\\necho $?\\n\"\n self.socket.sendall(full_command.encode())\n\n async def read_output() -> str:\n buffer = b\"\"\n result_lines = []\n command_sent = False\n\n while True:\n try:\n chunk = self.socket.recv(4096)\n if not chunk:\n break\n\n buffer += chunk\n lines = buffer.split(b\"\\n\")\n\n buffer = lines[-1]\n lines = lines[:-1]\n\n for line in lines:\n line = line.rstrip(b\"\\r\")\n\n if not command_sent:\n command_sent = True\n continue\n\n if line.strip() == b\"echo $?\" or line.strip().isdigit():\n continue\n\n if line.strip():\n result_lines.append(line)\n\n if buffer.endswith(b\"$ \"):\n break\n\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n\n output = b\"\\n\".join(result_lines).decode(\"utf-8\")\n output = re.sub(r\"\\n\\$ echo \\$\\$?.*$\", \"\", output)\n\n return output\n\n if timeout:\n result = await asyncio.wait_for(read_output(), timeout)\n else:\n result = await read_output()\n\n return result.strip()\n\n except asyncio.TimeoutError:\n raise TimeoutError(f\"Command execution timed out after {timeout} seconds\")\n except Exception as e:\n raise RuntimeError(f\"Failed to execute command: {e}\")\n\n def _sanitize_command(self, command: str) -> str:\n \"\"\"Sanitizes the command string to prevent shell injection.\n\n Args:\n command: Raw command string.\n\n Returns:\n Sanitized command string.\n\n Raises:\n ValueError: If command contains potentially dangerous patterns.\n \"\"\"\n\n # Additional checks for specific risky commands\n risky_commands = [\n \"rm -rf /\",\n \"rm -rf /*\",\n \"mkfs\",\n \"dd if=/dev/zero\",\n \":(){:|:&};:\",\n \"chmod -R 777 /\",\n \"chown -R\",\n ]\n\n for risky in risky_commands:\n if risky in command.lower():\n raise ValueError(\n f\"Command contains potentially 
dangerous operation: {risky}\"\n )\n\n return command\n\n\nclass AsyncDockerizedTerminal:\n def __init__(\n self,\n container: Union[str, Container],\n working_dir: str = \"/workspace\",\n env_vars: Optional[Dict[str, str]] = None,\n default_timeout: int = 60,\n ) -> None:\n \"\"\"Initializes an asynchronous terminal for Docker containers.\n\n Args:\n container: Docker container ID or Container object.\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n default_timeout: Default command execution timeout in seconds.\n \"\"\"\n self.client = docker.from_env()\n self.container = (\n container\n if isinstance(container, Container)\n else self.client.containers.get(container)\n )\n self.working_dir = working_dir\n self.env_vars = env_vars or {}\n self.default_timeout = default_timeout\n self.session = None\n\n async def init(self) -> None:\n \"\"\"Initializes the terminal environment.\n\n Ensures working directory exists and creates an interactive session.\n\n Raises:\n RuntimeError: If initialization fails.\n \"\"\"\n await self._ensure_workdir()\n\n self.session = DockerSession(self.container.id)\n await self.session.create(self.working_dir, self.env_vars)\n\n async def _ensure_workdir(self) -> None:\n \"\"\"Ensures working directory exists in container.\n\n Raises:\n RuntimeError: If directory creation fails.\n \"\"\"\n try:\n await self._exec_simple(f\"mkdir -p {self.working_dir}\")\n except APIError as e:\n raise RuntimeError(f\"Failed to create working directory: {e}\")\n\n async def _exec_simple(self, cmd: str) -> Tuple[int, str]:\n \"\"\"Executes a simple command using Docker's exec_run.\n\n Args:\n cmd: Command to execute.\n\n Returns:\n Tuple of (exit_code, output).\n \"\"\"\n result = await asyncio.to_thread(\n self.container.exec_run, cmd, environment=self.env_vars\n )\n return result.exit_code, result.output.decode(\"utf-8\")\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n 
\"\"\"Runs a command in the container with timeout.\n\n Args:\n cmd: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If terminal not initialized.\n \"\"\"\n if not self.session:\n raise RuntimeError(\"Terminal not initialized\")\n\n return await self.session.execute(cmd, timeout=timeout or self.default_timeout)\n\n async def close(self) -> None:\n \"\"\"Closes the terminal session.\"\"\"\n if self.session:\n await self.session.close()\n\n async def __aenter__(self) -> \"AsyncDockerizedTerminal\":\n \"\"\"Async context manager entry.\"\"\"\n await self.init()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.close()\n"], ["/OpenManus/run_mcp.py", "#!/usr/bin/env python\nimport argparse\nimport asyncio\nimport sys\n\nfrom app.agent.mcp import MCPAgent\nfrom app.config import config\nfrom app.logger import logger\n\n\nclass MCPRunner:\n \"\"\"Runner class for MCP Agent with proper path handling and configuration.\"\"\"\n\n def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()\n\n async def initialize(\n self,\n connection_type: str,\n server_url: str | None = None,\n ) -> None:\n \"\"\"Initialize the MCP agent with the appropriate connection.\"\"\"\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: # sse\n await self.agent.initialize(connection_type=\"sse\", server_url=server_url)\n\n logger.info(f\"Connected to MCP server via {connection_type}\")\n\n async def run_interactive(self) -> None:\n \"\"\"Run the agent in interactive mode.\"\"\"\n print(\"\\nMCP Agent Interactive Mode (type 'exit' to 
quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n print(f\"\\nAgent: {response}\")\n\n async def run_single_prompt(self, prompt: str) -> None:\n \"\"\"Run the agent with a single prompt.\"\"\"\n await self.agent.run(prompt)\n\n async def run_default(self) -> None:\n \"\"\"Run the agent in default mode.\"\"\"\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")\n\n async def cleanup(self) -> None:\n \"\"\"Clean up agent resources.\"\"\"\n await self.agent.cleanup()\n logger.info(\"Session ended\")\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Run the MCP Agent\")\n parser.add_argument(\n \"--connection\",\n \"-c\",\n choices=[\"stdio\", \"sse\"],\n default=\"stdio\",\n help=\"Connection type: stdio or sse\",\n )\n parser.add_argument(\n \"--server-url\",\n default=\"http://127.0.0.1:8000/sse\",\n help=\"URL for SSE connection\",\n )\n parser.add_argument(\n \"--interactive\", \"-i\", action=\"store_true\", help=\"Run in interactive mode\"\n )\n parser.add_argument(\"--prompt\", \"-p\", help=\"Single prompt to execute and exit\")\n return parser.parse_args()\n\n\nasync def run_mcp() -> None:\n \"\"\"Main entry point for the MCP runner.\"\"\"\n args = parse_args()\n runner = MCPRunner()\n\n try:\n await runner.initialize(args.connection, args.server_url)\n\n if args.prompt:\n await runner.run_single_prompt(args.prompt)\n elif args.interactive:\n await runner.run_interactive()\n else:\n await runner.run_default()\n\n except KeyboardInterrupt:\n logger.info(\"Program interrupted by user\")\n except Exception as e:\n logger.error(f\"Error running 
MCPAgent: {str(e)}\", exc_info=True)\n sys.exit(1)\n finally:\n await runner.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_mcp())\n"], ["/OpenManus/app/tool/base.py", "from abc import ABC, abstractmethod\nfrom typing import Any, Dict, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass BaseTool(ABC, BaseModel):\n name: str\n description: str\n parameters: Optional[dict] = None\n\n class Config:\n arbitrary_types_allowed = True\n\n async def __call__(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n return await self.execute(**kwargs)\n\n @abstractmethod\n async def execute(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n\n def to_param(self) -> Dict:\n \"\"\"Convert tool to function call format.\"\"\"\n return {\n \"type\": \"function\",\n \"function\": {\n \"name\": self.name,\n \"description\": self.description,\n \"parameters\": self.parameters,\n },\n }\n\n\nclass ToolResult(BaseModel):\n \"\"\"Represents the result of a tool execution.\"\"\"\n\n output: Any = Field(default=None)\n error: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n system: Optional[str] = Field(default=None)\n\n class Config:\n arbitrary_types_allowed = True\n\n def __bool__(self):\n return any(getattr(self, field) for field in self.__fields__)\n\n def __add__(self, other: \"ToolResult\"):\n def combine_fields(\n field: Optional[str], other_field: Optional[str], concatenate: bool = True\n ):\n if field and other_field:\n if concatenate:\n return field + other_field\n raise ValueError(\"Cannot combine tool results\")\n return field or other_field\n\n return ToolResult(\n output=combine_fields(self.output, other.output),\n error=combine_fields(self.error, other.error),\n base64_image=combine_fields(self.base64_image, other.base64_image, False),\n system=combine_fields(self.system, other.system),\n )\n\n def __str__(self):\n return f\"Error: {self.error}\" if 
self.error else self.output\n\n def replace(self, **kwargs):\n \"\"\"Returns a new ToolResult with the given fields replaced.\"\"\"\n # return self.copy(update=kwargs)\n return type(self)(**{**self.dict(), **kwargs})\n\n\nclass CLIResult(ToolResult):\n \"\"\"A ToolResult that can be rendered as a CLI output.\"\"\"\n\n\nclass ToolFailure(ToolResult):\n \"\"\"A ToolResult that represents a failure.\"\"\"\n"], ["/OpenManus/app/prompt/browser.py", "SYSTEM_PROMPT = \"\"\"\\\nYou are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.\n\n# Input Format\nTask\nPrevious steps\nCurrent URL\nOpen Tabs\nInteractive Elements\n[index]text\n- index: Numeric identifier for interaction\n- type: HTML element type (button, input, etc.)\n- text: Element description\nExample:\n[33]\n\n- Only elements with numeric indexes in [] are interactive\n- elements without [] provide only context\n\n# Response Rules\n1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:\n{{\"current_state\": {{\"evaluation_previous_goal\": \"Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not\",\n\"memory\": \"Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz\",\n\"next_goal\": \"What needs to be done with the next immediate action\"}},\n\"action\":[{{\"one_action_name\": {{// action-specific parameter}}}}, // ... more actions in sequence]}}\n\n2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. 
Use maximum {{max_actions}} actions per sequence.\nCommon action sequences:\n- Form filling: [{{\"input_text\": {{\"index\": 1, \"text\": \"username\"}}}}, {{\"input_text\": {{\"index\": 2, \"text\": \"password\"}}}}, {{\"click_element\": {{\"index\": 3}}}}]\n- Navigation and extraction: [{{\"go_to_url\": {{\"url\": \"https://example.com\"}}}}, {{\"extract_content\": {{\"goal\": \"extract the names\"}}}}]\n- Actions are executed in the given order\n- If the page changes after an action, the sequence is interrupted and you get the new state.\n- Only provide the action sequence until an action which changes the page state significantly.\n- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page\n- only use multiple actions if it makes sense.\n\n3. ELEMENT INTERACTION:\n- Only use indexes of the interactive elements\n- Elements marked with \"[]Non-interactive text\" are non-interactive\n\n4. NAVIGATION & ERROR HANDLING:\n- If no suitable elements exist, use other functions to complete the task\n- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.\n- Handle popups/cookies by accepting or closing them\n- Use scroll to find elements you are looking for\n- If you want to research something, open a new tab instead of using the current tab\n- If captcha pops up, try to solve it - else try a different approach\n- If the page is not fully loaded, use wait action\n\n5. TASK COMPLETION:\n- Use the done action as the last action as soon as the ultimate task is complete\n- Dont use \"done\" before you are done with everything the user asked you, except you reach the last step of max_steps.\n- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. 
If not everything the user asked for is completed set success in done to false!\n- If you have to do something repeatedly for example the task says for \"each\", or \"for all\", or \"x times\", count always inside \"memory\" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.\n- Don't hallucinate actions\n- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.\n\n6. VISUAL CONTEXT:\n- When an image is provided, use it to understand the page layout\n- Bounding boxes with labels on their top right corner correspond to element indexes\n\n7. Form filling:\n- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.\n\n8. Long tasks:\n- Keep track of the status and subresults in the memory.\n\n9. 
Extraction:\n- If your task is to find information - call extract_content on the specific pages to get and store the information.\nYour responses must be always JSON with the specified format.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nWhat should I do next to achieve my goal?\n\nWhen you see [Current state starts here], focus on the following:\n- Current URL and page title{url_placeholder}\n- Available tabs{tabs_placeholder}\n- Interactive elements and their indices\n- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)\n- Any action results or errors{results_placeholder}\n\nFor browser interactions:\n- To navigate: browser_use with action=\"go_to_url\", url=\"...\"\n- To click: browser_use with action=\"click_element\", index=N\n- To type: browser_use with action=\"input_text\", index=N, text=\"...\"\n- To extract: browser_use with action=\"extract_content\", goal=\"...\"\n- To scroll: browser_use with action=\"scroll_down\" or \"scroll_up\"\n\nConsider both what's visible and what might be beyond the current viewport.\nBe methodical - remember your progress and what you've learned so far.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/tool/create_chat_completion.py", "from typing import Any, List, Optional, Type, Union, get_args, get_origin\n\nfrom pydantic import BaseModel, Field\n\nfrom app.tool import BaseTool\n\n\nclass CreateChatCompletion(BaseTool):\n name: str = \"create_chat_completion\"\n description: str = (\n \"Creates a structured completion with specified output formatting.\"\n )\n\n # Type mapping for JSON schema\n type_mapping: dict = {\n str: \"string\",\n int: \"integer\",\n float: \"number\",\n bool: \"boolean\",\n dict: \"object\",\n list: \"array\",\n }\n response_type: Optional[Type] = None\n required: List[str] = Field(default_factory=lambda: [\"response\"])\n\n def __init__(self, response_type: Optional[Type] = 
str):\n \"\"\"Initialize with a specific response type.\"\"\"\n super().__init__()\n self.response_type = response_type\n self.parameters = self._build_parameters()\n\n def _build_parameters(self) -> dict:\n \"\"\"Build parameters schema based on response type.\"\"\"\n if self.response_type == str:\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"string\",\n \"description\": \"The response text that should be delivered to the user.\",\n },\n },\n \"required\": self.required,\n }\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n schema = self.response_type.model_json_schema()\n return {\n \"type\": \"object\",\n \"properties\": schema[\"properties\"],\n \"required\": schema.get(\"required\", self.required),\n }\n\n return self._create_type_schema(self.response_type)\n\n def _create_type_schema(self, type_hint: Type) -> dict:\n \"\"\"Create a JSON schema for the given type.\"\"\"\n origin = get_origin(type_hint)\n args = get_args(type_hint)\n\n # Handle primitive types\n if origin is None:\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Response of type {type_hint.__name__}\",\n }\n },\n \"required\": self.required,\n }\n\n # Handle List type\n if origin is list:\n item_type = args[0] if args else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"array\",\n \"items\": self._get_type_info(item_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Dict type\n if origin is dict:\n value_type = args[1] if len(args) > 1 else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"object\",\n \"additionalProperties\": self._get_type_info(value_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Union type\n if origin is Union:\n return self._create_union_schema(args)\n\n return 
self._build_parameters()\n\n def _get_type_info(self, type_hint: Type) -> dict:\n \"\"\"Get type information for a single type.\"\"\"\n if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):\n return type_hint.model_json_schema()\n\n return {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Value of type {getattr(type_hint, '__name__', 'any')}\",\n }\n\n def _create_union_schema(self, types: tuple) -> dict:\n \"\"\"Create schema for Union types.\"\"\"\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\"anyOf\": [self._get_type_info(t) for t in types]}\n },\n \"required\": self.required,\n }\n\n async def execute(self, required: list | None = None, **kwargs) -> Any:\n \"\"\"Execute the chat completion with type conversion.\n\n Args:\n required: List of required field names or None\n **kwargs: Response data\n\n Returns:\n Converted response based on response_type\n \"\"\"\n required = required or self.required\n\n # Handle case when required is a list\n if isinstance(required, list) and len(required) > 0:\n if len(required) == 1:\n required_field = required[0]\n result = kwargs.get(required_field, \"\")\n else:\n # Return multiple fields as a dictionary\n return {field: kwargs.get(field, \"\") for field in required}\n else:\n required_field = \"response\"\n result = kwargs.get(required_field, \"\")\n\n # Type conversion logic\n if self.response_type == str:\n return result\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n return self.response_type(**kwargs)\n\n if get_origin(self.response_type) in (list, dict):\n return result # Assuming result is already in correct format\n\n try:\n return self.response_type(result)\n except (ValueError, TypeError):\n return result\n"], ["/OpenManus/app/tool/planning.py", "# tool/planning.py\nfrom typing import Dict, List, Literal, Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, 
ToolResult\n\n\n_PLANNING_TOOL_DESCRIPTION = \"\"\"\nA planning tool that allows the agent to create and manage plans for solving complex tasks.\nThe tool provides functionality for creating plans, updating plan steps, and tracking progress.\n\"\"\"\n\n\nclass PlanningTool(BaseTool):\n \"\"\"\n A planning tool that allows the agent to create and manage plans for solving complex tasks.\n The tool provides functionality for creating plans, updating plan steps, and tracking progress.\n \"\"\"\n\n name: str = \"planning\"\n description: str = _PLANNING_TOOL_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The command to execute. Available commands: create, update, list, get, set_active, mark_step, delete.\",\n \"enum\": [\n \"create\",\n \"update\",\n \"list\",\n \"get\",\n \"set_active\",\n \"mark_step\",\n \"delete\",\n ],\n \"type\": \"string\",\n },\n \"plan_id\": {\n \"description\": \"Unique identifier for the plan. Required for create, update, set_active, and delete commands. Optional for get and mark_step (uses active plan if not specified).\",\n \"type\": \"string\",\n },\n \"title\": {\n \"description\": \"Title for the plan. Required for create command, optional for update command.\",\n \"type\": \"string\",\n },\n \"steps\": {\n \"description\": \"List of plan steps. Required for create command, optional for update command.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"step_index\": {\n \"description\": \"Index of the step to update (0-based). Required for mark_step command.\",\n \"type\": \"integer\",\n },\n \"step_status\": {\n \"description\": \"Status to set for a step. Used with mark_step command.\",\n \"enum\": [\"not_started\", \"in_progress\", \"completed\", \"blocked\"],\n \"type\": \"string\",\n },\n \"step_notes\": {\n \"description\": \"Additional notes for a step. 
Optional for mark_step command.\",\n \"type\": \"string\",\n },\n },\n \"required\": [\"command\"],\n \"additionalProperties\": False,\n }\n\n plans: dict = {} # Dictionary to store plans by plan_id\n _current_plan_id: Optional[str] = None # Track the current active plan\n\n async def execute(\n self,\n *,\n command: Literal[\n \"create\", \"update\", \"list\", \"get\", \"set_active\", \"mark_step\", \"delete\"\n ],\n plan_id: Optional[str] = None,\n title: Optional[str] = None,\n steps: Optional[List[str]] = None,\n step_index: Optional[int] = None,\n step_status: Optional[\n Literal[\"not_started\", \"in_progress\", \"completed\", \"blocked\"]\n ] = None,\n step_notes: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Execute the planning tool with the given command and parameters.\n\n Parameters:\n - command: The operation to perform\n - plan_id: Unique identifier for the plan\n - title: Title for the plan (used with create command)\n - steps: List of steps for the plan (used with create command)\n - step_index: Index of the step to update (used with mark_step command)\n - step_status: Status to set for a step (used with mark_step command)\n - step_notes: Additional notes for a step (used with mark_step command)\n \"\"\"\n\n if command == \"create\":\n return self._create_plan(plan_id, title, steps)\n elif command == \"update\":\n return self._update_plan(plan_id, title, steps)\n elif command == \"list\":\n return self._list_plans()\n elif command == \"get\":\n return self._get_plan(plan_id)\n elif command == \"set_active\":\n return self._set_active_plan(plan_id)\n elif command == \"mark_step\":\n return self._mark_step(plan_id, step_index, step_status, step_notes)\n elif command == \"delete\":\n return self._delete_plan(plan_id)\n else:\n raise ToolError(\n f\"Unrecognized command: {command}. 
Allowed commands are: create, update, list, get, set_active, mark_step, delete\"\n )\n\n def _create_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Create a new plan with the given ID, title, and steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: create\")\n\n if plan_id in self.plans:\n raise ToolError(\n f\"A plan with ID '{plan_id}' already exists. Use 'update' to modify existing plans.\"\n )\n\n if not title:\n raise ToolError(\"Parameter `title` is required for command: create\")\n\n if (\n not steps\n or not isinstance(steps, list)\n or not all(isinstance(step, str) for step in steps)\n ):\n raise ToolError(\n \"Parameter `steps` must be a non-empty list of strings for command: create\"\n )\n\n # Create a new plan with initialized step statuses\n plan = {\n \"plan_id\": plan_id,\n \"title\": title,\n \"steps\": steps,\n \"step_statuses\": [\"not_started\"] * len(steps),\n \"step_notes\": [\"\"] * len(steps),\n }\n\n self.plans[plan_id] = plan\n self._current_plan_id = plan_id # Set as active plan\n\n return ToolResult(\n output=f\"Plan created successfully with ID: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _update_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Update an existing plan with new title or steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: update\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n\n if title:\n plan[\"title\"] = title\n\n if steps:\n if not isinstance(steps, list) or not all(\n isinstance(step, str) for step in steps\n ):\n raise ToolError(\n \"Parameter `steps` must be a list of strings for command: update\"\n )\n\n # Preserve existing step statuses for unchanged steps\n old_steps = plan[\"steps\"]\n old_statuses = 
plan[\"step_statuses\"]\n old_notes = plan[\"step_notes\"]\n\n # Create new step statuses and notes\n new_statuses = []\n new_notes = []\n\n for i, step in enumerate(steps):\n # If the step exists at the same position in old steps, preserve status and notes\n if i < len(old_steps) and step == old_steps[i]:\n new_statuses.append(old_statuses[i])\n new_notes.append(old_notes[i])\n else:\n new_statuses.append(\"not_started\")\n new_notes.append(\"\")\n\n plan[\"steps\"] = steps\n plan[\"step_statuses\"] = new_statuses\n plan[\"step_notes\"] = new_notes\n\n return ToolResult(\n output=f\"Plan updated successfully: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _list_plans(self) -> ToolResult:\n \"\"\"List all available plans.\"\"\"\n if not self.plans:\n return ToolResult(\n output=\"No plans available. Create a plan with the 'create' command.\"\n )\n\n output = \"Available plans:\\n\"\n for plan_id, plan in self.plans.items():\n current_marker = \" (active)\" if plan_id == self._current_plan_id else \"\"\n completed = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"completed\"\n )\n total = len(plan[\"steps\"])\n progress = f\"{completed}/{total} steps completed\"\n output += f\"• {plan_id}{current_marker}: {plan['title']} - {progress}\\n\"\n\n return ToolResult(output=output)\n\n def _get_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Get details of a specific plan.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. 
Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n return ToolResult(output=self._format_plan(plan))\n\n def _set_active_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Set a plan as the active plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: set_active\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n self._current_plan_id = plan_id\n return ToolResult(\n output=f\"Plan '{plan_id}' is now the active plan.\\n\\n{self._format_plan(self.plans[plan_id])}\"\n )\n\n def _mark_step(\n self,\n plan_id: Optional[str],\n step_index: Optional[int],\n step_status: Optional[str],\n step_notes: Optional[str],\n ) -> ToolResult:\n \"\"\"Mark a step with a specific status and optional notes.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n if step_index is None:\n raise ToolError(\"Parameter `step_index` is required for command: mark_step\")\n\n plan = self.plans[plan_id]\n\n if step_index < 0 or step_index >= len(plan[\"steps\"]):\n raise ToolError(\n f\"Invalid step_index: {step_index}. Valid indices range from 0 to {len(plan['steps'])-1}.\"\n )\n\n if step_status and step_status not in [\n \"not_started\",\n \"in_progress\",\n \"completed\",\n \"blocked\",\n ]:\n raise ToolError(\n f\"Invalid step_status: {step_status}. 
Valid statuses are: not_started, in_progress, completed, blocked\"\n )\n\n if step_status:\n plan[\"step_statuses\"][step_index] = step_status\n\n if step_notes:\n plan[\"step_notes\"][step_index] = step_notes\n\n return ToolResult(\n output=f\"Step {step_index} updated in plan '{plan_id}'.\\n\\n{self._format_plan(plan)}\"\n )\n\n def _delete_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Delete a plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: delete\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n del self.plans[plan_id]\n\n # If the deleted plan was the active plan, clear the active plan\n if self._current_plan_id == plan_id:\n self._current_plan_id = None\n\n return ToolResult(output=f\"Plan '{plan_id}' has been deleted.\")\n\n def _format_plan(self, plan: Dict) -> str:\n \"\"\"Format a plan for display.\"\"\"\n output = f\"Plan: {plan['title']} (ID: {plan['plan_id']})\\n\"\n output += \"=\" * len(output) + \"\\n\\n\"\n\n # Calculate progress statistics\n total_steps = len(plan[\"steps\"])\n completed = sum(1 for status in plan[\"step_statuses\"] if status == \"completed\")\n in_progress = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"in_progress\"\n )\n blocked = sum(1 for status in plan[\"step_statuses\"] if status == \"blocked\")\n not_started = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"not_started\"\n )\n\n output += f\"Progress: {completed}/{total_steps} steps completed \"\n if total_steps > 0:\n percentage = (completed / total_steps) * 100\n output += f\"({percentage:.1f}%)\\n\"\n else:\n output += \"(0%)\\n\"\n\n output += f\"Status: {completed} completed, {in_progress} in progress, {blocked} blocked, {not_started} not started\\n\\n\"\n output += \"Steps:\\n\"\n\n # Add each step with its status and notes\n for i, (step, status, notes) in enumerate(\n zip(plan[\"steps\"], plan[\"step_statuses\"], 
plan[\"step_notes\"])\n ):\n status_symbol = {\n \"not_started\": \"[ ]\",\n \"in_progress\": \"[→]\",\n \"completed\": \"[✓]\",\n \"blocked\": \"[!]\",\n }.get(status, \"[ ]\")\n\n output += f\"{i}. {status_symbol} {step}\\n\"\n if notes:\n output += f\" Notes: {notes}\\n\"\n\n return output\n"], ["/OpenManus/app/sandbox/core/sandbox.py", "import asyncio\nimport io\nimport os\nimport tarfile\nimport tempfile\nimport uuid\nfrom typing import Dict, Optional\n\nimport docker\nfrom docker.errors import NotFound\nfrom docker.models.containers import Container\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.exceptions import SandboxTimeoutError\nfrom app.sandbox.core.terminal import AsyncDockerizedTerminal\n\n\nclass DockerSandbox:\n \"\"\"Docker sandbox environment.\n\n Provides a containerized execution environment with resource limits,\n file operations, and command execution capabilities.\n\n Attributes:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n client: Docker client.\n container: Docker container instance.\n terminal: Container terminal interface.\n \"\"\"\n\n def __init__(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Initializes a sandbox instance.\n\n Args:\n config: Sandbox configuration. 
Default configuration used if None.\n volume_bindings: Volume mappings in {host_path: container_path} format.\n \"\"\"\n self.config = config or SandboxSettings()\n self.volume_bindings = volume_bindings or {}\n self.client = docker.from_env()\n self.container: Optional[Container] = None\n self.terminal: Optional[AsyncDockerizedTerminal] = None\n\n async def create(self) -> \"DockerSandbox\":\n \"\"\"Creates and starts the sandbox container.\n\n Returns:\n Current sandbox instance.\n\n Raises:\n docker.errors.APIError: If Docker API call fails.\n RuntimeError: If container creation or startup fails.\n \"\"\"\n try:\n # Prepare container config\n host_config = self.client.api.create_host_config(\n mem_limit=self.config.memory_limit,\n cpu_period=100000,\n cpu_quota=int(100000 * self.config.cpu_limit),\n network_mode=\"none\" if not self.config.network_enabled else \"bridge\",\n binds=self._prepare_volume_bindings(),\n )\n\n # Generate unique container name with sandbox_ prefix\n container_name = f\"sandbox_{uuid.uuid4().hex[:8]}\"\n\n # Create container\n container = await asyncio.to_thread(\n self.client.api.create_container,\n image=self.config.image,\n command=\"tail -f /dev/null\",\n hostname=\"sandbox\",\n working_dir=self.config.work_dir,\n host_config=host_config,\n name=container_name,\n tty=True,\n detach=True,\n )\n\n self.container = self.client.containers.get(container[\"Id\"])\n\n # Start container\n await asyncio.to_thread(self.container.start)\n\n # Initialize terminal\n self.terminal = AsyncDockerizedTerminal(\n container[\"Id\"],\n self.config.work_dir,\n env_vars={\"PYTHONUNBUFFERED\": \"1\"}\n # Ensure Python output is not buffered\n )\n await self.terminal.init()\n\n return self\n\n except Exception as e:\n await self.cleanup() # Ensure resources are cleaned up\n raise RuntimeError(f\"Failed to create sandbox: {e}\") from e\n\n def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]:\n \"\"\"Prepares volume binding configuration.\n\n 
Returns:\n Volume binding configuration dictionary.\n \"\"\"\n bindings = {}\n\n # Create and add working directory mapping\n work_dir = self._ensure_host_dir(self.config.work_dir)\n bindings[work_dir] = {\"bind\": self.config.work_dir, \"mode\": \"rw\"}\n\n # Add custom volume bindings\n for host_path, container_path in self.volume_bindings.items():\n bindings[host_path] = {\"bind\": container_path, \"mode\": \"rw\"}\n\n return bindings\n\n @staticmethod\n def _ensure_host_dir(path: str) -> str:\n \"\"\"Ensures directory exists on the host.\n\n Args:\n path: Directory path.\n\n Returns:\n Actual path on the host.\n \"\"\"\n host_path = os.path.join(\n tempfile.gettempdir(),\n f\"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}\",\n )\n os.makedirs(host_path, exist_ok=True)\n return host_path\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the sandbox.\n\n Args:\n cmd: Command to execute.\n timeout: Timeout in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If sandbox not initialized or command execution fails.\n TimeoutError: If command execution times out.\n \"\"\"\n if not self.terminal:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n return await self.terminal.run_command(\n cmd, timeout=timeout or self.config.timeout\n )\n except TimeoutError:\n raise SandboxTimeoutError(\n f\"Command execution timed out after {timeout or self.config.timeout} seconds\"\n )\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads a file from the container.\n\n Args:\n path: File path.\n\n Returns:\n File contents as string.\n\n Raises:\n FileNotFoundError: If file does not exist.\n RuntimeError: If read operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n # Get file archive\n resolved_path = self._safe_resolve_path(path)\n tar_stream, _ = await asyncio.to_thread(\n self.container.get_archive, resolved_path\n 
)\n\n # Read file content from tar stream\n content = await self._read_from_tar(tar_stream)\n return content.decode(\"utf-8\")\n\n except NotFound:\n raise FileNotFoundError(f\"File not found: {path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to read file: {e}\")\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to a file in the container.\n\n Args:\n path: Target path.\n content: File content.\n\n Raises:\n RuntimeError: If write operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n resolved_path = self._safe_resolve_path(path)\n parent_dir = os.path.dirname(resolved_path)\n\n # Create parent directory\n if parent_dir:\n await self.run_command(f\"mkdir -p {parent_dir}\")\n\n # Prepare file data\n tar_stream = await self._create_tar_stream(\n os.path.basename(path), content.encode(\"utf-8\")\n )\n\n # Write file\n await asyncio.to_thread(\n self.container.put_archive, parent_dir or \"/\", tar_stream\n )\n\n except Exception as e:\n raise RuntimeError(f\"Failed to write file: {e}\")\n\n def _safe_resolve_path(self, path: str) -> str:\n \"\"\"Safely resolves container path, preventing path traversal.\n\n Args:\n path: Original path.\n\n Returns:\n Resolved absolute path.\n\n Raises:\n ValueError: If path contains potentially unsafe patterns.\n \"\"\"\n # Check for path traversal attempts\n if \"..\" in path.split(\"/\"):\n raise ValueError(\"Path contains potentially unsafe patterns\")\n\n resolved = (\n os.path.join(self.config.work_dir, path)\n if not os.path.isabs(path)\n else path\n )\n return resolved\n\n async def copy_from(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file from the container.\n\n Args:\n src_path: Source file path (container).\n dst_path: Destination path (host).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n # Ensure destination file's parent 
directory exists\n parent_dir = os.path.dirname(dst_path)\n if parent_dir:\n os.makedirs(parent_dir, exist_ok=True)\n\n # Get file stream\n resolved_src = self._safe_resolve_path(src_path)\n stream, stat = await asyncio.to_thread(\n self.container.get_archive, resolved_src\n )\n\n # Create temporary directory to extract file\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Write stream to temporary file\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with open(tar_path, \"wb\") as f:\n for chunk in stream:\n f.write(chunk)\n\n # Extract file\n with tarfile.open(tar_path) as tar:\n members = tar.getmembers()\n if not members:\n raise FileNotFoundError(f\"Source file is empty: {src_path}\")\n\n # If destination is a directory, we should preserve relative path structure\n if os.path.isdir(dst_path):\n tar.extractall(dst_path)\n else:\n # If destination is a file, we only extract the source file's content\n if len(members) > 1:\n raise RuntimeError(\n f\"Source path is a directory but destination is a file: {src_path}\"\n )\n\n with open(dst_path, \"wb\") as dst:\n src_file = tar.extractfile(members[0])\n if src_file is None:\n raise RuntimeError(\n f\"Failed to extract file: {src_path}\"\n )\n dst.write(src_file.read())\n\n except docker.errors.NotFound:\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n async def copy_to(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file to the container.\n\n Args:\n src_path: Source file path (host).\n dst_path: Destination path (container).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n if not os.path.exists(src_path):\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n\n # Create destination directory in container\n resolved_dst = self._safe_resolve_path(dst_path)\n container_dir = os.path.dirname(resolved_dst)\n if 
container_dir:\n await self.run_command(f\"mkdir -p {container_dir}\")\n\n # Create tar file to upload\n with tempfile.TemporaryDirectory() as tmp_dir:\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with tarfile.open(tar_path, \"w\") as tar:\n # Handle directory source path\n if os.path.isdir(src_path):\n os.path.basename(src_path.rstrip(\"/\"))\n for root, _, files in os.walk(src_path):\n for file in files:\n file_path = os.path.join(root, file)\n arcname = os.path.join(\n os.path.basename(dst_path),\n os.path.relpath(file_path, src_path),\n )\n tar.add(file_path, arcname=arcname)\n else:\n # Add single file to tar\n tar.add(src_path, arcname=os.path.basename(dst_path))\n\n # Read tar file content\n with open(tar_path, \"rb\") as f:\n data = f.read()\n\n # Upload to container\n await asyncio.to_thread(\n self.container.put_archive,\n os.path.dirname(resolved_dst) or \"/\",\n data,\n )\n\n # Verify file was created successfully\n try:\n await self.run_command(f\"test -e {resolved_dst}\")\n except Exception:\n raise RuntimeError(f\"Failed to verify file creation: {dst_path}\")\n\n except FileNotFoundError:\n raise\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n @staticmethod\n async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO:\n \"\"\"Creates a tar file stream.\n\n Args:\n name: Filename.\n content: File content.\n\n Returns:\n Tar file stream.\n \"\"\"\n tar_stream = io.BytesIO()\n with tarfile.open(fileobj=tar_stream, mode=\"w\") as tar:\n tarinfo = tarfile.TarInfo(name=name)\n tarinfo.size = len(content)\n tar.addfile(tarinfo, io.BytesIO(content))\n tar_stream.seek(0)\n return tar_stream\n\n @staticmethod\n async def _read_from_tar(tar_stream) -> bytes:\n \"\"\"Reads file content from a tar stream.\n\n Args:\n tar_stream: Tar file stream.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If read operation fails.\n \"\"\"\n with tempfile.NamedTemporaryFile() as tmp:\n for chunk in tar_stream:\n 
tmp.write(chunk)\n tmp.seek(0)\n\n with tarfile.open(fileobj=tmp) as tar:\n member = tar.next()\n if not member:\n raise RuntimeError(\"Empty tar archive\")\n\n file_content = tar.extractfile(member)\n if not file_content:\n raise RuntimeError(\"Failed to extract file content\")\n\n return file_content.read()\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up sandbox resources.\"\"\"\n errors = []\n try:\n if self.terminal:\n try:\n await self.terminal.close()\n except Exception as e:\n errors.append(f\"Terminal cleanup error: {e}\")\n finally:\n self.terminal = None\n\n if self.container:\n try:\n await asyncio.to_thread(self.container.stop, timeout=5)\n except Exception as e:\n errors.append(f\"Container stop error: {e}\")\n\n try:\n await asyncio.to_thread(self.container.remove, force=True)\n except Exception as e:\n errors.append(f\"Container remove error: {e}\")\n finally:\n self.container = None\n\n except Exception as e:\n errors.append(f\"General cleanup error: {e}\")\n\n if errors:\n print(f\"Warning: Errors during cleanup: {', '.join(errors)}\")\n\n async def __aenter__(self) -> \"DockerSandbox\":\n \"\"\"Async context manager entry.\"\"\"\n return await self.create()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n"], ["/OpenManus/app/tool/str_replace_editor.py", "\"\"\"File and directory manipulation tool with sandbox support.\"\"\"\n\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, DefaultDict, List, Literal, Optional, get_args\n\nfrom app.config import config\nfrom app.exceptions import ToolError\nfrom app.tool import BaseTool\nfrom app.tool.base import CLIResult, ToolResult\nfrom app.tool.file_operators import (\n FileOperator,\n LocalFileOperator,\n PathLike,\n SandboxFileOperator,\n)\n\n\nCommand = Literal[\n \"view\",\n \"create\",\n \"str_replace\",\n \"insert\",\n \"undo_edit\",\n]\n\n# Constants\nSNIPPET_LINES: int = 
4\nMAX_RESPONSE_LEN: int = 16000\nTRUNCATED_MESSAGE: str = (\n \"To save on context only part of this file has been shown to you. \"\n \"You should retry this tool after you have searched inside the file with `grep -n` \"\n \"in order to find the line numbers of what you are looking for.\"\n)\n\n# Tool description\n_STR_REPLACE_EDITOR_DESCRIPTION = \"\"\"Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with ``\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. 
Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n\"\"\"\n\n\ndef maybe_truncate(\n content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN\n) -> str:\n \"\"\"Truncate content and append a notice if content exceeds the specified length.\"\"\"\n if not truncate_after or len(content) <= truncate_after:\n return content\n return content[:truncate_after] + TRUNCATED_MESSAGE\n\n\nclass StrReplaceEditor(BaseTool):\n \"\"\"A tool for viewing, creating, and editing files with sandbox support.\"\"\"\n\n name: str = \"str_replace_editor\"\n description: str = _STR_REPLACE_EDITOR_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\",\n \"enum\": [\"view\", \"create\", \"str_replace\", \"insert\", \"undo_edit\"],\n \"type\": \"string\",\n },\n \"path\": {\n \"description\": \"Absolute path to file or directory.\",\n \"type\": \"string\",\n },\n \"file_text\": {\n \"description\": \"Required parameter of `create` command, with the content of the file to be created.\",\n \"type\": \"string\",\n },\n \"old_str\": {\n \"description\": \"Required parameter of `str_replace` command containing the string in `path` to replace.\",\n \"type\": \"string\",\n },\n \"new_str\": {\n \"description\": \"Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\",\n \"type\": \"string\",\n },\n \"insert_line\": {\n \"description\": \"Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\",\n \"type\": \"integer\",\n },\n \"view_range\": {\n \"description\": \"Optional parameter of `view` command when `path` points to a file. 
If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\",\n \"items\": {\"type\": \"integer\"},\n \"type\": \"array\",\n },\n },\n \"required\": [\"command\", \"path\"],\n }\n _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list)\n _local_operator: LocalFileOperator = LocalFileOperator()\n _sandbox_operator: SandboxFileOperator = SandboxFileOperator()\n\n # def _get_operator(self, use_sandbox: bool) -> FileOperator:\n def _get_operator(self) -> FileOperator:\n \"\"\"Get the appropriate file operator based on execution mode.\"\"\"\n return (\n self._sandbox_operator\n if config.sandbox.use_sandbox\n else self._local_operator\n )\n\n async def execute(\n self,\n *,\n command: Command,\n path: str,\n file_text: str | None = None,\n view_range: list[int] | None = None,\n old_str: str | None = None,\n new_str: str | None = None,\n insert_line: int | None = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Execute a file operation command.\"\"\"\n # Get the appropriate file operator\n operator = self._get_operator()\n\n # Validate path and command combination\n await self.validate_path(command, Path(path), operator)\n\n # Execute the appropriate command\n if command == \"view\":\n result = await self.view(path, view_range, operator)\n elif command == \"create\":\n if file_text is None:\n raise ToolError(\"Parameter `file_text` is required for command: create\")\n await operator.write_file(path, file_text)\n self._file_history[path].append(file_text)\n result = ToolResult(output=f\"File created successfully at: {path}\")\n elif command == \"str_replace\":\n if old_str is None:\n raise ToolError(\n \"Parameter `old_str` is required for command: str_replace\"\n )\n result = await self.str_replace(path, old_str, new_str, operator)\n elif command == \"insert\":\n 
if insert_line is None:\n raise ToolError(\n \"Parameter `insert_line` is required for command: insert\"\n )\n if new_str is None:\n raise ToolError(\"Parameter `new_str` is required for command: insert\")\n result = await self.insert(path, insert_line, new_str, operator)\n elif command == \"undo_edit\":\n result = await self.undo_edit(path, operator)\n else:\n # This should be caught by type checking, but we include it for safety\n raise ToolError(\n f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {\", \".join(get_args(Command))}'\n )\n\n return str(result)\n\n async def validate_path(\n self, command: str, path: Path, operator: FileOperator\n ) -> None:\n \"\"\"Validate path and command combination based on execution environment.\"\"\"\n # Check if path is absolute\n if not path.is_absolute():\n raise ToolError(f\"The path {path} is not an absolute path\")\n\n # Only check if path exists for non-create commands\n if command != \"create\":\n if not await operator.exists(path):\n raise ToolError(\n f\"The path {path} does not exist. Please provide a valid path.\"\n )\n\n # Check if path is a directory\n is_dir = await operator.is_directory(path)\n if is_dir and command != \"view\":\n raise ToolError(\n f\"The path {path} is a directory and only the `view` command can be used on directories\"\n )\n\n # Check if file exists for create command\n elif command == \"create\":\n exists = await operator.exists(path)\n if exists:\n raise ToolError(\n f\"File already exists at: {path}. 
Cannot overwrite files using command `create`.\"\n )\n\n async def view(\n self,\n path: PathLike,\n view_range: Optional[List[int]] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Display file or directory content.\"\"\"\n # Determine if path is a directory\n is_dir = await operator.is_directory(path)\n\n if is_dir:\n # Directory handling\n if view_range:\n raise ToolError(\n \"The `view_range` parameter is not allowed when `path` points to a directory.\"\n )\n\n return await self._view_directory(path, operator)\n else:\n # File handling\n return await self._view_file(path, operator, view_range)\n\n @staticmethod\n async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult:\n \"\"\"Display directory contents.\"\"\"\n find_cmd = f\"find {path} -maxdepth 2 -not -path '*/\\\\.*'\"\n\n # Execute command using the operator\n returncode, stdout, stderr = await operator.run_command(find_cmd)\n\n if not stderr:\n stdout = (\n f\"Here's the files and directories up to 2 levels deep in {path}, \"\n f\"excluding hidden items:\\n{stdout}\\n\"\n )\n\n return CLIResult(output=stdout, error=stderr)\n\n async def _view_file(\n self,\n path: PathLike,\n operator: FileOperator,\n view_range: Optional[List[int]] = None,\n ) -> CLIResult:\n \"\"\"Display file content, optionally within a specified line range.\"\"\"\n # Read file content\n file_content = await operator.read_file(path)\n init_line = 1\n\n # Apply view range if specified\n if view_range:\n if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range):\n raise ToolError(\n \"Invalid `view_range`. It should be a list of two integers.\"\n )\n\n file_lines = file_content.split(\"\\n\")\n n_lines_file = len(file_lines)\n init_line, final_line = view_range\n\n # Validate view range\n if init_line < 1 or init_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. 
Its first element `{init_line}` should be \"\n f\"within the range of lines of the file: {[1, n_lines_file]}\"\n )\n if final_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"smaller than the number of lines in the file: `{n_lines_file}`\"\n )\n if final_line != -1 and final_line < init_line:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"larger or equal than its first `{init_line}`\"\n )\n\n # Apply range\n if final_line == -1:\n file_content = \"\\n\".join(file_lines[init_line - 1 :])\n else:\n file_content = \"\\n\".join(file_lines[init_line - 1 : final_line])\n\n # Format and return result\n return CLIResult(\n output=self._make_output(file_content, str(path), init_line=init_line)\n )\n\n async def str_replace(\n self,\n path: PathLike,\n old_str: str,\n new_str: Optional[str] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Replace a unique string in a file with a new string.\"\"\"\n # Read file content and expand tabs\n file_content = (await operator.read_file(path)).expandtabs()\n old_str = old_str.expandtabs()\n new_str = new_str.expandtabs() if new_str is not None else \"\"\n\n # Check if old_str is unique in the file\n occurrences = file_content.count(old_str)\n if occurrences == 0:\n raise ToolError(\n f\"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}.\"\n )\n elif occurrences > 1:\n # Find line numbers of occurrences\n file_content_lines = file_content.split(\"\\n\")\n lines = [\n idx + 1\n for idx, line in enumerate(file_content_lines)\n if old_str in line\n ]\n raise ToolError(\n f\"No replacement was performed. Multiple occurrences of old_str `{old_str}` \"\n f\"in lines {lines}. 
Please ensure it is unique\"\n )\n\n # Replace old_str with new_str\n new_file_content = file_content.replace(old_str, new_str)\n\n # Write the new content to the file\n await operator.write_file(path, new_file_content)\n\n # Save the original content to history\n self._file_history[path].append(file_content)\n\n # Create a snippet of the edited section\n replacement_line = file_content.split(old_str)[0].count(\"\\n\")\n start_line = max(0, replacement_line - SNIPPET_LINES)\n end_line = replacement_line + SNIPPET_LINES + new_str.count(\"\\n\")\n snippet = \"\\n\".join(new_file_content.split(\"\\n\")[start_line : end_line + 1])\n\n # Prepare the success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet, f\"a snippet of {path}\", start_line + 1\n )\n success_msg += \"Review the changes and make sure they are as expected. Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def insert(\n self,\n path: PathLike,\n insert_line: int,\n new_str: str,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Insert text at a specific line in a file.\"\"\"\n # Read and prepare content\n file_text = (await operator.read_file(path)).expandtabs()\n new_str = new_str.expandtabs()\n file_text_lines = file_text.split(\"\\n\")\n n_lines_file = len(file_text_lines)\n\n # Validate insert_line\n if insert_line < 0 or insert_line > n_lines_file:\n raise ToolError(\n f\"Invalid `insert_line` parameter: {insert_line}. 
It should be within \"\n f\"the range of lines of the file: {[0, n_lines_file]}\"\n )\n\n # Perform insertion\n new_str_lines = new_str.split(\"\\n\")\n new_file_text_lines = (\n file_text_lines[:insert_line]\n + new_str_lines\n + file_text_lines[insert_line:]\n )\n\n # Create a snippet for preview\n snippet_lines = (\n file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]\n + new_str_lines\n + file_text_lines[insert_line : insert_line + SNIPPET_LINES]\n )\n\n # Join lines and write to file\n new_file_text = \"\\n\".join(new_file_text_lines)\n snippet = \"\\n\".join(snippet_lines)\n\n await operator.write_file(path, new_file_text)\n self._file_history[path].append(file_text)\n\n # Prepare success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet,\n \"a snippet of the edited file\",\n max(1, insert_line - SNIPPET_LINES + 1),\n )\n success_msg += \"Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def undo_edit(\n self, path: PathLike, operator: FileOperator = None\n ) -> CLIResult:\n \"\"\"Revert the last edit made to a file.\"\"\"\n if not self._file_history[path]:\n raise ToolError(f\"No edit history found for {path}.\")\n\n old_text = self._file_history[path].pop()\n await operator.write_file(path, old_text)\n\n return CLIResult(\n output=f\"Last edit to {path} undone successfully. 
{self._make_output(old_text, str(path))}\"\n )\n\n def _make_output(\n self,\n file_content: str,\n file_descriptor: str,\n init_line: int = 1,\n expand_tabs: bool = True,\n ) -> str:\n \"\"\"Format file content for display with line numbers.\"\"\"\n file_content = maybe_truncate(file_content)\n if expand_tabs:\n file_content = file_content.expandtabs()\n\n # Add line numbers to each line\n file_content = \"\\n\".join(\n [\n f\"{i + init_line:6}\\t{line}\"\n for i, line in enumerate(file_content.split(\"\\n\"))\n ]\n )\n\n return (\n f\"Here's the result of running `cat -n` on {file_descriptor}:\\n\"\n + file_content\n + \"\\n\"\n )\n"], ["/OpenManus/app/tool/search/bing_search.py", "from typing import List, Optional, Tuple\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom app.logger import logger\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nABSTRACT_MAX_LENGTH = 300\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3\",\n \"Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; 
rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866\",\n]\n\nHEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": USER_AGENTS[0],\n \"Referer\": \"https://www.bing.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n}\n\nBING_HOST_URL = \"https://www.bing.com\"\nBING_SEARCH_URL = \"https://www.bing.com/search?q=\"\n\n\nclass BingSearchEngine(WebSearchEngine):\n session: Optional[requests.Session] = None\n\n def __init__(self, **data):\n \"\"\"Initialize the BingSearch tool with a requests session.\"\"\"\n super().__init__(**data)\n self.session = requests.Session()\n self.session.headers.update(HEADERS)\n\n def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:\n \"\"\"\n Synchronous Bing search implementation to retrieve search results.\n\n Args:\n query (str): The search query to submit to Bing.\n num_results (int, optional): Maximum number of results to return. 
Defaults to 10.\n\n Returns:\n List[SearchItem]: A list of search items with title, URL, and description.\n \"\"\"\n if not query:\n return []\n\n list_result = []\n first = 1\n next_url = BING_SEARCH_URL + query\n\n while len(list_result) < num_results:\n data, next_url = self._parse_html(\n next_url, rank_start=len(list_result), first=first\n )\n if data:\n list_result.extend(data)\n if not next_url:\n break\n first += 10\n\n return list_result[:num_results]\n\n def _parse_html(\n self, url: str, rank_start: int = 0, first: int = 1\n ) -> Tuple[List[SearchItem], str]:\n \"\"\"\n Parse Bing search result HTML to extract search results and the next page URL.\n\n Returns:\n tuple: (List of SearchItem objects, next page URL or None)\n \"\"\"\n try:\n res = self.session.get(url=url)\n res.encoding = \"utf-8\"\n root = BeautifulSoup(res.text, \"lxml\")\n\n list_data = []\n ol_results = root.find(\"ol\", id=\"b_results\")\n if not ol_results:\n return [], None\n\n for li in ol_results.find_all(\"li\", class_=\"b_algo\"):\n title = \"\"\n url = \"\"\n abstract = \"\"\n try:\n h2 = li.find(\"h2\")\n if h2:\n title = h2.text.strip()\n url = h2.a[\"href\"].strip()\n\n p = li.find(\"p\")\n if p:\n abstract = p.text.strip()\n\n if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:\n abstract = abstract[:ABSTRACT_MAX_LENGTH]\n\n rank_start += 1\n\n # Create a SearchItem object\n list_data.append(\n SearchItem(\n title=title or f\"Bing Result {rank_start}\",\n url=url,\n description=abstract,\n )\n )\n except Exception:\n continue\n\n next_btn = root.find(\"a\", title=\"Next page\")\n if not next_btn:\n return list_data, None\n\n next_url = BING_HOST_URL + next_btn[\"href\"]\n return list_data, next_url\n except Exception as e:\n logger.warning(f\"Error parsing HTML: {e}\")\n return [], None\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Bing search engine.\n\n Returns results formatted according 
to SearchItem model.\n \"\"\"\n return self._search_sync(query, num_results=num_results)\n"], ["/OpenManus/app/tool/file_operators.py", "\"\"\"File operation interfaces and implementations for local and sandbox environments.\"\"\"\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import Optional, Protocol, Tuple, Union, runtime_checkable\n\nfrom app.config import SandboxSettings\nfrom app.exceptions import ToolError\nfrom app.sandbox.client import SANDBOX_CLIENT\n\n\nPathLike = Union[str, Path]\n\n\n@runtime_checkable\nclass FileOperator(Protocol):\n \"\"\"Interface for file operations in different environments.\"\"\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file.\"\"\"\n ...\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file.\"\"\"\n ...\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n ...\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n ...\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command and return (return_code, stdout, stderr).\"\"\"\n ...\n\n\nclass LocalFileOperator(FileOperator):\n \"\"\"File operations implementation for local filesystem.\"\"\"\n\n encoding: str = \"utf-8\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a local file.\"\"\"\n try:\n return Path(path).read_text(encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to read {path}: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a local file.\"\"\"\n try:\n Path(path).write_text(content, encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path}: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points 
to a directory.\"\"\"\n return Path(path).is_dir()\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n return Path(path).exists()\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command locally.\"\"\"\n process = await asyncio.create_subprocess_shell(\n cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n try:\n stdout, stderr = await asyncio.wait_for(\n process.communicate(), timeout=timeout\n )\n return (\n process.returncode or 0,\n stdout.decode(),\n stderr.decode(),\n )\n except asyncio.TimeoutError as exc:\n try:\n process.kill()\n except ProcessLookupError:\n pass\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds\"\n ) from exc\n\n\nclass SandboxFileOperator(FileOperator):\n \"\"\"File operations implementation for sandbox environment.\"\"\"\n\n def __init__(self):\n self.sandbox_client = SANDBOX_CLIENT\n\n async def _ensure_sandbox_initialized(self):\n \"\"\"Ensure sandbox is initialized.\"\"\"\n if not self.sandbox_client.sandbox:\n await self.sandbox_client.create(config=SandboxSettings())\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n return await self.sandbox_client.read_file(str(path))\n except Exception as e:\n raise ToolError(f\"Failed to read {path} in sandbox: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n await self.sandbox_client.write_file(str(path), content)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path} in sandbox: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n 
result = await self.sandbox_client.run_command(\n f\"test -d {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -e {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a command in sandbox environment.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n stdout = await self.sandbox_client.run_command(\n cmd, timeout=int(timeout) if timeout else None\n )\n return (\n 0, # Always return 0 since we don't have explicit return code from sandbox\n stdout,\n \"\", # No stderr capture in the current sandbox implementation\n )\n except TimeoutError as exc:\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds in sandbox\"\n ) from exc\n except Exception as exc:\n return 1, \"\", f\"Error executing command in sandbox: {str(exc)}\"\n"], ["/OpenManus/app/agent/data_analysis.py", "from pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.prompt.visualization import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass DataAnalysis(ToolCallAgent):\n \"\"\"\n A data analysis agent that uses planning to solve various data analysis tasks.\n\n This agent extends ToolCallAgent with a comprehensive set of tools and capabilities,\n including Data Analysis, Chart Visualization, Data Report.\n \"\"\"\n\n name: str = \"Data_Analysis\"\n description: str = \"An 
analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 15000\n max_steps: int = 20\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n NormalPythonExecute(),\n VisualizationPrepare(),\n DataVisualization(),\n Terminate(),\n )\n )\n"], ["/OpenManus/app/tool/tool_collection.py", "\"\"\"Collection classes for managing multiple tools.\"\"\"\nfrom typing import Any, Dict, List\n\nfrom app.exceptions import ToolError\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolFailure, ToolResult\n\n\nclass ToolCollection:\n \"\"\"A collection of defined tools.\"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, *tools: BaseTool):\n self.tools = tools\n self.tool_map = {tool.name: tool for tool in tools}\n\n def __iter__(self):\n return iter(self.tools)\n\n def to_params(self) -> List[Dict[str, Any]]:\n return [tool.to_param() for tool in self.tools]\n\n async def execute(\n self, *, name: str, tool_input: Dict[str, Any] = None\n ) -> ToolResult:\n tool = self.tool_map.get(name)\n if not tool:\n return ToolFailure(error=f\"Tool {name} is invalid\")\n try:\n result = await tool(**tool_input)\n return result\n except ToolError as e:\n return ToolFailure(error=e.message)\n\n async def execute_all(self) -> List[ToolResult]:\n \"\"\"Execute all tools in the collection sequentially.\"\"\"\n results = []\n for tool in self.tools:\n try:\n result = await tool()\n results.append(result)\n except ToolError as e:\n results.append(ToolFailure(error=e.message))\n return results\n\n def get_tool(self, name: str) -> BaseTool:\n return self.tool_map.get(name)\n\n def add_tool(self, tool: BaseTool):\n \"\"\"Add a single tool to the collection.\n\n If a tool with the 
same name already exists, it will be skipped and a warning will be logged.\n \"\"\"\n if tool.name in self.tool_map:\n logger.warning(f\"Tool {tool.name} already exists in collection, skipping\")\n return self\n\n self.tools += (tool,)\n self.tool_map[tool.name] = tool\n return self\n\n def add_tools(self, *tools: BaseTool):\n \"\"\"Add multiple tools to the collection.\n\n If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged.\n \"\"\"\n for tool in tools:\n self.add_tool(tool)\n return self\n"], ["/OpenManus/app/agent/swe.py", "from typing import List\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.prompt.swe import SYSTEM_PROMPT\nfrom app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection\n\n\nclass SWEAgent(ToolCallAgent):\n \"\"\"An agent that implements the SWEAgent paradigm for executing code and natural conversations.\"\"\"\n\n name: str = \"swe\"\n description: str = \"an autonomous AI programmer that interacts directly with the computer to solve tasks.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = \"\"\n\n available_tools: ToolCollection = ToolCollection(\n Bash(), StrReplaceEditor(), Terminate()\n )\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n max_steps: int = 20\n"], ["/OpenManus/app/tool/bash.py", "import asyncio\nimport os\nfrom typing import Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, CLIResult\n\n\n_BASH_DESCRIPTION = \"\"\"Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. 
The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says \"Command timed out. Sending SIGINT to the process\", the assistant should retry running the command in the background.\n\"\"\"\n\n\nclass _BashSession:\n \"\"\"A session of a bash shell.\"\"\"\n\n _started: bool\n _process: asyncio.subprocess.Process\n\n command: str = \"/bin/bash\"\n _output_delay: float = 0.2 # seconds\n _timeout: float = 120.0 # seconds\n _sentinel: str = \"<>\"\n\n def __init__(self):\n self._started = False\n self._timed_out = False\n\n async def start(self):\n if self._started:\n return\n\n self._process = await asyncio.create_subprocess_shell(\n self.command,\n preexec_fn=os.setsid,\n shell=True,\n bufsize=0,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n self._started = True\n\n def stop(self):\n \"\"\"Terminate the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return\n self._process.terminate()\n\n async def run(self, command: str):\n \"\"\"Execute a command in the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return CLIResult(\n system=\"tool must be restarted\",\n error=f\"bash has exited with returncode {self._process.returncode}\",\n )\n if self._timed_out:\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n )\n\n # we know these are not None because we created the process with PIPEs\n assert self._process.stdin\n assert self._process.stdout\n assert self._process.stderr\n\n # send command to the process\n 
self._process.stdin.write(\n command.encode() + f\"; echo '{self._sentinel}'\\n\".encode()\n )\n await self._process.stdin.drain()\n\n # read output from the process, until the sentinel is found\n try:\n async with asyncio.timeout(self._timeout):\n while True:\n await asyncio.sleep(self._output_delay)\n # if we read directly from stdout/stderr, it will wait forever for\n # EOF. use the StreamReader buffer directly instead.\n output = (\n self._process.stdout._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if self._sentinel in output:\n # strip the sentinel and break\n output = output[: output.index(self._sentinel)]\n break\n except asyncio.TimeoutError:\n self._timed_out = True\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n ) from None\n\n if output.endswith(\"\\n\"):\n output = output[:-1]\n\n error = (\n self._process.stderr._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if error.endswith(\"\\n\"):\n error = error[:-1]\n\n # clear the buffers so that the next output can be read correctly\n self._process.stdout._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n self._process.stderr._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n\n return CLIResult(output=output, error=error)\n\n\nclass Bash(BaseTool):\n \"\"\"A tool for executing bash commands\"\"\"\n\n name: str = \"bash\"\n description: str = _BASH_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"description\": \"The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. 
Can be `ctrl+c` to interrupt the currently running process.\",\n },\n },\n \"required\": [\"command\"],\n }\n\n _session: Optional[_BashSession] = None\n\n async def execute(\n self, command: str | None = None, restart: bool = False, **kwargs\n ) -> CLIResult:\n if restart:\n if self._session:\n self._session.stop()\n self._session = _BashSession()\n await self._session.start()\n\n return CLIResult(system=\"tool has been restarted.\")\n\n if self._session is None:\n self._session = _BashSession()\n await self._session.start()\n\n if command is not None:\n return await self._session.run(command)\n\n raise ToolError(\"no command provided.\")\n\n\nif __name__ == \"__main__\":\n bash = Bash()\n rst = asyncio.run(bash.execute(\"ls -l\"))\n print(rst)\n"], ["/OpenManus/protocol/a2a/app/main.py", "import httpx\nimport argparse\n\nfrom a2a.server.apps import A2AStarletteApplication\nfrom a2a.server.request_handlers import DefaultRequestHandler\nfrom a2a.server.tasks import InMemoryTaskStore, InMemoryPushNotifier\nfrom a2a.types import (\n AgentCapabilities,\n AgentCard,\n AgentSkill,\n)\n\nfrom .agent_executor import ManusExecutor\n\nfrom .agent import A2AManus\nfrom app.tool.browser_use_tool import _BROWSER_DESCRIPTION\nfrom app.tool.str_replace_editor import _STR_REPLACE_EDITOR_DESCRIPTION\nfrom app.tool.terminate import _TERMINATE_DESCRIPTION\nimport logging\nfrom dotenv import load_dotenv\nimport asyncio\nfrom typing import Optional\n\nload_dotenv()\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nasync def main(host: str = \"localhost\", port: int = 10000):\n \"\"\"Starts the Manus Agent server.\"\"\"\n try:\n capabilities = AgentCapabilities(streaming=False, pushNotifications=True)\n skills = [\n AgentSkill(\n id=\"Python Execute\",\n name=\"Python Execute Tool\",\n description=\"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
Use print statements to see results.\",\n tags=[\"Execute Python Code\"],\n examples=[\n \"Execute Python code:'''python \\n Print('Hello World') \\n '''\"\n ],\n ),\n AgentSkill(\n id=\"Browser use\",\n name=\"Browser use Tool\",\n description=_BROWSER_DESCRIPTION,\n tags=[\"Use Browser\"],\n examples=[\"go_to 'https://www.google.com'\"],\n ),\n AgentSkill(\n id=\"Replace String\",\n name=\"Str_replace Tool\",\n description=_STR_REPLACE_EDITOR_DESCRIPTION,\n tags=[\"Operate Files\"],\n examples=[\"Replace 'old' with 'new' in 'file.txt'\"],\n ),\n AgentSkill(\n id=\"Ask human\",\n name=\"Ask human Tool\",\n description=\"Use this tool to ask human for help.\",\n tags=[\"Ask human for help\"],\n examples=[\"Ask human: 'What time is it?'\"],\n ),\n AgentSkill(\n id=\"terminate\",\n name=\"terminate Tool\",\n description=_TERMINATE_DESCRIPTION,\n tags=[\"terminate task\"],\n examples=[\"terminate\"],\n ),\n # Add more skills as needed\n ]\n\n agent_card = AgentCard(\n name=\"Manus Agent\",\n description=\"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\",\n url=f\"http://{host}:{port}/\",\n version=\"1.0.0\",\n defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n capabilities=capabilities,\n skills=skills,\n )\n\n httpx_client = httpx.AsyncClient()\n request_handler = DefaultRequestHandler(\n agent_executor=ManusExecutor(\n agent_factory=lambda: A2AManus.create(max_steps=3)\n ),\n task_store=InMemoryTaskStore(),\n push_notifier=InMemoryPushNotifier(httpx_client),\n )\n\n server = A2AStarletteApplication(\n agent_card=agent_card, http_handler=request_handler\n )\n\n logger.info(f\"Starting server on {host}:{port}\")\n return server.build()\n except Exception as e:\n logger.error(f\"An error occurred during server startup: {e}\")\n exit(1)\n\n\ndef run_server(host: Optional[str] = \"localhost\", port: Optional[int] = 10000):\n try:\n import uvicorn\n\n app = 
asyncio.run(main(host, port))\n config = uvicorn.Config(\n app=app, host=host, port=port, loop=\"asyncio\", proxy_headers=True\n )\n uvicorn.Server(config=config).run()\n logger.info(f\"Server started on {host}:{port}\")\n except Exception as e:\n logger.error(f\"An error occurred while starting the server: {e}\")\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments for host and port, with default values\n parser = argparse.ArgumentParser(description=\"Start Manus Agent service\")\n parser.add_argument(\n \"--host\",\n type=str,\n default=\"localhost\",\n help=\"Server host address, default is localhost\",\n )\n parser.add_argument(\n \"--port\", type=int, default=10000, help=\"Server port, default is 10000\"\n )\n args = parser.parse_args()\n # Start the server with the specified or default host and port\n run_server(args.host, args.port)\n"], ["/OpenManus/app/agent/react.py", "from abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.llm import LLM\nfrom app.schema import AgentState, Memory\n\n\nclass ReActAgent(BaseAgent, ABC):\n name: str\n description: Optional[str] = None\n\n system_prompt: Optional[str] = None\n next_step_prompt: Optional[str] = None\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n memory: Memory = Field(default_factory=Memory)\n state: AgentState = AgentState.IDLE\n\n max_steps: int = 10\n current_step: int = 0\n\n @abstractmethod\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action\"\"\"\n\n @abstractmethod\n async def act(self) -> str:\n \"\"\"Execute decided actions\"\"\"\n\n async def step(self) -> str:\n \"\"\"Execute a single step: think and act.\"\"\"\n should_act = await self.think()\n if not should_act:\n return \"Thinking complete - no action needed\"\n return await self.act()\n"], ["/OpenManus/protocol/a2a/app/agent_executor.py", "import logging\n\nfrom a2a.server.agent_execution import 
AgentExecutor, RequestContext\nfrom a2a.server.events import Event, EventQueue\nfrom a2a.server.tasks import TaskUpdater\nfrom a2a.types import (\n InvalidParamsError,\n Part,\n Task,\n TextPart,\n UnsupportedOperationError,\n)\nfrom a2a.utils import (\n completed_task,\n new_artifact,\n)\nfrom .agent import A2AManus\nfrom a2a.utils.errors import ServerError\nfrom typing import Callable, Awaitable\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ManusExecutor(AgentExecutor):\n \"\"\"Currency Conversion AgentExecutor Example.\"\"\"\n\n def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):\n self.agent_factory = agent_factory\n\n async def execute(\n self,\n context: RequestContext,\n event_queue: EventQueue,\n ) -> None:\n error = self._validate_request(context)\n if error:\n raise ServerError(error=InvalidParamsError())\n\n query = context.get_user_input()\n try:\n self.agent = await self.agent_factory()\n result = await self.agent.invoke(query, context.context_id)\n print(f\"Final Result ===> {result}\")\n except Exception as e:\n print(\"Error invoking agent: %s\", e)\n raise ServerError(error=ValueError(f\"Error invoking agent: {e}\")) from e\n parts = [\n Part(\n root=TextPart(\n text=(\n result[\"content\"]\n if result[\"content\"]\n else \"failed to generate response\"\n )\n ),\n )\n ]\n event_queue.enqueue_event(\n completed_task(\n context.task_id,\n context.context_id,\n [new_artifact(parts, f\"task_{context.task_id}\")],\n [context.message],\n )\n )\n\n def _validate_request(self, context: RequestContext) -> bool:\n return False\n\n async def cancel(\n self, request: RequestContext, event_queue: EventQueue\n ) -> Task | None:\n raise ServerError(error=UnsupportedOperationError())\n"], ["/OpenManus/run_flow.py", "import asyncio\nimport time\n\nfrom app.agent.data_analysis import DataAnalysis\nfrom app.agent.manus import Manus\nfrom app.config import config\nfrom app.flow.flow_factory import 
FlowFactory, FlowType\nfrom app.logger import logger\n\n\nasync def run_flow():\n agents = {\n \"manus\": Manus(),\n }\n if config.run_flow_config.use_data_analysis_agent:\n agents[\"data_analysis\"] = DataAnalysis()\n try:\n prompt = input(\"Enter your prompt: \")\n\n if prompt.strip().isspace() or not prompt:\n logger.warning(\"Empty prompt provided.\")\n return\n\n flow = FlowFactory.create_flow(\n flow_type=FlowType.PLANNING,\n agents=agents,\n )\n logger.warning(\"Processing your request...\")\n\n try:\n start_time = time.time()\n result = await asyncio.wait_for(\n flow.execute(prompt),\n timeout=3600, # 60 minute timeout for the entire execution\n )\n elapsed_time = time.time() - start_time\n logger.info(f\"Request processed in {elapsed_time:.2f} seconds\")\n logger.info(result)\n except asyncio.TimeoutError:\n logger.error(\"Request processing timed out after 1 hour\")\n logger.info(\n \"Operation terminated due to timeout. Please try a simpler request.\"\n )\n\n except KeyboardInterrupt:\n logger.info(\"Operation cancelled by user.\")\n except Exception as e:\n logger.error(f\"Error: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_flow())\n"], ["/OpenManus/app/flow/base.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import BaseModel\n\nfrom app.agent.base import BaseAgent\n\n\nclass BaseFlow(BaseModel, ABC):\n \"\"\"Base class for execution flows supporting multiple agents\"\"\"\n\n agents: Dict[str, BaseAgent]\n tools: Optional[List] = None\n primary_agent_key: Optional[str] = None\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Handle different ways of providing agents\n if isinstance(agents, BaseAgent):\n agents_dict = {\"default\": agents}\n elif isinstance(agents, list):\n agents_dict = {f\"agent_{i}\": agent for i, agent in enumerate(agents)}\n else:\n agents_dict = 
agents\n\n # If primary agent not specified, use first agent\n primary_key = data.get(\"primary_agent_key\")\n if not primary_key and agents_dict:\n primary_key = next(iter(agents_dict))\n data[\"primary_agent_key\"] = primary_key\n\n # Set the agents dictionary\n data[\"agents\"] = agents_dict\n\n # Initialize using BaseModel's init\n super().__init__(**data)\n\n @property\n def primary_agent(self) -> Optional[BaseAgent]:\n \"\"\"Get the primary agent for the flow\"\"\"\n return self.agents.get(self.primary_agent_key)\n\n def get_agent(self, key: str) -> Optional[BaseAgent]:\n \"\"\"Get a specific agent by key\"\"\"\n return self.agents.get(key)\n\n def add_agent(self, key: str, agent: BaseAgent) -> None:\n \"\"\"Add a new agent to the flow\"\"\"\n self.agents[key] = agent\n\n @abstractmethod\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the flow with given input\"\"\"\n"], ["/OpenManus/app/tool/python_execute.py", "import multiprocessing\nimport sys\nfrom io import StringIO\nfrom typing import Dict\n\nfrom app.tool.base import BaseTool\n\n\nclass PythonExecute(BaseTool):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
Use print statements to see results.\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\",\n \"description\": \"The Python code to execute.\",\n },\n },\n \"required\": [\"code\"],\n }\n\n def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None:\n original_stdout = sys.stdout\n try:\n output_buffer = StringIO()\n sys.stdout = output_buffer\n exec(code, safe_globals, safe_globals)\n result_dict[\"observation\"] = output_buffer.getvalue()\n result_dict[\"success\"] = True\n except Exception as e:\n result_dict[\"observation\"] = str(e)\n result_dict[\"success\"] = False\n finally:\n sys.stdout = original_stdout\n\n async def execute(\n self,\n code: str,\n timeout: int = 5,\n ) -> Dict:\n \"\"\"\n Executes the provided Python code with a timeout.\n\n Args:\n code (str): The Python code to execute.\n timeout (int): Execution timeout in seconds.\n\n Returns:\n Dict: Contains 'output' with execution output or error message and 'success' status.\n \"\"\"\n\n with multiprocessing.Manager() as manager:\n result = manager.dict({\"observation\": \"\", \"success\": False})\n if isinstance(__builtins__, dict):\n safe_globals = {\"__builtins__\": __builtins__}\n else:\n safe_globals = {\"__builtins__\": __builtins__.__dict__.copy()}\n proc = multiprocessing.Process(\n target=self._run_code, args=(code, result, safe_globals)\n )\n proc.start()\n proc.join(timeout)\n\n # timeout process\n if proc.is_alive():\n proc.terminate()\n proc.join(1)\n return {\n \"observation\": f\"Execution timeout after {timeout} seconds\",\n \"success\": False,\n }\n return dict(result)\n"], ["/OpenManus/app/tool/search/baidu_search.py", "from typing import List\n\nfrom baidusearch.baidusearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass BaiduSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n 
\"\"\"\n Baidu search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results)\n\n # Convert raw results to SearchItem format\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(title=f\"Baidu Result {i+1}\", url=item, description=None)\n )\n elif isinstance(item, dict):\n # If it's a dictionary with details\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"Baidu Result {i+1}\"),\n url=item.get(\"url\", \"\"),\n description=item.get(\"abstract\", None),\n )\n )\n else:\n # Try to get attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"Baidu Result {i+1}\"),\n url=getattr(item, \"url\", \"\"),\n description=getattr(item, \"abstract\", None),\n )\n )\n except Exception:\n # Fallback to a basic result\n results.append(\n SearchItem(\n title=f\"Baidu Result {i+1}\", url=str(item), description=None\n )\n )\n\n return results\n"], ["/OpenManus/app/tool/search/duckduckgo_search.py", "from typing import List\n\nfrom duckduckgo_search import DDGS\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass DuckDuckGoSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n DuckDuckGo search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = DDGS().text(query, max_results=num_results)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\", url=item, description=None\n )\n )\n elif isinstance(item, dict):\n # Extract data from the dictionary\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=item.get(\"href\", \"\"),\n 
description=item.get(\"body\", None),\n )\n )\n else:\n # Try to extract attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=getattr(item, \"href\", \"\"),\n description=getattr(item, \"body\", None),\n )\n )\n except Exception:\n # Fallback\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\",\n url=str(item),\n description=None,\n )\n )\n\n return results\n"], ["/OpenManus/app/sandbox/client.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, Optional, Protocol\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxFileOperations(Protocol):\n \"\"\"Protocol for sandbox file operations.\"\"\"\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n \"\"\"\n ...\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n \"\"\"\n ...\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file content from container.\n\n Args:\n path: File path in container.\n\n Returns:\n str: File content.\n \"\"\"\n ...\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to file in container.\n\n Args:\n path: File path in container.\n content: Content to write.\n \"\"\"\n ...\n\n\nclass BaseSandboxClient(ABC):\n \"\"\"Base sandbox client interface.\"\"\"\n\n @abstractmethod\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates sandbox.\"\"\"\n\n @abstractmethod\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes 
command.\"\"\"\n\n @abstractmethod\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container.\"\"\"\n\n @abstractmethod\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file to container.\"\"\"\n\n @abstractmethod\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file.\"\"\"\n\n @abstractmethod\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file.\"\"\"\n\n @abstractmethod\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n\n\nclass LocalSandboxClient(BaseSandboxClient):\n \"\"\"Local sandbox client implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes local sandbox client.\"\"\"\n self.sandbox: Optional[DockerSandbox] = None\n\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates a sandbox.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mappings.\n\n Raises:\n RuntimeError: If sandbox creation fails.\n \"\"\"\n self.sandbox = DockerSandbox(config, volume_bindings)\n await self.sandbox.create()\n\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs command in sandbox.\n\n Args:\n command: Command to execute.\n timeout: Execution timeout in seconds.\n\n Returns:\n Command output.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.run_command(command, timeout)\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await 
self.sandbox.copy_from(container_path, local_path)\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_to(local_path, container_path)\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file from container.\n\n Args:\n path: File path in container.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.read_file(path)\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file to container.\n\n Args:\n path: File path in container.\n content: File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.write_file(path, content)\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n if self.sandbox:\n await self.sandbox.cleanup()\n self.sandbox = None\n\n\ndef create_sandbox_client() -> LocalSandboxClient:\n \"\"\"Creates a sandbox client.\n\n Returns:\n LocalSandboxClient: Sandbox client instance.\n \"\"\"\n return LocalSandboxClient()\n\n\nSANDBOX_CLIENT = create_sandbox_client()\n"], ["/OpenManus/protocol/a2a/app/agent.py", "import httpx\nfrom typing import Any, Dict, AsyncIterable, Literal, List, ClassVar\nfrom pydantic import BaseModel\nfrom app.agent.manus import Manus\n\n\nclass ResponseFormat(BaseModel):\n \"\"\"Respond to the user in this format.\"\"\"\n\n status: Literal[\"input_required\", \"completed\", \"error\"] = \"input_required\"\n message: str\n\n\nclass A2AManus(Manus):\n\n async def invoke(self, query, sessionId) -> str:\n 
config = {\"configurable\": {\"thread_id\": sessionId}}\n response = await self.run(query)\n return self.get_agent_response(config, response)\n\n async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:\n \"\"\"Streaming is not supported by Manus.\"\"\"\n raise NotImplementedError(\"Streaming is not supported by Manus yet.\")\n\n def get_agent_response(self, config, agent_response):\n return {\n \"is_task_complete\": True,\n \"require_user_input\": False,\n \"content\": agent_response,\n }\n\n SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = [\"text\", \"text/plain\"]\n"], ["/OpenManus/app/tool/search/base.py", "from typing import List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass SearchItem(BaseModel):\n \"\"\"Represents a single search result item\"\"\"\n\n title: str = Field(description=\"The title of the search result\")\n url: str = Field(description=\"The URL of the search result\")\n description: Optional[str] = Field(\n default=None, description=\"A description or snippet of the search result\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result item.\"\"\"\n return f\"{self.title} - {self.url}\"\n\n\nclass WebSearchEngine(BaseModel):\n \"\"\"Base class for web search engines.\"\"\"\n\n model_config = {\"arbitrary_types_allowed\": True}\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Perform a web search and return a list of search items.\n\n Args:\n query (str): The search query to submit to the search engine.\n num_results (int, optional): The number of search results to return. 
Default is 10.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n List[SearchItem]: A list of SearchItem objects matching the search query.\n \"\"\"\n raise NotImplementedError\n"], ["/OpenManus/main.py", "import argparse\nimport asyncio\n\nfrom app.agent.manus import Manus\nfrom app.logger import logger\n\n\nasync def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Run Manus agent with a prompt\")\n parser.add_argument(\n \"--prompt\", type=str, required=False, help=\"Input prompt for the agent\"\n )\n args = parser.parse_args()\n\n # Create and initialize Manus agent\n agent = await Manus.create()\n try:\n # Use command line prompt if provided, otherwise ask for input\n prompt = args.prompt if args.prompt else input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await agent.run(prompt)\n logger.info(\"Request processing completed.\")\n except KeyboardInterrupt:\n logger.warning(\"Operation interrupted.\")\n finally:\n # Ensure agent resources are cleaned up before exiting\n await agent.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n"], ["/OpenManus/app/tool/chart_visualization/python_execute.py", "from app.config import config\nfrom app.tool.python_execute import PythonExecute\n\n\nclass NormalPythonExecute(PythonExecute):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"\"\"Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, data process / data report / others\",\n \"type\": \"string\",\n \"default\": \"process\",\n \"enum\": [\"process\", \"report\", \"others\"],\n },\n 
\"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code to execute.\n# Note\n1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights.\n2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also\n3. Save any report / processed files / each analysis result in worksapce directory: {directory}\n4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization.\n5. You can invode this tool step-by-step to do data analysis from summary to in-depth with data report saved also\"\"\".format(\n directory=config.workspace_root\n ),\n },\n },\n \"required\": [\"code\"],\n }\n\n async def execute(self, code: str, code_type: str | None = None, timeout=5):\n return await super().execute(code, timeout)\n"], ["/OpenManus/app/tool/search/google_search.py", "from typing import List\n\nfrom googlesearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass GoogleSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Google search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results, advanced=True)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n {\"title\": f\"Google Result {i+1}\", \"url\": item, \"description\": \"\"}\n )\n else:\n results.append(\n SearchItem(\n title=item.title, url=item.url, description=item.description\n )\n )\n\n return results\n"], ["/OpenManus/app/flow/flow_factory.py", "from enum import Enum\nfrom typing import Dict, List, Union\n\nfrom app.agent.base import BaseAgent\nfrom 
app.flow.base import BaseFlow\nfrom app.flow.planning import PlanningFlow\n\n\nclass FlowType(str, Enum):\n PLANNING = \"planning\"\n\n\nclass FlowFactory:\n \"\"\"Factory for creating different types of flows with support for multiple agents\"\"\"\n\n @staticmethod\n def create_flow(\n flow_type: FlowType,\n agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],\n **kwargs,\n ) -> BaseFlow:\n flows = {\n FlowType.PLANNING: PlanningFlow,\n }\n\n flow_class = flows.get(flow_type)\n if not flow_class:\n raise ValueError(f\"Unknown flow type: {flow_type}\")\n\n return flow_class(agents, **kwargs)\n"], ["/OpenManus/app/tool/__init__.py", "from app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.create_chat_completion import CreateChatCompletion\nfrom app.tool.planning import PlanningTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\nfrom app.tool.tool_collection import ToolCollection\nfrom app.tool.web_search import WebSearch\nfrom app.tool.crawl4ai import Crawl4aiTool\n\n\n__all__ = [\n \"BaseTool\",\n \"Bash\",\n \"BrowserUseTool\",\n \"Terminate\",\n \"StrReplaceEditor\",\n \"WebSearch\",\n \"ToolCollection\",\n \"CreateChatCompletion\",\n \"PlanningTool\",\n \"Crawl4aiTool\"\n]\n"], ["/OpenManus/app/tool/terminate.py", "from app.tool.base import BaseTool\n\n\n_TERMINATE_DESCRIPTION = \"\"\"Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task.\nWhen you have finished all the tasks, call this tool to end the work.\"\"\"\n\n\nclass Terminate(BaseTool):\n name: str = \"terminate\"\n description: str = _TERMINATE_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"status\": {\n \"type\": \"string\",\n \"description\": \"The finish status of the interaction.\",\n \"enum\": [\"success\", \"failure\"],\n }\n },\n \"required\": [\"status\"],\n }\n\n 
async def execute(self, status: str) -> str:\n \"\"\"Finish the current execution\"\"\"\n return f\"The interaction has been completed with status: {status}\"\n"], ["/OpenManus/app/tool/ask_human.py", "from app.tool import BaseTool\n\n\nclass AskHuman(BaseTool):\n \"\"\"Add a tool to ask human for help.\"\"\"\n\n name: str = \"ask_human\"\n description: str = \"Use this tool to ask human for help.\"\n parameters: str = {\n \"type\": \"object\",\n \"properties\": {\n \"inquire\": {\n \"type\": \"string\",\n \"description\": \"The question you want to ask human.\",\n }\n },\n \"required\": [\"inquire\"],\n }\n\n async def execute(self, inquire: str) -> str:\n return input(f\"\"\"Bot: {inquire}\\n\\nYou: \"\"\").strip()\n"], ["/OpenManus/app/tool/chart_visualization/chart_prepare.py", "from app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass VisualizationPrepare(NormalPythonExecute):\n \"\"\"A tool for Chart Generation Preparation\"\"\"\n\n name: str = \"visualization_preparation\"\n description: str = \"Using Python code to generates metadata of data_visualization tool. Outputs: 1) JSON Information. 2) Cleaned CSV data files (Optional).\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, visualization: csv -> chart; insight: choose insight into chart\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code for data_visualization prepare.\n## Visualization Type\n1. Data loading logic\n2. Csv Data and chart description generate\n2.1 Csv data (The data you want to visulazation, cleaning / transform from origin data, saved in .csv)\n2.2 Chart description of csv data (The chart title or description should be concise and clear. Examples: 'Product sales distribution', 'Monthly revenue trend'.)\n3. 
Save information in json file.( format: {\"csvFilePath\": string, \"chartTitle\": string}[])\n## Insight Type\n1. Select the insights from the data_visualization results that you want to add to the chart.\n2. Save information in json file.( format: {\"chartPath\": string, \"insights_id\": number[]}[])\n# Note\n1. You can generate one or multiple csv data with different visualization needs.\n2. Make each chart data esay, clean and different.\n3. Json file saving in utf-8 with path print: print(json_path)\n\"\"\",\n },\n },\n \"required\": [\"code\", \"code_type\"],\n }\n"], ["/OpenManus/app/agent/__init__.py", "from app.agent.base import BaseAgent\nfrom app.agent.browser import BrowserAgent\nfrom app.agent.mcp import MCPAgent\nfrom app.agent.react import ReActAgent\nfrom app.agent.swe import SWEAgent\nfrom app.agent.toolcall import ToolCallAgent\n\n\n__all__ = [\n \"BaseAgent\",\n \"BrowserAgent\",\n \"ReActAgent\",\n \"SWEAgent\",\n \"ToolCallAgent\",\n \"MCPAgent\",\n]\n"], ["/OpenManus/app/prompt/mcp.py", "\"\"\"Prompts for the MCP Agent.\"\"\"\n\nSYSTEM_PROMPT = \"\"\"You are an AI assistant with access to a Model Context Protocol (MCP) server.\nYou can use the tools provided by the MCP server to complete tasks.\nThe MCP server will dynamically expose tools that you can use - always check the available tools first.\n\nWhen using an MCP tool:\n1. Choose the appropriate tool based on your task requirements\n2. Provide properly formatted arguments as required by the tool\n3. Observe the results and use them to determine next steps\n4. 
Tools may change during operation - new tools might appear or existing ones might disappear\n\nFollow these guidelines:\n- Call tools with valid parameters as documented in their schemas\n- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters\n- For multimedia responses (like images), you'll receive a description of the content\n- Complete user requests step by step, using the most appropriate tools\n- If multiple tools need to be called in sequence, make one call at a time and wait for results\n\nRemember to clearly explain your reasoning and actions to the user.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on the current state and available tools, what should be done next?\nThink step by step about the problem and identify which MCP tool would be most helpful for the current stage.\nIf you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.\n\"\"\"\n\n# Additional specialized prompts\nTOOL_ERROR_PROMPT = \"\"\"You encountered an error with the tool '{tool_name}'.\nTry to understand what went wrong and correct your approach.\nCommon issues include:\n- Missing or incorrect parameters\n- Invalid parameter formats\n- Using a tool that's no longer available\n- Attempting an operation that's not supported\n\nPlease check the tool specifications and try again with corrected parameters.\n\"\"\"\n\nMULTIMEDIA_RESPONSE_PROMPT = \"\"\"You've received a multimedia response (image, audio, etc.) 
from the tool '{tool_name}'.\nThis content has been processed and described for you.\nUse this information to continue the task or provide insights to the user.\n\"\"\"\n"], ["/OpenManus/app/__init__.py", "# Python version check: 3.11-3.13\nimport sys\n\n\nif sys.version_info < (3, 11) or sys.version_info > (3, 13):\n print(\n \"Warning: Unsupported Python version {ver}, please use 3.11-3.13\".format(\n ver=\".\".join(map(str, sys.version_info))\n )\n )\n"], ["/OpenManus/app/prompt/planning.py", "PLANNING_SYSTEM_PROMPT = \"\"\"\nYou are an expert Planning Agent tasked with solving problems efficiently through structured plans.\nYour job is:\n1. Analyze requests to understand the task scope\n2. Create a clear, actionable plan that makes meaningful progress with the `planning` tool\n3. Execute steps using available tools as needed\n4. Track progress and adapt plans when necessary\n5. Use `finish` to conclude immediately when the task is complete\n\n\nAvailable tools will vary by task but may include:\n- `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.)\n- `finish`: End the task when complete\nBreak tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps.\nThink about dependencies and verification methods.\nKnow when to conclude - don't continue thinking once objectives are met.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on the current state, what's your next action?\nChoose the most efficient path forward:\n1. Is the plan sufficient, or does it need refinement?\n2. Can you execute the next step immediately?\n3. Is the task complete? If so, use `finish` right away.\n\nBe concise in your reasoning, then select the appropriate tool or action.\n\"\"\"\n"], ["/OpenManus/app/prompt/visualization.py", "SYSTEM_PROMPT = \"\"\"You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests.\n# Note:\n1. 
The workspace directory is: {directory}; Read / write file in workspace\n2. Generate analysis conclusion report in the end\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on user needs, break down the problem and use different tools step by step to solve it.\n# Note\n1. Each step select the most appropriate tool proactively (ONLY ONE).\n2. After using each tool, clearly explain the execution results and suggest the next steps.\n3. When observation with Error, review and fix it.\"\"\"\n"], ["/OpenManus/app/prompt/manus.py", "SYSTEM_PROMPT = (\n \"You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all.\"\n \"The initial directory is: {directory}\"\n)\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. 
After using each tool, clearly explain the execution results and suggest the next steps.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/prompt/swe.py", "SYSTEM_PROMPT = \"\"\"SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.\n\nThe special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time.\nIn addition to typical bash commands, you can also use specific commands to help you navigate and edit files.\nTo call a command, you need to invoke it with a function call/tool call.\n\nPlease note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.\nIf you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.\n\nRESPONSE FORMAT:\nYour shell prompt is formatted as follows:\n(Open file: )\n(Current directory: )\nbash-$\n\nFirst, you should _always_ include a general thought about what you're going to do next.\nThen, for every response, you must include exactly _ONE_ tool call/function call.\n\nRemember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference.\nIf you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call.\nNote that the environment does NOT support interactive session commands (e.g. 
python, vim), so please do not invoke them.\n\"\"\"\n"], ["/OpenManus/app/prompt/toolcall.py", "SYSTEM_PROMPT = \"You are an agent that can execute tool calls\"\n\nNEXT_STEP_PROMPT = (\n \"If you want to stop interaction, use `terminate` tool/function call.\"\n)\n"], ["/OpenManus/app/exceptions.py", "class ToolError(Exception):\n \"\"\"Raised when a tool encounters an error.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n\nclass OpenManusError(Exception):\n \"\"\"Base exception for all OpenManus errors\"\"\"\n\n\nclass TokenLimitExceeded(OpenManusError):\n \"\"\"Exception raised when the token limit is exceeded\"\"\"\n"], ["/OpenManus/run_mcp_server.py", "# coding: utf-8\n# A shortcut to launch OpenManus MCP server, where its introduction also solves other import issues.\nfrom app.mcp.server import MCPServer, parse_args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/sandbox/__init__.py", "\"\"\"\nDocker Sandbox Module\n\nProvides secure containerized execution environment with resource limits\nand isolation for running untrusted code.\n\"\"\"\nfrom app.sandbox.client import (\n BaseSandboxClient,\n LocalSandboxClient,\n create_sandbox_client,\n)\nfrom app.sandbox.core.exceptions import (\n SandboxError,\n SandboxResourceError,\n SandboxTimeoutError,\n)\nfrom app.sandbox.core.manager import SandboxManager\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\n__all__ = [\n \"DockerSandbox\",\n \"SandboxManager\",\n \"BaseSandboxClient\",\n \"LocalSandboxClient\",\n \"create_sandbox_client\",\n \"SandboxError\",\n \"SandboxTimeoutError\",\n \"SandboxResourceError\",\n]\n"], ["/OpenManus/app/tool/search/__init__.py", "from app.tool.search.baidu_search import BaiduSearchEngine\nfrom app.tool.search.base import WebSearchEngine\nfrom app.tool.search.bing_search import BingSearchEngine\nfrom 
app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine\nfrom app.tool.search.google_search import GoogleSearchEngine\n\n\n__all__ = [\n \"WebSearchEngine\",\n \"BaiduSearchEngine\",\n \"DuckDuckGoSearchEngine\",\n \"GoogleSearchEngine\",\n \"BingSearchEngine\",\n]\n"], ["/OpenManus/app/tool/chart_visualization/__init__.py", "from app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\n__all__ = [\"DataVisualization\", \"VisualizationPrepare\", \"NormalPythonExecute\"]\n"], ["/OpenManus/app/sandbox/core/exceptions.py", "\"\"\"Exception classes for the sandbox system.\n\nThis module defines custom exceptions used throughout the sandbox system to\nhandle various error conditions in a structured way.\n\"\"\"\n\n\nclass SandboxError(Exception):\n \"\"\"Base exception for sandbox-related errors.\"\"\"\n\n\nclass SandboxTimeoutError(SandboxError):\n \"\"\"Exception raised when a sandbox operation times out.\"\"\"\n\n\nclass SandboxResourceError(SandboxError):\n \"\"\"Exception raised for resource-related errors.\"\"\"\n"], ["/OpenManus/examples/benchmarks/__init__.py", "\"\"\"\nOpenManus benchmark system for standardized agent evaluation.\n\"\"\"\n"], ["/OpenManus/app/mcp/__init__.py", ""], ["/OpenManus/app/flow/__init__.py", ""], ["/OpenManus/app/prompt/__init__.py", ""], ["/OpenManus/protocol/a2a/__init__.py", ""], ["/OpenManus/protocol/a2a/app/__init__.py", ""]], "task_instance_info": {"created_time": "2025-08-20 15:01:29", "created_task_model": "DeepSeek-R1", "class_skeleton": "class BrowserAgent(ToolCallAgent):\n default_factory=lambda: \n pass\n def initialize_helper(self) -> \"BrowserAgent\":\n pass", "code_description": "This code segment functions to: Establish a specialized agent configuration for browser control operations by integrating tool management with context-aware 
processing workflows, including dynamic prompt generation and resource lifecycle handling."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass BrowserAgent(ToolCallAgent):\n default_factory = lambda: BrowserAgent(\n system_prompt=SYSTEM_PROMPT,\n next_step_prompt=NEXT_STEP_PROMPT,\n tools=ToolCollection([BrowserUseTool(), Terminate()]),\n tool_choice=ToolChoice.AUTO,\n max_auto_attempts=5\n )\n \n def initialize_helper(self) -> \"BrowserAgent\":\n self.helper = BrowserContextHelper(self)\n return self\n[TASK_END]\n```", "inference_time": "2025-08-20 15-01-42"}, "editdistance_info": {"edit_distance": 23.4043, "calculate_time": "2025-08-20 15:01:42", "true_code_clean": "class BrowserAgent(ToolCallAgent):\n name: str = \"browser\"\n description: str = \"A browser agent that can control a browser to accomplish tasks\"\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n max_observe: int = 10000\n max_steps: int = 20\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())\n )\n tool_choices: ToolChoice = ToolChoice.AUTO\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"BrowserAgent\":\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n async def think(self) -> bool:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n return await super().think()\n async def cleanup(self):\n await self.browser_context_helper.cleanup_browser()", "predict_code_clean": "class BrowserAgent(ToolCallAgent):\n default_factory = lambda: BrowserAgent(\n system_prompt=SYSTEM_PROMPT,\n next_step_prompt=NEXT_STEP_PROMPT,\n tools=ToolCollection([BrowserUseTool(), Terminate()]),\n tool_choice=ToolChoice.AUTO,\n 
max_auto_attempts=5\n )\n def initialize_helper(self) -> \"BrowserAgent\":\n self.helper = BrowserContextHelper(self)\n return self"}} {"repo_name": "OpenManus", "file_name": "/OpenManus/run_mcp.py", "inference_info": {"prefix_code": "#!/usr/bin/env python\nimport argparse\nimport asyncio\nimport sys\n\nfrom app.agent.mcp import MCPAgent\nfrom app.config import config\nfrom app.logger import logger\n\n\n", "suffix_code": "\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Run the MCP Agent\")\n parser.add_argument(\n \"--connection\",\n \"-c\",\n choices=[\"stdio\", \"sse\"],\n default=\"stdio\",\n help=\"Connection type: stdio or sse\",\n )\n parser.add_argument(\n \"--server-url\",\n default=\"http://127.0.0.1:8000/sse\",\n help=\"URL for SSE connection\",\n )\n parser.add_argument(\n \"--interactive\", \"-i\", action=\"store_true\", help=\"Run in interactive mode\"\n )\n parser.add_argument(\"--prompt\", \"-p\", help=\"Single prompt to execute and exit\")\n return parser.parse_args()\n\n\nasync def run_mcp() -> None:\n \"\"\"Main entry point for the MCP runner.\"\"\"\n args = parse_args()\n runner = MCPRunner()\n\n try:\n await runner.initialize(args.connection, args.server_url)\n\n if args.prompt:\n await runner.run_single_prompt(args.prompt)\n elif args.interactive:\n await runner.run_interactive()\n else:\n await runner.run_default()\n\n except KeyboardInterrupt:\n logger.info(\"Program interrupted by user\")\n except Exception as e:\n logger.error(f\"Error running MCPAgent: {str(e)}\", exc_info=True)\n sys.exit(1)\n finally:\n await runner.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_mcp())\n", "middle_code": "class MCPRunner:\n def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()\n async def initialize(\n self,\n connection_type: str,\n server_url: str | None = 
None,\n ) -> None:\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: \n await self.agent.initialize(connection_type=\"sse\", server_url=server_url)\n logger.info(f\"Connected to MCP server via {connection_type}\")\n async def run_interactive(self) -> None:\n print(\"\\nMCP Agent Interactive Mode (type 'exit' to quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n print(f\"\\nAgent: {response}\")\n async def run_single_prompt(self, prompt: str) -> None:\n await self.agent.run(prompt)\n async def run_default(self) -> None:\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")\n async def cleanup(self) -> None:\n await self.agent.cleanup()\n logger.info(\"Session ended\")", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/OpenManus/app/config.py", "import json\nimport threading\nimport tomllib\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\ndef get_project_root() -> Path:\n \"\"\"Get the project root directory\"\"\"\n return Path(__file__).resolve().parent.parent\n\n\nPROJECT_ROOT = get_project_root()\nWORKSPACE_ROOT = PROJECT_ROOT / \"workspace\"\n\n\nclass LLMSettings(BaseModel):\n model: str = Field(..., description=\"Model name\")\n base_url: str = Field(..., description=\"API base URL\")\n api_key: str = Field(..., description=\"API key\")\n max_tokens: int = Field(4096, description=\"Maximum number of 
tokens per request\")\n max_input_tokens: Optional[int] = Field(\n None,\n description=\"Maximum input tokens to use across all requests (None for unlimited)\",\n )\n temperature: float = Field(1.0, description=\"Sampling temperature\")\n api_type: str = Field(..., description=\"Azure, Openai, or Ollama\")\n api_version: str = Field(..., description=\"Azure Openai version if AzureOpenai\")\n\n\nclass ProxySettings(BaseModel):\n server: str = Field(None, description=\"Proxy server address\")\n username: Optional[str] = Field(None, description=\"Proxy username\")\n password: Optional[str] = Field(None, description=\"Proxy password\")\n\n\nclass SearchSettings(BaseModel):\n engine: str = Field(default=\"Google\", description=\"Search engine the llm to use\")\n fallback_engines: List[str] = Field(\n default_factory=lambda: [\"DuckDuckGo\", \"Baidu\", \"Bing\"],\n description=\"Fallback search engines to try if the primary engine fails\",\n )\n retry_delay: int = Field(\n default=60,\n description=\"Seconds to wait before retrying all engines again after they all fail\",\n )\n max_retries: int = Field(\n default=3,\n description=\"Maximum number of times to retry all engines when all fail\",\n )\n lang: str = Field(\n default=\"en\",\n description=\"Language code for search results (e.g., en, zh, fr)\",\n )\n country: str = Field(\n default=\"us\",\n description=\"Country code for search results (e.g., us, cn, uk)\",\n )\n\n\nclass RunflowSettings(BaseModel):\n use_data_analysis_agent: bool = Field(\n default=False, description=\"Enable data analysis agent in run flow\"\n )\n\n\nclass BrowserSettings(BaseModel):\n headless: bool = Field(False, description=\"Whether to run browser in headless mode\")\n disable_security: bool = Field(\n True, description=\"Disable browser security features\"\n )\n extra_chromium_args: List[str] = Field(\n default_factory=list, description=\"Extra arguments to pass to the browser\"\n )\n chrome_instance_path: Optional[str] = Field(\n None, 
description=\"Path to a Chrome instance to use\"\n )\n wss_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via WebSocket\"\n )\n cdp_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via CDP\"\n )\n proxy: Optional[ProxySettings] = Field(\n None, description=\"Proxy settings for the browser\"\n )\n max_content_length: int = Field(\n 2000, description=\"Maximum length for content retrieval operations\"\n )\n\n\nclass SandboxSettings(BaseModel):\n \"\"\"Configuration for the execution sandbox\"\"\"\n\n use_sandbox: bool = Field(False, description=\"Whether to use the sandbox\")\n image: str = Field(\"python:3.12-slim\", description=\"Base image\")\n work_dir: str = Field(\"/workspace\", description=\"Container working directory\")\n memory_limit: str = Field(\"512m\", description=\"Memory limit\")\n cpu_limit: float = Field(1.0, description=\"CPU limit\")\n timeout: int = Field(300, description=\"Default command timeout (seconds)\")\n network_enabled: bool = Field(\n False, description=\"Whether network access is allowed\"\n )\n\n\nclass MCPServerConfig(BaseModel):\n \"\"\"Configuration for a single MCP server\"\"\"\n\n type: str = Field(..., description=\"Server connection type (sse or stdio)\")\n url: Optional[str] = Field(None, description=\"Server URL for SSE connections\")\n command: Optional[str] = Field(None, description=\"Command for stdio connections\")\n args: List[str] = Field(\n default_factory=list, description=\"Arguments for stdio command\"\n )\n\n\nclass MCPSettings(BaseModel):\n \"\"\"Configuration for MCP (Model Context Protocol)\"\"\"\n\n server_reference: str = Field(\n \"app.mcp.server\", description=\"Module reference for the MCP server\"\n )\n servers: Dict[str, MCPServerConfig] = Field(\n default_factory=dict, description=\"MCP server configurations\"\n )\n\n @classmethod\n def load_server_config(cls) -> Dict[str, MCPServerConfig]:\n \"\"\"Load MCP server configuration from JSON 
file\"\"\"\n config_path = PROJECT_ROOT / \"config\" / \"mcp.json\"\n\n try:\n config_file = config_path if config_path.exists() else None\n if not config_file:\n return {}\n\n with config_file.open() as f:\n data = json.load(f)\n servers = {}\n\n for server_id, server_config in data.get(\"mcpServers\", {}).items():\n servers[server_id] = MCPServerConfig(\n type=server_config[\"type\"],\n url=server_config.get(\"url\"),\n command=server_config.get(\"command\"),\n args=server_config.get(\"args\", []),\n )\n return servers\n except Exception as e:\n raise ValueError(f\"Failed to load MCP server config: {e}\")\n\n\nclass AppConfig(BaseModel):\n llm: Dict[str, LLMSettings]\n sandbox: Optional[SandboxSettings] = Field(\n None, description=\"Sandbox configuration\"\n )\n browser_config: Optional[BrowserSettings] = Field(\n None, description=\"Browser configuration\"\n )\n search_config: Optional[SearchSettings] = Field(\n None, description=\"Search configuration\"\n )\n mcp_config: Optional[MCPSettings] = Field(None, description=\"MCP configuration\")\n run_flow_config: Optional[RunflowSettings] = Field(\n None, description=\"Run flow configuration\"\n )\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass Config:\n _instance = None\n _lock = threading.Lock()\n _initialized = False\n\n def __new__(cls):\n if cls._instance is None:\n with cls._lock:\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n if not self._initialized:\n with self._lock:\n if not self._initialized:\n self._config = None\n self._load_initial_config()\n self._initialized = True\n\n @staticmethod\n def _get_config_path() -> Path:\n root = PROJECT_ROOT\n config_path = root / \"config\" / \"config.toml\"\n if config_path.exists():\n return config_path\n example_path = root / \"config\" / \"config.example.toml\"\n if example_path.exists():\n return example_path\n raise FileNotFoundError(\"No configuration file found in config 
directory\")\n\n def _load_config(self) -> dict:\n config_path = self._get_config_path()\n with config_path.open(\"rb\") as f:\n return tomllib.load(f)\n\n def _load_initial_config(self):\n raw_config = self._load_config()\n base_llm = raw_config.get(\"llm\", {})\n llm_overrides = {\n k: v for k, v in raw_config.get(\"llm\", {}).items() if isinstance(v, dict)\n }\n\n default_settings = {\n \"model\": base_llm.get(\"model\"),\n \"base_url\": base_llm.get(\"base_url\"),\n \"api_key\": base_llm.get(\"api_key\"),\n \"max_tokens\": base_llm.get(\"max_tokens\", 4096),\n \"max_input_tokens\": base_llm.get(\"max_input_tokens\"),\n \"temperature\": base_llm.get(\"temperature\", 1.0),\n \"api_type\": base_llm.get(\"api_type\", \"\"),\n \"api_version\": base_llm.get(\"api_version\", \"\"),\n }\n\n # handle browser config.\n browser_config = raw_config.get(\"browser\", {})\n browser_settings = None\n\n if browser_config:\n # handle proxy settings.\n proxy_config = browser_config.get(\"proxy\", {})\n proxy_settings = None\n\n if proxy_config and proxy_config.get(\"server\"):\n proxy_settings = ProxySettings(\n **{\n k: v\n for k, v in proxy_config.items()\n if k in [\"server\", \"username\", \"password\"] and v\n }\n )\n\n # filter valid browser config parameters.\n valid_browser_params = {\n k: v\n for k, v in browser_config.items()\n if k in BrowserSettings.__annotations__ and v is not None\n }\n\n # if there is proxy settings, add it to the parameters.\n if proxy_settings:\n valid_browser_params[\"proxy\"] = proxy_settings\n\n # only create BrowserSettings when there are valid parameters.\n if valid_browser_params:\n browser_settings = BrowserSettings(**valid_browser_params)\n\n search_config = raw_config.get(\"search\", {})\n search_settings = None\n if search_config:\n search_settings = SearchSettings(**search_config)\n sandbox_config = raw_config.get(\"sandbox\", {})\n if sandbox_config:\n sandbox_settings = SandboxSettings(**sandbox_config)\n else:\n sandbox_settings = 
SandboxSettings()\n\n mcp_config = raw_config.get(\"mcp\", {})\n mcp_settings = None\n if mcp_config:\n # Load server configurations from JSON\n mcp_config[\"servers\"] = MCPSettings.load_server_config()\n mcp_settings = MCPSettings(**mcp_config)\n else:\n mcp_settings = MCPSettings(servers=MCPSettings.load_server_config())\n\n run_flow_config = raw_config.get(\"runflow\")\n if run_flow_config:\n run_flow_settings = RunflowSettings(**run_flow_config)\n else:\n run_flow_settings = RunflowSettings()\n config_dict = {\n \"llm\": {\n \"default\": default_settings,\n **{\n name: {**default_settings, **override_config}\n for name, override_config in llm_overrides.items()\n },\n },\n \"sandbox\": sandbox_settings,\n \"browser_config\": browser_settings,\n \"search_config\": search_settings,\n \"mcp_config\": mcp_settings,\n \"run_flow_config\": run_flow_settings,\n }\n\n self._config = AppConfig(**config_dict)\n\n @property\n def llm(self) -> Dict[str, LLMSettings]:\n return self._config.llm\n\n @property\n def sandbox(self) -> SandboxSettings:\n return self._config.sandbox\n\n @property\n def browser_config(self) -> Optional[BrowserSettings]:\n return self._config.browser_config\n\n @property\n def search_config(self) -> Optional[SearchSettings]:\n return self._config.search_config\n\n @property\n def mcp_config(self) -> MCPSettings:\n \"\"\"Get the MCP configuration\"\"\"\n return self._config.mcp_config\n\n @property\n def run_flow_config(self) -> RunflowSettings:\n \"\"\"Get the Run Flow configuration\"\"\"\n return self._config.run_flow_config\n\n @property\n def workspace_root(self) -> Path:\n \"\"\"Get the workspace root directory\"\"\"\n return WORKSPACE_ROOT\n\n @property\n def root_path(self) -> Path:\n \"\"\"Get the root path of the application\"\"\"\n return PROJECT_ROOT\n\n\nconfig = Config()\n"], ["/OpenManus/app/logger.py", "import sys\nfrom datetime import datetime\n\nfrom loguru import logger as _logger\n\nfrom app.config import 
PROJECT_ROOT\n\n\n_print_level = \"INFO\"\n\n\ndef define_log_level(print_level=\"INFO\", logfile_level=\"DEBUG\", name: str = None):\n \"\"\"Adjust the log level to above level\"\"\"\n global _print_level\n _print_level = print_level\n\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%Y%m%d%H%M%S\")\n log_name = (\n f\"{name}_{formatted_date}\" if name else formatted_date\n ) # name a log with prefix name\n\n _logger.remove()\n _logger.add(sys.stderr, level=print_level)\n _logger.add(PROJECT_ROOT / f\"logs/{log_name}.log\", level=logfile_level)\n return _logger\n\n\nlogger = define_log_level()\n\n\nif __name__ == \"__main__\":\n logger.info(\"Starting application\")\n logger.debug(\"Debug message\")\n logger.warning(\"Warning message\")\n logger.error(\"Error message\")\n logger.critical(\"Critical message\")\n\n try:\n raise ValueError(\"Test error\")\n except Exception as e:\n logger.exception(f\"An error occurred: {e}\")\n"], ["/OpenManus/app/mcp/server.py", "import logging\nimport sys\n\n\nlogging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])\n\nimport argparse\nimport asyncio\nimport atexit\nimport json\nfrom inspect import Parameter, Signature\nfrom typing import Any, Dict, Optional\n\nfrom mcp.server.fastmcp import FastMCP\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\n\n\nclass MCPServer:\n \"\"\"MCP Server implementation with tool registration and management.\"\"\"\n\n def __init__(self, name: str = \"openmanus\"):\n self.server = FastMCP(name)\n self.tools: Dict[str, BaseTool] = {}\n\n # Initialize standard tools\n self.tools[\"bash\"] = Bash()\n self.tools[\"browser\"] = BrowserUseTool()\n self.tools[\"editor\"] = StrReplaceEditor()\n self.tools[\"terminate\"] = Terminate()\n\n def 
register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:\n \"\"\"Register a tool with parameter validation and documentation.\"\"\"\n tool_name = method_name or tool.name\n tool_param = tool.to_param()\n tool_function = tool_param[\"function\"]\n\n # Define the async function to be registered\n async def tool_method(**kwargs):\n logger.info(f\"Executing {tool_name}: {kwargs}\")\n result = await tool.execute(**kwargs)\n\n logger.info(f\"Result of {tool_name}: {result}\")\n\n # Handle different types of results (match original logic)\n if hasattr(result, \"model_dump\"):\n return json.dumps(result.model_dump())\n elif isinstance(result, dict):\n return json.dumps(result)\n return result\n\n # Set method metadata\n tool_method.__name__ = tool_name\n tool_method.__doc__ = self._build_docstring(tool_function)\n tool_method.__signature__ = self._build_signature(tool_function)\n\n # Store parameter schema (important for tools that access it programmatically)\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n tool_method._parameter_schema = {\n param_name: {\n \"description\": param_details.get(\"description\", \"\"),\n \"type\": param_details.get(\"type\", \"any\"),\n \"required\": param_name in required_params,\n }\n for param_name, param_details in param_props.items()\n }\n\n # Register with server\n self.server.tool()(tool_method)\n logger.info(f\"Registered tool: {tool_name}\")\n\n def _build_docstring(self, tool_function: dict) -> str:\n \"\"\"Build a formatted docstring from tool function metadata.\"\"\"\n description = tool_function.get(\"description\", \"\")\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n # Build docstring (match original format)\n docstring = description\n if param_props:\n docstring += 
\"\\n\\nParameters:\\n\"\n for param_name, param_details in param_props.items():\n required_str = (\n \"(required)\" if param_name in required_params else \"(optional)\"\n )\n param_type = param_details.get(\"type\", \"any\")\n param_desc = param_details.get(\"description\", \"\")\n docstring += (\n f\" {param_name} ({param_type}) {required_str}: {param_desc}\\n\"\n )\n\n return docstring\n\n def _build_signature(self, tool_function: dict) -> Signature:\n \"\"\"Build a function signature from tool function metadata.\"\"\"\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n parameters = []\n\n # Follow original type mapping\n for param_name, param_details in param_props.items():\n param_type = param_details.get(\"type\", \"\")\n default = Parameter.empty if param_name in required_params else None\n\n # Map JSON Schema types to Python types (same as original)\n annotation = Any\n if param_type == \"string\":\n annotation = str\n elif param_type == \"integer\":\n annotation = int\n elif param_type == \"number\":\n annotation = float\n elif param_type == \"boolean\":\n annotation = bool\n elif param_type == \"object\":\n annotation = dict\n elif param_type == \"array\":\n annotation = list\n\n # Create parameter with same structure as original\n param = Parameter(\n name=param_name,\n kind=Parameter.KEYWORD_ONLY,\n default=default,\n annotation=annotation,\n )\n parameters.append(param)\n\n return Signature(parameters=parameters)\n\n async def cleanup(self) -> None:\n \"\"\"Clean up server resources.\"\"\"\n logger.info(\"Cleaning up resources\")\n # Follow original cleanup logic - only clean browser tool\n if \"browser\" in self.tools and hasattr(self.tools[\"browser\"], \"cleanup\"):\n await self.tools[\"browser\"].cleanup()\n\n def register_all_tools(self) -> None:\n \"\"\"Register all tools with the server.\"\"\"\n for tool in self.tools.values():\n 
self.register_tool(tool)\n\n def run(self, transport: str = \"stdio\") -> None:\n \"\"\"Run the MCP server.\"\"\"\n # Register all tools\n self.register_all_tools()\n\n # Register cleanup function (match original behavior)\n atexit.register(lambda: asyncio.run(self.cleanup()))\n\n # Start server (with same logging as original)\n logger.info(f\"Starting OpenManus server ({transport} mode)\")\n self.server.run(transport=transport)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"OpenManus MCP Server\")\n parser.add_argument(\n \"--transport\",\n choices=[\"stdio\"],\n default=\"stdio\",\n help=\"Communication method: stdio or http (default: stdio)\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/main.py", "import argparse\nimport asyncio\n\nfrom app.agent.manus import Manus\nfrom app.logger import logger\n\n\nasync def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Run Manus agent with a prompt\")\n parser.add_argument(\n \"--prompt\", type=str, required=False, help=\"Input prompt for the agent\"\n )\n args = parser.parse_args()\n\n # Create and initialize Manus agent\n agent = await Manus.create()\n try:\n # Use command line prompt if provided, otherwise ask for input\n prompt = args.prompt if args.prompt else input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await agent.run(prompt)\n logger.info(\"Request processing completed.\")\n except KeyboardInterrupt:\n logger.warning(\"Operation interrupted.\")\n finally:\n # Ensure agent resources are cleaned up before exiting\n await agent.cleanup()\n\n\nif __name__ == \"__main__\":\n 
asyncio.run(main())\n"], ["/OpenManus/app/agent/mcp.py", "from typing import Any, Dict, List, Optional, Tuple\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.mcp import MULTIMEDIA_RESPONSE_PROMPT, NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import AgentState, Message\nfrom app.tool.base import ToolResult\nfrom app.tool.mcp import MCPClients\n\n\nclass MCPAgent(ToolCallAgent):\n \"\"\"Agent for interacting with MCP (Model Context Protocol) servers.\n\n This agent connects to an MCP server using either SSE or stdio transport\n and makes the server's tools available through the agent's tool interface.\n \"\"\"\n\n name: str = \"mcp_agent\"\n description: str = \"An agent that connects to an MCP server and uses its tools.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n # Initialize MCP tool collection\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n available_tools: MCPClients = None # Will be set in initialize()\n\n max_steps: int = 20\n connection_type: str = \"stdio\" # \"stdio\" or \"sse\"\n\n # Track tool schemas to detect changes\n tool_schemas: Dict[str, Dict[str, Any]] = Field(default_factory=dict)\n _refresh_tools_interval: int = 5 # Refresh tools every N steps\n\n # Special tool names that should trigger termination\n special_tool_names: List[str] = Field(default_factory=lambda: [\"terminate\"])\n\n async def initialize(\n self,\n connection_type: Optional[str] = None,\n server_url: Optional[str] = None,\n command: Optional[str] = None,\n args: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Initialize the MCP connection.\n\n Args:\n connection_type: Type of connection to use (\"stdio\" or \"sse\")\n server_url: URL of the MCP server (for SSE connection)\n command: Command to run (for stdio connection)\n args: Arguments for the command (for stdio connection)\n \"\"\"\n if connection_type:\n self.connection_type = 
connection_type\n\n # Connect to the MCP server based on connection type\n if self.connection_type == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL is required for SSE connection\")\n await self.mcp_clients.connect_sse(server_url=server_url)\n elif self.connection_type == \"stdio\":\n if not command:\n raise ValueError(\"Command is required for stdio connection\")\n await self.mcp_clients.connect_stdio(command=command, args=args or [])\n else:\n raise ValueError(f\"Unsupported connection type: {self.connection_type}\")\n\n # Set available_tools to our MCP instance\n self.available_tools = self.mcp_clients\n\n # Store initial tool schemas\n await self._refresh_tools()\n\n # Add system message about available tools\n tool_names = list(self.mcp_clients.tool_map.keys())\n tools_info = \", \".join(tool_names)\n\n # Add system prompt and available tools information\n self.memory.add_message(\n Message.system_message(\n f\"{self.system_prompt}\\n\\nAvailable MCP tools: {tools_info}\"\n )\n )\n\n async def _refresh_tools(self) -> Tuple[List[str], List[str]]:\n \"\"\"Refresh the list of available tools from the MCP server.\n\n Returns:\n A tuple of (added_tools, removed_tools)\n \"\"\"\n if not self.mcp_clients.sessions:\n return [], []\n\n # Get current tool schemas directly from the server\n response = await self.mcp_clients.list_tools()\n current_tools = {tool.name: tool.inputSchema for tool in response.tools}\n\n # Determine added, removed, and changed tools\n current_names = set(current_tools.keys())\n previous_names = set(self.tool_schemas.keys())\n\n added_tools = list(current_names - previous_names)\n removed_tools = list(previous_names - current_names)\n\n # Check for schema changes in existing tools\n changed_tools = []\n for name in current_names.intersection(previous_names):\n if current_tools[name] != self.tool_schemas.get(name):\n changed_tools.append(name)\n\n # Update stored schemas\n self.tool_schemas = current_tools\n\n # Log and notify 
about changes\n if added_tools:\n logger.info(f\"Added MCP tools: {added_tools}\")\n self.memory.add_message(\n Message.system_message(f\"New tools available: {', '.join(added_tools)}\")\n )\n if removed_tools:\n logger.info(f\"Removed MCP tools: {removed_tools}\")\n self.memory.add_message(\n Message.system_message(\n f\"Tools no longer available: {', '.join(removed_tools)}\"\n )\n )\n if changed_tools:\n logger.info(f\"Changed MCP tools: {changed_tools}\")\n\n return added_tools, removed_tools\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action.\"\"\"\n # Check MCP session and tools availability\n if not self.mcp_clients.sessions or not self.mcp_clients.tool_map:\n logger.info(\"MCP service is no longer available, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Refresh tools periodically\n if self.current_step % self._refresh_tools_interval == 0:\n await self._refresh_tools()\n # All tools removed indicates shutdown\n if not self.mcp_clients.tool_map:\n logger.info(\"MCP service has shut down, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Use the parent class's think method\n return await super().think()\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs) -> None:\n \"\"\"Handle special tool execution and state changes\"\"\"\n # First process with parent handler\n await super()._handle_special_tool(name, result, **kwargs)\n\n # Handle multimedia responses\n if isinstance(result, ToolResult) and result.base64_image:\n self.memory.add_message(\n Message.system_message(\n MULTIMEDIA_RESPONSE_PROMPT.format(tool_name=name)\n )\n )\n\n def _should_finish_execution(self, name: str, **kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n # Terminate if the tool name is 'terminate'\n return name.lower() == \"terminate\"\n\n async def cleanup(self) -> None:\n \"\"\"Clean up MCP connection when done.\"\"\"\n if 
self.mcp_clients.sessions:\n await self.mcp_clients.disconnect()\n logger.info(\"MCP connection closed\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n result = await super().run(request)\n return result\n finally:\n # Ensure cleanup happens even if there's an error\n await self.cleanup()\n"], ["/OpenManus/app/agent/manus.py", "from typing import Dict, List, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.browser import BrowserContextHelper\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.logger import logger\nfrom app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.ask_human import AskHuman\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.mcp import MCPClients, MCPClientTool\nfrom app.tool.python_execute import PythonExecute\nfrom app.tool.str_replace_editor import StrReplaceEditor\n\n\nclass Manus(ToolCallAgent):\n \"\"\"A versatile general-purpose agent with support for both local and MCP tools.\"\"\"\n\n name: str = \"Manus\"\n description: str = \"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # MCP clients for remote tool access\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n PythonExecute(),\n BrowserUseTool(),\n StrReplaceEditor(),\n AskHuman(),\n Terminate(),\n )\n )\n\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n # Track connected MCP servers\n 
connected_servers: Dict[str, str] = Field(\n default_factory=dict\n ) # server_id -> url/command\n _initialized: bool = False\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"Manus\":\n \"\"\"Initialize basic components synchronously.\"\"\"\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n @classmethod\n async def create(cls, **kwargs) -> \"Manus\":\n \"\"\"Factory method to create and properly initialize a Manus instance.\"\"\"\n instance = cls(**kwargs)\n await instance.initialize_mcp_servers()\n instance._initialized = True\n return instance\n\n async def initialize_mcp_servers(self) -> None:\n \"\"\"Initialize connections to configured MCP servers.\"\"\"\n for server_id, server_config in config.mcp_config.servers.items():\n try:\n if server_config.type == \"sse\":\n if server_config.url:\n await self.connect_mcp_server(server_config.url, server_id)\n logger.info(\n f\"Connected to MCP server {server_id} at {server_config.url}\"\n )\n elif server_config.type == \"stdio\":\n if server_config.command:\n await self.connect_mcp_server(\n server_config.command,\n server_id,\n use_stdio=True,\n stdio_args=server_config.args,\n )\n logger.info(\n f\"Connected to MCP server {server_id} using command {server_config.command}\"\n )\n except Exception as e:\n logger.error(f\"Failed to connect to MCP server {server_id}: {e}\")\n\n async def connect_mcp_server(\n self,\n server_url: str,\n server_id: str = \"\",\n use_stdio: bool = False,\n stdio_args: List[str] = None,\n ) -> None:\n \"\"\"Connect to an MCP server and add its tools.\"\"\"\n if use_stdio:\n await self.mcp_clients.connect_stdio(\n server_url, stdio_args or [], server_id\n )\n self.connected_servers[server_id or server_url] = server_url\n else:\n await self.mcp_clients.connect_sse(server_url, server_id)\n self.connected_servers[server_id or server_url] = server_url\n\n # Update available tools with only the new tools from this server\n new_tools = [\n tool for 
tool in self.mcp_clients.tools if tool.server_id == server_id\n ]\n self.available_tools.add_tools(*new_tools)\n\n async def disconnect_mcp_server(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from an MCP server and remove its tools.\"\"\"\n await self.mcp_clients.disconnect(server_id)\n if server_id:\n self.connected_servers.pop(server_id, None)\n else:\n self.connected_servers.clear()\n\n # Rebuild available tools without the disconnected server's tools\n base_tools = [\n tool\n for tool in self.available_tools.tools\n if not isinstance(tool, MCPClientTool)\n ]\n self.available_tools = ToolCollection(*base_tools)\n self.available_tools.add_tools(*self.mcp_clients.tools)\n\n async def cleanup(self):\n \"\"\"Clean up Manus agent resources.\"\"\"\n if self.browser_context_helper:\n await self.browser_context_helper.cleanup_browser()\n # Disconnect from all MCP servers only if we were initialized\n if self._initialized:\n await self.disconnect_mcp_server()\n self._initialized = False\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions with appropriate context.\"\"\"\n if not self._initialized:\n await self.initialize_mcp_servers()\n self._initialized = True\n\n original_prompt = self.next_step_prompt\n recent_messages = self.memory.messages[-3:] if self.memory.messages else []\n browser_in_use = any(\n tc.function.name == BrowserUseTool().name\n for msg in recent_messages\n if msg.tool_calls\n for tc in msg.tool_calls\n )\n\n if browser_in_use:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n\n result = await super().think()\n\n # Restore original prompt\n self.next_step_prompt = original_prompt\n\n return result\n"], ["/OpenManus/protocol/a2a/app/main.py", "import httpx\nimport argparse\n\nfrom a2a.server.apps import A2AStarletteApplication\nfrom a2a.server.request_handlers import DefaultRequestHandler\nfrom a2a.server.tasks import InMemoryTaskStore, 
InMemoryPushNotifier\nfrom a2a.types import (\n AgentCapabilities,\n AgentCard,\n AgentSkill,\n)\n\nfrom .agent_executor import ManusExecutor\n\nfrom .agent import A2AManus\nfrom app.tool.browser_use_tool import _BROWSER_DESCRIPTION\nfrom app.tool.str_replace_editor import _STR_REPLACE_EDITOR_DESCRIPTION\nfrom app.tool.terminate import _TERMINATE_DESCRIPTION\nimport logging\nfrom dotenv import load_dotenv\nimport asyncio\nfrom typing import Optional\n\nload_dotenv()\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nasync def main(host: str = \"localhost\", port: int = 10000):\n \"\"\"Starts the Manus Agent server.\"\"\"\n try:\n capabilities = AgentCapabilities(streaming=False, pushNotifications=True)\n skills = [\n AgentSkill(\n id=\"Python Execute\",\n name=\"Python Execute Tool\",\n description=\"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results.\",\n tags=[\"Execute Python Code\"],\n examples=[\n \"Execute Python code:'''python \\n Print('Hello World') \\n '''\"\n ],\n ),\n AgentSkill(\n id=\"Browser use\",\n name=\"Browser use Tool\",\n description=_BROWSER_DESCRIPTION,\n tags=[\"Use Browser\"],\n examples=[\"go_to 'https://www.google.com'\"],\n ),\n AgentSkill(\n id=\"Replace String\",\n name=\"Str_replace Tool\",\n description=_STR_REPLACE_EDITOR_DESCRIPTION,\n tags=[\"Operate Files\"],\n examples=[\"Replace 'old' with 'new' in 'file.txt'\"],\n ),\n AgentSkill(\n id=\"Ask human\",\n name=\"Ask human Tool\",\n description=\"Use this tool to ask human for help.\",\n tags=[\"Ask human for help\"],\n examples=[\"Ask human: 'What time is it?'\"],\n ),\n AgentSkill(\n id=\"terminate\",\n name=\"terminate Tool\",\n description=_TERMINATE_DESCRIPTION,\n tags=[\"terminate task\"],\n examples=[\"terminate\"],\n ),\n # Add more skills as needed\n ]\n\n agent_card = AgentCard(\n name=\"Manus Agent\",\n description=\"A versatile agent that can 
solve various tasks using multiple tools including MCP-based tools\",\n url=f\"http://{host}:{port}/\",\n version=\"1.0.0\",\n defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n capabilities=capabilities,\n skills=skills,\n )\n\n httpx_client = httpx.AsyncClient()\n request_handler = DefaultRequestHandler(\n agent_executor=ManusExecutor(\n agent_factory=lambda: A2AManus.create(max_steps=3)\n ),\n task_store=InMemoryTaskStore(),\n push_notifier=InMemoryPushNotifier(httpx_client),\n )\n\n server = A2AStarletteApplication(\n agent_card=agent_card, http_handler=request_handler\n )\n\n logger.info(f\"Starting server on {host}:{port}\")\n return server.build()\n except Exception as e:\n logger.error(f\"An error occurred during server startup: {e}\")\n exit(1)\n\n\ndef run_server(host: Optional[str] = \"localhost\", port: Optional[int] = 10000):\n try:\n import uvicorn\n\n app = asyncio.run(main(host, port))\n config = uvicorn.Config(\n app=app, host=host, port=port, loop=\"asyncio\", proxy_headers=True\n )\n uvicorn.Server(config=config).run()\n logger.info(f\"Server started on {host}:{port}\")\n except Exception as e:\n logger.error(f\"An error occurred while starting the server: {e}\")\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments for host and port, with default values\n parser = argparse.ArgumentParser(description=\"Start Manus Agent service\")\n parser.add_argument(\n \"--host\",\n type=str,\n default=\"localhost\",\n help=\"Server host address, default is localhost\",\n )\n parser.add_argument(\n \"--port\", type=int, default=10000, help=\"Server port, default is 10000\"\n )\n args = parser.parse_args()\n # Start the server with the specified or default host and port\n run_server(args.host, args.port)\n"], ["/OpenManus/app/tool/mcp.py", "from contextlib import AsyncExitStack\nfrom typing import Dict, List, Optional\n\nfrom mcp import ClientSession, StdioServerParameters\nfrom 
mcp.client.sse import sse_client\nfrom mcp.client.stdio import stdio_client\nfrom mcp.types import ListToolsResult, TextContent\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.tool_collection import ToolCollection\n\n\nclass MCPClientTool(BaseTool):\n \"\"\"Represents a tool proxy that can be called on the MCP server from the client side.\"\"\"\n\n session: Optional[ClientSession] = None\n server_id: str = \"\" # Add server identifier\n original_name: str = \"\"\n\n async def execute(self, **kwargs) -> ToolResult:\n \"\"\"Execute the tool by making a remote call to the MCP server.\"\"\"\n if not self.session:\n return ToolResult(error=\"Not connected to MCP server\")\n\n try:\n logger.info(f\"Executing tool: {self.original_name}\")\n result = await self.session.call_tool(self.original_name, kwargs)\n content_str = \", \".join(\n item.text for item in result.content if isinstance(item, TextContent)\n )\n return ToolResult(output=content_str or \"No output returned.\")\n except Exception as e:\n return ToolResult(error=f\"Error executing tool: {str(e)}\")\n\n\nclass MCPClients(ToolCollection):\n \"\"\"\n A collection of tools that connects to multiple MCP servers and manages available tools through the Model Context Protocol.\n \"\"\"\n\n sessions: Dict[str, ClientSession] = {}\n exit_stacks: Dict[str, AsyncExitStack] = {}\n description: str = \"MCP client tools for server interaction\"\n\n def __init__(self):\n super().__init__() # Initialize with empty tools list\n self.name = \"mcp\" # Keep name for backward compatibility\n\n async def connect_sse(self, server_url: str, server_id: str = \"\") -> None:\n \"\"\"Connect to an MCP server using SSE transport.\"\"\"\n if not server_url:\n raise ValueError(\"Server URL is required.\")\n\n server_id = server_id or server_url\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = 
AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n streams_context = sse_client(url=server_url)\n streams = await exit_stack.enter_async_context(streams_context)\n session = await exit_stack.enter_async_context(ClientSession(*streams))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def connect_stdio(\n self, command: str, args: List[str], server_id: str = \"\"\n ) -> None:\n \"\"\"Connect to an MCP server using stdio transport.\"\"\"\n if not command:\n raise ValueError(\"Server command is required.\")\n\n server_id = server_id or command\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n server_params = StdioServerParameters(command=command, args=args)\n stdio_transport = await exit_stack.enter_async_context(\n stdio_client(server_params)\n )\n read, write = stdio_transport\n session = await exit_stack.enter_async_context(ClientSession(read, write))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def _initialize_and_list_tools(self, server_id: str) -> None:\n \"\"\"Initialize session and populate tool map.\"\"\"\n session = self.sessions.get(server_id)\n if not session:\n raise RuntimeError(f\"Session not initialized for server {server_id}\")\n\n await session.initialize()\n response = await session.list_tools()\n\n # Create proper tool objects for each server tool\n for tool in response.tools:\n original_name = tool.name\n tool_name = f\"mcp_{server_id}_{original_name}\"\n tool_name = self._sanitize_tool_name(tool_name)\n\n server_tool = MCPClientTool(\n name=tool_name,\n description=tool.description,\n parameters=tool.inputSchema,\n session=session,\n server_id=server_id,\n original_name=original_name,\n )\n self.tool_map[tool_name] = server_tool\n\n # Update tools tuple\n self.tools = 
tuple(self.tool_map.values())\n logger.info(\n f\"Connected to server {server_id} with tools: {[tool.name for tool in response.tools]}\"\n )\n\n def _sanitize_tool_name(self, name: str) -> str:\n \"\"\"Sanitize tool name to match MCPClientTool requirements.\"\"\"\n import re\n\n # Replace invalid characters with underscores\n sanitized = re.sub(r\"[^a-zA-Z0-9_-]\", \"_\", name)\n\n # Remove consecutive underscores\n sanitized = re.sub(r\"_+\", \"_\", sanitized)\n\n # Remove leading/trailing underscores\n sanitized = sanitized.strip(\"_\")\n\n # Truncate to 64 characters if needed\n if len(sanitized) > 64:\n sanitized = sanitized[:64]\n\n return sanitized\n\n async def list_tools(self) -> ListToolsResult:\n \"\"\"List all available tools.\"\"\"\n tools_result = ListToolsResult(tools=[])\n for session in self.sessions.values():\n response = await session.list_tools()\n tools_result.tools += response.tools\n return tools_result\n\n async def disconnect(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from a specific MCP server or all servers if no server_id provided.\"\"\"\n if server_id:\n if server_id in self.sessions:\n try:\n exit_stack = self.exit_stacks.get(server_id)\n\n # Close the exit stack which will handle session cleanup\n if exit_stack:\n try:\n await exit_stack.aclose()\n except RuntimeError as e:\n if \"cancel scope\" in str(e).lower():\n logger.warning(\n f\"Cancel scope error during disconnect from {server_id}, continuing with cleanup: {e}\"\n )\n else:\n raise\n\n # Clean up references\n self.sessions.pop(server_id, None)\n self.exit_stacks.pop(server_id, None)\n\n # Remove tools associated with this server\n self.tool_map = {\n k: v\n for k, v in self.tool_map.items()\n if v.server_id != server_id\n }\n self.tools = tuple(self.tool_map.values())\n logger.info(f\"Disconnected from MCP server {server_id}\")\n except Exception as e:\n logger.error(f\"Error disconnecting from server {server_id}: {e}\")\n else:\n # Disconnect from all servers in 
a deterministic order\n for sid in sorted(list(self.sessions.keys())):\n await self.disconnect(sid)\n self.tool_map = {}\n self.tools = tuple()\n logger.info(\"Disconnected from all MCP servers\")\n"], ["/OpenManus/app/flow/planning.py", "import json\nimport time\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.schema import AgentState, Message, ToolChoice\nfrom app.tool import PlanningTool\n\n\nclass PlanStepStatus(str, Enum):\n \"\"\"Enum class defining possible statuses of a plan step\"\"\"\n\n NOT_STARTED = \"not_started\"\n IN_PROGRESS = \"in_progress\"\n COMPLETED = \"completed\"\n BLOCKED = \"blocked\"\n\n @classmethod\n def get_all_statuses(cls) -> list[str]:\n \"\"\"Return a list of all possible step status values\"\"\"\n return [status.value for status in cls]\n\n @classmethod\n def get_active_statuses(cls) -> list[str]:\n \"\"\"Return a list of values representing active statuses (not started or in progress)\"\"\"\n return [cls.NOT_STARTED.value, cls.IN_PROGRESS.value]\n\n @classmethod\n def get_status_marks(cls) -> Dict[str, str]:\n \"\"\"Return a mapping of statuses to their marker symbols\"\"\"\n return {\n cls.COMPLETED.value: \"[✓]\",\n cls.IN_PROGRESS.value: \"[→]\",\n cls.BLOCKED.value: \"[!]\",\n cls.NOT_STARTED.value: \"[ ]\",\n }\n\n\nclass PlanningFlow(BaseFlow):\n \"\"\"A flow that manages planning and execution of tasks using agents.\"\"\"\n\n llm: LLM = Field(default_factory=lambda: LLM())\n planning_tool: PlanningTool = Field(default_factory=PlanningTool)\n executor_keys: List[str] = Field(default_factory=list)\n active_plan_id: str = Field(default_factory=lambda: f\"plan_{int(time.time())}\")\n current_step_index: Optional[int] = None\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Set 
executor keys before super().__init__\n if \"executors\" in data:\n data[\"executor_keys\"] = data.pop(\"executors\")\n\n # Set plan ID if provided\n if \"plan_id\" in data:\n data[\"active_plan_id\"] = data.pop(\"plan_id\")\n\n # Initialize the planning tool if not provided\n if \"planning_tool\" not in data:\n planning_tool = PlanningTool()\n data[\"planning_tool\"] = planning_tool\n\n # Call parent's init with the processed data\n super().__init__(agents, **data)\n\n # Set executor_keys to all agent keys if not specified\n if not self.executor_keys:\n self.executor_keys = list(self.agents.keys())\n\n def get_executor(self, step_type: Optional[str] = None) -> BaseAgent:\n \"\"\"\n Get an appropriate executor agent for the current step.\n Can be extended to select agents based on step type/requirements.\n \"\"\"\n # If step type is provided and matches an agent key, use that agent\n if step_type and step_type in self.agents:\n return self.agents[step_type]\n\n # Otherwise use the first available executor or fall back to primary agent\n for key in self.executor_keys:\n if key in self.agents:\n return self.agents[key]\n\n # Fallback to primary agent\n return self.primary_agent\n\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the planning flow with agents.\"\"\"\n try:\n if not self.primary_agent:\n raise ValueError(\"No primary agent available\")\n\n # Create initial plan if input provided\n if input_text:\n await self._create_initial_plan(input_text)\n\n # Verify plan was created successfully\n if self.active_plan_id not in self.planning_tool.plans:\n logger.error(\n f\"Plan creation failed. 
Plan ID {self.active_plan_id} not found in planning tool.\"\n )\n return f\"Failed to create plan for: {input_text}\"\n\n result = \"\"\n while True:\n # Get current step to execute\n self.current_step_index, step_info = await self._get_current_step_info()\n\n # Exit if no more steps or plan completed\n if self.current_step_index is None:\n result += await self._finalize_plan()\n break\n\n # Execute current step with appropriate agent\n step_type = step_info.get(\"type\") if step_info else None\n executor = self.get_executor(step_type)\n step_result = await self._execute_step(executor, step_info)\n result += step_result + \"\\n\"\n\n # Check if agent wants to terminate\n if hasattr(executor, \"state\") and executor.state == AgentState.FINISHED:\n break\n\n return result\n except Exception as e:\n logger.error(f\"Error in PlanningFlow: {str(e)}\")\n return f\"Execution failed: {str(e)}\"\n\n async def _create_initial_plan(self, request: str) -> None:\n \"\"\"Create an initial plan based on the request using the flow's LLM and PlanningTool.\"\"\"\n logger.info(f\"Creating initial plan with ID: {self.active_plan_id}\")\n\n system_message_content = (\n \"You are a planning assistant. Create a concise, actionable plan with clear steps. \"\n \"Focus on key milestones rather than detailed sub-steps. \"\n \"Optimize for clarity and efficiency.\"\n )\n agents_description = []\n for key in self.executor_keys:\n if key in self.agents:\n agents_description.append(\n {\n \"name\": key.upper(),\n \"description\": self.agents[key].description,\n }\n )\n if len(agents_description) > 1:\n # Add description of agents to select\n system_message_content += (\n f\"\\nNow we have {agents_description} agents. 
\"\n f\"The infomation of them are below: {json.dumps(agents_description)}\\n\"\n \"When creating steps in the planning tool, please specify the agent names using the format '[agent_name]'.\"\n )\n\n # Create a system message for plan creation\n system_message = Message.system_message(system_message_content)\n\n # Create a user message with the request\n user_message = Message.user_message(\n f\"Create a reasonable plan with clear steps to accomplish the task: {request}\"\n )\n\n # Call LLM with PlanningTool\n response = await self.llm.ask_tool(\n messages=[user_message],\n system_msgs=[system_message],\n tools=[self.planning_tool.to_param()],\n tool_choice=ToolChoice.AUTO,\n )\n\n # Process tool calls if present\n if response.tool_calls:\n for tool_call in response.tool_calls:\n if tool_call.function.name == \"planning\":\n # Parse the arguments\n args = tool_call.function.arguments\n if isinstance(args, str):\n try:\n args = json.loads(args)\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse tool arguments: {args}\")\n continue\n\n # Ensure plan_id is set correctly and execute the tool\n args[\"plan_id\"] = self.active_plan_id\n\n # Execute the tool via ToolCollection instead of directly\n result = await self.planning_tool.execute(**args)\n\n logger.info(f\"Plan creation result: {str(result)}\")\n return\n\n # If execution reached here, create a default plan\n logger.warning(\"Creating default plan\")\n\n # Create default plan using the ToolCollection\n await self.planning_tool.execute(\n **{\n \"command\": \"create\",\n \"plan_id\": self.active_plan_id,\n \"title\": f\"Plan for: {request[:50]}{'...' 
if len(request) > 50 else ''}\",\n \"steps\": [\"Analyze request\", \"Execute task\", \"Verify results\"],\n }\n )\n\n async def _get_current_step_info(self) -> tuple[Optional[int], Optional[dict]]:\n \"\"\"\n Parse the current plan to identify the first non-completed step's index and info.\n Returns (None, None) if no active step is found.\n \"\"\"\n if (\n not self.active_plan_id\n or self.active_plan_id not in self.planning_tool.plans\n ):\n logger.error(f\"Plan with ID {self.active_plan_id} not found\")\n return None, None\n\n try:\n # Direct access to plan data from planning tool storage\n plan_data = self.planning_tool.plans[self.active_plan_id]\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Find first non-completed step\n for i, step in enumerate(steps):\n if i >= len(step_statuses):\n status = PlanStepStatus.NOT_STARTED.value\n else:\n status = step_statuses[i]\n\n if status in PlanStepStatus.get_active_statuses():\n # Extract step type/category if available\n step_info = {\"text\": step}\n\n # Try to extract step type from the text (e.g., [SEARCH] or [CODE])\n import re\n\n type_match = re.search(r\"\\[([A-Z_]+)\\]\", step)\n if type_match:\n step_info[\"type\"] = type_match.group(1).lower()\n\n # Mark current step as in_progress\n try:\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=i,\n step_status=PlanStepStatus.IN_PROGRESS.value,\n )\n except Exception as e:\n logger.warning(f\"Error marking step as in_progress: {e}\")\n # Update step status directly if needed\n if i < len(step_statuses):\n step_statuses[i] = PlanStepStatus.IN_PROGRESS.value\n else:\n while len(step_statuses) < i:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n step_statuses.append(PlanStepStatus.IN_PROGRESS.value)\n\n plan_data[\"step_statuses\"] = step_statuses\n\n return i, step_info\n\n return None, None # No active step found\n\n except Exception as e:\n 
logger.warning(f\"Error finding current step index: {e}\")\n return None, None\n\n async def _execute_step(self, executor: BaseAgent, step_info: dict) -> str:\n \"\"\"Execute the current step with the specified agent using agent.run().\"\"\"\n # Prepare context for the agent with current plan status\n plan_status = await self._get_plan_text()\n step_text = step_info.get(\"text\", f\"Step {self.current_step_index}\")\n\n # Create a prompt for the agent to execute the current step\n step_prompt = f\"\"\"\n CURRENT PLAN STATUS:\n {plan_status}\n\n YOUR CURRENT TASK:\n You are now working on step {self.current_step_index}: \"{step_text}\"\n\n Please only execute this current step using the appropriate tools. When you're done, provide a summary of what you accomplished.\n \"\"\"\n\n # Use agent.run() to execute the step\n try:\n step_result = await executor.run(step_prompt)\n\n # Mark the step as completed after successful execution\n await self._mark_step_completed()\n\n return step_result\n except Exception as e:\n logger.error(f\"Error executing step {self.current_step_index}: {e}\")\n return f\"Error executing step {self.current_step_index}: {str(e)}\"\n\n async def _mark_step_completed(self) -> None:\n \"\"\"Mark the current step as completed.\"\"\"\n if self.current_step_index is None:\n return\n\n try:\n # Mark the step as completed\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=self.current_step_index,\n step_status=PlanStepStatus.COMPLETED.value,\n )\n logger.info(\n f\"Marked step {self.current_step_index} as completed in plan {self.active_plan_id}\"\n )\n except Exception as e:\n logger.warning(f\"Failed to update plan status: {e}\")\n # Update step status directly in planning tool storage\n if self.active_plan_id in self.planning_tool.plans:\n plan_data = self.planning_tool.plans[self.active_plan_id]\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Ensure the step_statuses list is long 
enough\n while len(step_statuses) <= self.current_step_index:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n\n # Update the status\n step_statuses[self.current_step_index] = PlanStepStatus.COMPLETED.value\n plan_data[\"step_statuses\"] = step_statuses\n\n async def _get_plan_text(self) -> str:\n \"\"\"Get the current plan as formatted text.\"\"\"\n try:\n result = await self.planning_tool.execute(\n command=\"get\", plan_id=self.active_plan_id\n )\n return result.output if hasattr(result, \"output\") else str(result)\n except Exception as e:\n logger.error(f\"Error getting plan: {e}\")\n return self._generate_plan_text_from_storage()\n\n def _generate_plan_text_from_storage(self) -> str:\n \"\"\"Generate plan text directly from storage if the planning tool fails.\"\"\"\n try:\n if self.active_plan_id not in self.planning_tool.plans:\n return f\"Error: Plan with ID {self.active_plan_id} not found\"\n\n plan_data = self.planning_tool.plans[self.active_plan_id]\n title = plan_data.get(\"title\", \"Untitled Plan\")\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n step_notes = plan_data.get(\"step_notes\", [])\n\n # Ensure step_statuses and step_notes match the number of steps\n while len(step_statuses) < len(steps):\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n while len(step_notes) < len(steps):\n step_notes.append(\"\")\n\n # Count steps by status\n status_counts = {status: 0 for status in PlanStepStatus.get_all_statuses()}\n\n for status in step_statuses:\n if status in status_counts:\n status_counts[status] += 1\n\n completed = status_counts[PlanStepStatus.COMPLETED.value]\n total = len(steps)\n progress = (completed / total) * 100 if total > 0 else 0\n\n plan_text = f\"Plan: {title} (ID: {self.active_plan_id})\\n\"\n plan_text += \"=\" * len(plan_text) + \"\\n\\n\"\n\n plan_text += (\n f\"Progress: {completed}/{total} steps completed ({progress:.1f}%)\\n\"\n )\n plan_text += f\"Status: 
{status_counts[PlanStepStatus.COMPLETED.value]} completed, {status_counts[PlanStepStatus.IN_PROGRESS.value]} in progress, \"\n plan_text += f\"{status_counts[PlanStepStatus.BLOCKED.value]} blocked, {status_counts[PlanStepStatus.NOT_STARTED.value]} not started\\n\\n\"\n plan_text += \"Steps:\\n\"\n\n status_marks = PlanStepStatus.get_status_marks()\n\n for i, (step, status, notes) in enumerate(\n zip(steps, step_statuses, step_notes)\n ):\n # Use status marks to indicate step status\n status_mark = status_marks.get(\n status, status_marks[PlanStepStatus.NOT_STARTED.value]\n )\n\n plan_text += f\"{i}. {status_mark} {step}\\n\"\n if notes:\n plan_text += f\" Notes: {notes}\\n\"\n\n return plan_text\n except Exception as e:\n logger.error(f\"Error generating plan text from storage: {e}\")\n return f\"Error: Unable to retrieve plan with ID {self.active_plan_id}\"\n\n async def _finalize_plan(self) -> str:\n \"\"\"Finalize the plan and provide a summary using the flow's LLM directly.\"\"\"\n plan_text = await self._get_plan_text()\n\n # Create a summary using the flow's LLM directly\n try:\n system_message = Message.system_message(\n \"You are a planning assistant. Your task is to summarize the completed plan.\"\n )\n\n user_message = Message.user_message(\n f\"The plan has been completed. Here is the final plan status:\\n\\n{plan_text}\\n\\nPlease provide a summary of what was accomplished and any final thoughts.\"\n )\n\n response = await self.llm.ask(\n messages=[user_message], system_msgs=[system_message]\n )\n\n return f\"Plan completed:\\n\\n{response}\"\n except Exception as e:\n logger.error(f\"Error finalizing plan with LLM: {e}\")\n\n # Fallback to using an agent for the summary\n try:\n agent = self.primary_agent\n summary_prompt = f\"\"\"\n The plan has been completed. 
Here is the final plan status:\n\n {plan_text}\n\n Please provide a summary of what was accomplished and any final thoughts.\n \"\"\"\n summary = await agent.run(summary_prompt)\n return f\"Plan completed:\\n\\n{summary}\"\n except Exception as e2:\n logger.error(f\"Error finalizing plan with agent: {e2}\")\n return \"Plan completed. Error generating summary.\"\n"], ["/OpenManus/run_flow.py", "import asyncio\nimport time\n\nfrom app.agent.data_analysis import DataAnalysis\nfrom app.agent.manus import Manus\nfrom app.config import config\nfrom app.flow.flow_factory import FlowFactory, FlowType\nfrom app.logger import logger\n\n\nasync def run_flow():\n agents = {\n \"manus\": Manus(),\n }\n if config.run_flow_config.use_data_analysis_agent:\n agents[\"data_analysis\"] = DataAnalysis()\n try:\n prompt = input(\"Enter your prompt: \")\n\n if prompt.strip().isspace() or not prompt:\n logger.warning(\"Empty prompt provided.\")\n return\n\n flow = FlowFactory.create_flow(\n flow_type=FlowType.PLANNING,\n agents=agents,\n )\n logger.warning(\"Processing your request...\")\n\n try:\n start_time = time.time()\n result = await asyncio.wait_for(\n flow.execute(prompt),\n timeout=3600, # 60 minute timeout for the entire execution\n )\n elapsed_time = time.time() - start_time\n logger.info(f\"Request processed in {elapsed_time:.2f} seconds\")\n logger.info(result)\n except asyncio.TimeoutError:\n logger.error(\"Request processing timed out after 1 hour\")\n logger.info(\n \"Operation terminated due to timeout. 
Please try a simpler request.\"\n )\n\n except KeyboardInterrupt:\n logger.info(\"Operation cancelled by user.\")\n except Exception as e:\n logger.error(f\"Error: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_flow())\n"], ["/OpenManus/app/sandbox/core/terminal.py", "\"\"\"\nAsynchronous Docker Terminal\n\nThis module provides asynchronous terminal functionality for Docker containers,\nallowing interactive command execution with timeout control.\n\"\"\"\n\nimport asyncio\nimport re\nimport socket\nfrom typing import Dict, Optional, Tuple, Union\n\nimport docker\nfrom docker import APIClient\nfrom docker.errors import APIError\nfrom docker.models.containers import Container\n\n\nclass DockerSession:\n def __init__(self, container_id: str) -> None:\n \"\"\"Initializes a Docker session.\n\n Args:\n container_id: ID of the Docker container.\n \"\"\"\n self.api = APIClient()\n self.container_id = container_id\n self.exec_id = None\n self.socket = None\n\n async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None:\n \"\"\"Creates an interactive session with the container.\n\n Args:\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n\n Raises:\n RuntimeError: If socket connection fails.\n \"\"\"\n startup_command = [\n \"bash\",\n \"-c\",\n f\"cd {working_dir} && \"\n \"PROMPT_COMMAND='' \"\n \"PS1='$ ' \"\n \"exec bash --norc --noprofile\",\n ]\n\n exec_data = self.api.exec_create(\n self.container_id,\n startup_command,\n stdin=True,\n tty=True,\n stdout=True,\n stderr=True,\n privileged=True,\n user=\"root\",\n environment={**env_vars, \"TERM\": \"dumb\", \"PS1\": \"$ \", \"PROMPT_COMMAND\": \"\"},\n )\n self.exec_id = exec_data[\"Id\"]\n\n socket_data = self.api.exec_start(\n self.exec_id, socket=True, tty=True, stream=True, demux=True\n )\n\n if hasattr(socket_data, \"_sock\"):\n self.socket = socket_data._sock\n self.socket.setblocking(False)\n else:\n raise RuntimeError(\"Failed to 
get socket connection\")\n\n await self._read_until_prompt()\n\n async def close(self) -> None:\n \"\"\"Cleans up session resources.\n\n 1. Sends exit command\n 2. Closes socket connection\n 3. Checks and cleans up exec instance\n \"\"\"\n try:\n if self.socket:\n # Send exit command to close bash session\n try:\n self.socket.sendall(b\"exit\\n\")\n # Allow time for command execution\n await asyncio.sleep(0.1)\n except:\n pass # Ignore sending errors, continue cleanup\n\n # Close socket connection\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except:\n pass # Some platforms may not support shutdown\n\n self.socket.close()\n self.socket = None\n\n if self.exec_id:\n try:\n # Check exec instance status\n exec_inspect = self.api.exec_inspect(self.exec_id)\n if exec_inspect.get(\"Running\", False):\n # If still running, wait for it to complete\n await asyncio.sleep(0.5)\n except:\n pass # Ignore inspection errors, continue cleanup\n\n self.exec_id = None\n\n except Exception as e:\n # Log error but don't raise, ensure cleanup continues\n print(f\"Warning: Error during session cleanup: {e}\")\n\n async def _read_until_prompt(self) -> str:\n \"\"\"Reads output until prompt is found.\n\n Returns:\n String containing output up to the prompt.\n\n Raises:\n socket.error: If socket communication fails.\n \"\"\"\n buffer = b\"\"\n while b\"$ \" not in buffer:\n try:\n chunk = self.socket.recv(4096)\n if chunk:\n buffer += chunk\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n return buffer.decode(\"utf-8\")\n\n async def execute(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes a command and returns cleaned output.\n\n Args:\n command: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string with prompt markers removed.\n\n Raises:\n RuntimeError: If session not initialized or execution fails.\n TimeoutError: If command execution 
exceeds timeout.\n \"\"\"\n if not self.socket:\n raise RuntimeError(\"Session not initialized\")\n\n try:\n # Sanitize command to prevent shell injection\n sanitized_command = self._sanitize_command(command)\n full_command = f\"{sanitized_command}\\necho $?\\n\"\n self.socket.sendall(full_command.encode())\n\n async def read_output() -> str:\n buffer = b\"\"\n result_lines = []\n command_sent = False\n\n while True:\n try:\n chunk = self.socket.recv(4096)\n if not chunk:\n break\n\n buffer += chunk\n lines = buffer.split(b\"\\n\")\n\n buffer = lines[-1]\n lines = lines[:-1]\n\n for line in lines:\n line = line.rstrip(b\"\\r\")\n\n if not command_sent:\n command_sent = True\n continue\n\n if line.strip() == b\"echo $?\" or line.strip().isdigit():\n continue\n\n if line.strip():\n result_lines.append(line)\n\n if buffer.endswith(b\"$ \"):\n break\n\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n\n output = b\"\\n\".join(result_lines).decode(\"utf-8\")\n output = re.sub(r\"\\n\\$ echo \\$\\$?.*$\", \"\", output)\n\n return output\n\n if timeout:\n result = await asyncio.wait_for(read_output(), timeout)\n else:\n result = await read_output()\n\n return result.strip()\n\n except asyncio.TimeoutError:\n raise TimeoutError(f\"Command execution timed out after {timeout} seconds\")\n except Exception as e:\n raise RuntimeError(f\"Failed to execute command: {e}\")\n\n def _sanitize_command(self, command: str) -> str:\n \"\"\"Sanitizes the command string to prevent shell injection.\n\n Args:\n command: Raw command string.\n\n Returns:\n Sanitized command string.\n\n Raises:\n ValueError: If command contains potentially dangerous patterns.\n \"\"\"\n\n # Additional checks for specific risky commands\n risky_commands = [\n \"rm -rf /\",\n \"rm -rf /*\",\n \"mkfs\",\n \"dd if=/dev/zero\",\n \":(){:|:&};:\",\n \"chmod -R 777 /\",\n \"chown -R\",\n ]\n\n for risky in risky_commands:\n if risky in command.lower():\n 
raise ValueError(\n f\"Command contains potentially dangerous operation: {risky}\"\n )\n\n return command\n\n\nclass AsyncDockerizedTerminal:\n def __init__(\n self,\n container: Union[str, Container],\n working_dir: str = \"/workspace\",\n env_vars: Optional[Dict[str, str]] = None,\n default_timeout: int = 60,\n ) -> None:\n \"\"\"Initializes an asynchronous terminal for Docker containers.\n\n Args:\n container: Docker container ID or Container object.\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n default_timeout: Default command execution timeout in seconds.\n \"\"\"\n self.client = docker.from_env()\n self.container = (\n container\n if isinstance(container, Container)\n else self.client.containers.get(container)\n )\n self.working_dir = working_dir\n self.env_vars = env_vars or {}\n self.default_timeout = default_timeout\n self.session = None\n\n async def init(self) -> None:\n \"\"\"Initializes the terminal environment.\n\n Ensures working directory exists and creates an interactive session.\n\n Raises:\n RuntimeError: If initialization fails.\n \"\"\"\n await self._ensure_workdir()\n\n self.session = DockerSession(self.container.id)\n await self.session.create(self.working_dir, self.env_vars)\n\n async def _ensure_workdir(self) -> None:\n \"\"\"Ensures working directory exists in container.\n\n Raises:\n RuntimeError: If directory creation fails.\n \"\"\"\n try:\n await self._exec_simple(f\"mkdir -p {self.working_dir}\")\n except APIError as e:\n raise RuntimeError(f\"Failed to create working directory: {e}\")\n\n async def _exec_simple(self, cmd: str) -> Tuple[int, str]:\n \"\"\"Executes a simple command using Docker's exec_run.\n\n Args:\n cmd: Command to execute.\n\n Returns:\n Tuple of (exit_code, output).\n \"\"\"\n result = await asyncio.to_thread(\n self.container.exec_run, cmd, environment=self.env_vars\n )\n return result.exit_code, result.output.decode(\"utf-8\")\n\n async def run_command(self, 
cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the container with timeout.\n\n Args:\n cmd: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If terminal not initialized.\n \"\"\"\n if not self.session:\n raise RuntimeError(\"Terminal not initialized\")\n\n return await self.session.execute(cmd, timeout=timeout or self.default_timeout)\n\n async def close(self) -> None:\n \"\"\"Closes the terminal session.\"\"\"\n if self.session:\n await self.session.close()\n\n async def __aenter__(self) -> \"AsyncDockerizedTerminal\":\n \"\"\"Async context manager entry.\"\"\"\n await self.init()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.close()\n"], ["/OpenManus/app/tool/chart_visualization/data_visualization.py", "import asyncio\nimport json\nimport os\nfrom typing import Any, Hashable\n\nimport pandas as pd\nfrom pydantic import Field, model_validator\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\n\n\nclass DataVisualization(BaseTool):\n name: str = \"data_visualization\"\n description: str = \"\"\"Visualize statistical chart or Add insights in chart with JSON info from visualization_preparation tool. You can do steps as follows:\n1. Visualize statistical chart\n2. Choose insights into chart based on step 1 (Optional)\nOutputs:\n1. Charts (png/html)\n2. 
Charts Insights (.md)(Optional)\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"json_path\": {\n \"type\": \"string\",\n \"description\": \"\"\"file path of json info with \".json\" in the end\"\"\",\n },\n \"output_type\": {\n \"description\": \"Rendering format (html=interactive)\",\n \"type\": \"string\",\n \"default\": \"html\",\n \"enum\": [\"png\", \"html\"],\n },\n \"tool_type\": {\n \"description\": \"visualize chart or add insights\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"language\": {\n \"description\": \"english(en) / chinese(zh)\",\n \"type\": \"string\",\n \"default\": \"en\",\n \"enum\": [\"zh\", \"en\"],\n },\n },\n \"required\": [\"code\"],\n }\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n\n @model_validator(mode=\"after\")\n def initialize_llm(self):\n \"\"\"Initialize llm with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n return self\n\n def get_file_path(\n self,\n json_info: list[dict[str, str]],\n path_str: str,\n directory: str = None,\n ) -> list[str]:\n res = []\n for item in json_info:\n if os.path.exists(item[path_str]):\n res.append(item[path_str])\n elif os.path.exists(\n os.path.join(f\"{directory or config.workspace_root}\", item[path_str])\n ):\n res.append(\n os.path.join(\n f\"{directory or config.workspace_root}\", item[path_str]\n )\n )\n else:\n raise Exception(f\"No such file or directory: {item[path_str]}\")\n return res\n\n def success_output_template(self, result: list[dict[str, str]]) -> str:\n content = \"\"\n if len(result) == 0:\n return \"Is EMPTY!\"\n for item in result:\n content += f\"\"\"## {item['title']}\\nChart saved in: {item['chart_path']}\"\"\"\n if \"insight_path\" in item and item[\"insight_path\"] and \"insight_md\" in item:\n content += \"\\n\" + item[\"insight_md\"]\n 
else:\n content += \"\\n\"\n return f\"Chart Generated Successful!\\n{content}\"\n\n async def data_visualization(\n self, json_info: list[dict[str, str]], output_type: str, language: str\n ) -> str:\n data_list = []\n csv_file_path = self.get_file_path(json_info, \"csvFilePath\")\n for index, item in enumerate(json_info):\n df = pd.read_csv(csv_file_path[index], encoding=\"utf-8\")\n df = df.astype(object)\n df = df.where(pd.notnull(df), None)\n data_dict_list = df.to_json(orient=\"records\", force_ascii=False)\n\n data_list.append(\n {\n \"file_name\": os.path.basename(csv_file_path[index]).replace(\n \".csv\", \"\"\n ),\n \"dict_data\": data_dict_list,\n \"chartTitle\": item[\"chartTitle\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n dict_data=item[\"dict_data\"],\n chart_description=item[\"chartTitle\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"visualization\",\n language=language,\n )\n for item in data_list\n ]\n\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n csv_path = csv_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {csv_path}: {result['error']}\")\n else:\n success_list.append(\n {\n **result,\n \"title\": json_info[index][\"chartTitle\"],\n }\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error chart generated{'\\n'.join(error_list)}\\n{self.success_output_template(success_list)}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{self.success_output_template(success_list)}\"}\n\n async def add_insighs(\n self, json_info: list[dict[str, str]], output_type: str\n ) -> str:\n data_list = []\n chart_file_path = self.get_file_path(\n json_info, \"chartPath\", os.path.join(config.workspace_root, \"visualization\")\n )\n for index, item in enumerate(json_info):\n if \"insights_id\" in item:\n data_list.append(\n {\n \"file_name\": 
os.path.basename(chart_file_path[index]).replace(\n f\".{output_type}\", \"\"\n ),\n \"insights_id\": item[\"insights_id\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n insights_id=item[\"insights_id\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"insight\",\n )\n for item in data_list\n ]\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n chart_path = chart_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {chart_path}: {result['error']}\")\n else:\n success_list.append(chart_path)\n success_template = (\n f\"# Charts Update with Insights\\n{','.join(success_list)}\"\n if len(success_list) > 0\n else \"\"\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error in chart insights:{'\\n'.join(error_list)}\\n{success_template}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{success_template}\"}\n\n async def execute(\n self,\n json_path: str,\n output_type: str | None = \"html\",\n tool_type: str | None = \"visualization\",\n language: str | None = \"en\",\n ) -> str:\n try:\n logger.info(f\"📈 data_visualization with {json_path} in: {tool_type} \")\n with open(json_path, \"r\", encoding=\"utf-8\") as file:\n json_info = json.load(file)\n if tool_type == \"visualization\":\n return await self.data_visualization(json_info, output_type, language)\n else:\n return await self.add_insighs(json_info, output_type)\n except Exception as e:\n return {\n \"observation\": f\"Error: {e}\",\n \"success\": False,\n }\n\n async def invoke_vmind(\n self,\n file_name: str,\n output_type: str,\n task_type: str,\n insights_id: list[str] = None,\n dict_data: list[dict[Hashable, Any]] = None,\n chart_description: str = None,\n language: str = \"en\",\n ):\n llm_config = {\n \"base_url\": self.llm.base_url,\n \"model\": self.llm.model,\n \"api_key\": self.llm.api_key,\n }\n vmind_params = {\n 
\"llm_config\": llm_config,\n \"user_prompt\": chart_description,\n \"dataset\": dict_data,\n \"file_name\": file_name,\n \"output_type\": output_type,\n \"insights_id\": insights_id,\n \"task_type\": task_type,\n \"directory\": str(config.workspace_root),\n \"language\": language,\n }\n # build async sub process\n process = await asyncio.create_subprocess_exec(\n \"npx\",\n \"ts-node\",\n \"src/chartVisualize.ts\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=os.path.dirname(__file__),\n )\n input_json = json.dumps(vmind_params, ensure_ascii=False).encode(\"utf-8\")\n try:\n stdout, stderr = await process.communicate(input_json)\n stdout_str = stdout.decode(\"utf-8\")\n stderr_str = stderr.decode(\"utf-8\")\n if process.returncode == 0:\n return json.loads(stdout_str)\n else:\n return {\"error\": f\"Node.js Error: {stderr_str}\"}\n except Exception as e:\n return {\"error\": f\"Subprocess Error: {str(e)}\"}\n"], ["/OpenManus/app/tool/web_search.py", "import asyncio\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pydantic import BaseModel, ConfigDict, Field, model_validator\nfrom tenacity import retry, stop_after_attempt, wait_exponential\n\nfrom app.config import config\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.search import (\n BaiduSearchEngine,\n BingSearchEngine,\n DuckDuckGoSearchEngine,\n GoogleSearchEngine,\n WebSearchEngine,\n)\nfrom app.tool.search.base import SearchItem\n\n\nclass SearchResult(BaseModel):\n \"\"\"Represents a single search result returned by a search engine.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n position: int = Field(description=\"Position in search results\")\n url: str = Field(description=\"URL of the search result\")\n title: str = Field(default=\"\", description=\"Title of the search result\")\n description: str = Field(\n default=\"\", 
description=\"Description or snippet of the search result\"\n )\n source: str = Field(description=\"The search engine that provided this result\")\n raw_content: Optional[str] = Field(\n default=None, description=\"Raw content from the search result page if available\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result.\"\"\"\n return f\"{self.title} ({self.url})\"\n\n\nclass SearchMetadata(BaseModel):\n \"\"\"Metadata about the search operation.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n total_results: int = Field(description=\"Total number of results found\")\n language: str = Field(description=\"Language code used for the search\")\n country: str = Field(description=\"Country code used for the search\")\n\n\nclass SearchResponse(ToolResult):\n \"\"\"Structured response from the web search tool, inheriting ToolResult.\"\"\"\n\n query: str = Field(description=\"The search query that was executed\")\n results: List[SearchResult] = Field(\n default_factory=list, description=\"List of search results\"\n )\n metadata: Optional[SearchMetadata] = Field(\n default=None, description=\"Metadata about the search\"\n )\n\n @model_validator(mode=\"after\")\n def populate_output(self) -> \"SearchResponse\":\n \"\"\"Populate output or error fields based on search results.\"\"\"\n if self.error:\n return self\n\n result_text = [f\"Search results for '{self.query}':\"]\n\n for i, result in enumerate(self.results, 1):\n # Add title with position number\n title = result.title.strip() or \"No title\"\n result_text.append(f\"\\n{i}. 
{title}\")\n\n # Add URL with proper indentation\n result_text.append(f\" URL: {result.url}\")\n\n # Add description if available\n if result.description.strip():\n result_text.append(f\" Description: {result.description}\")\n\n # Add content preview if available\n if result.raw_content:\n content_preview = result.raw_content[:1000].replace(\"\\n\", \" \").strip()\n if len(result.raw_content) > 1000:\n content_preview += \"...\"\n result_text.append(f\" Content: {content_preview}\")\n\n # Add metadata at the bottom if available\n if self.metadata:\n result_text.extend(\n [\n f\"\\nMetadata:\",\n f\"- Total results: {self.metadata.total_results}\",\n f\"- Language: {self.metadata.language}\",\n f\"- Country: {self.metadata.country}\",\n ]\n )\n\n self.output = \"\\n\".join(result_text)\n return self\n\n\nclass WebContentFetcher:\n \"\"\"Utility class for fetching web content.\"\"\"\n\n @staticmethod\n async def fetch_content(url: str, timeout: int = 10) -> Optional[str]:\n \"\"\"\n Fetch and extract the main content from a webpage.\n\n Args:\n url: The URL to fetch content from\n timeout: Request timeout in seconds\n\n Returns:\n Extracted text content or None if fetching fails\n \"\"\"\n headers = {\n \"WebSearch\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n\n try:\n # Use asyncio to run requests in a thread pool\n response = await asyncio.get_event_loop().run_in_executor(\n None, lambda: requests.get(url, headers=headers, timeout=timeout)\n )\n\n if response.status_code != 200:\n logger.warning(\n f\"Failed to fetch content from {url}: HTTP {response.status_code}\"\n )\n return None\n\n # Parse HTML with BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\", \"header\", \"footer\", \"nav\"]):\n script.extract()\n\n # Get text content\n text = soup.get_text(separator=\"\\n\", 
strip=True)\n\n # Clean up whitespace and limit size (100KB max)\n text = \" \".join(text.split())\n return text[:10000] if text else None\n\n except Exception as e:\n logger.warning(f\"Error fetching content from {url}: {e}\")\n return None\n\n\nclass WebSearch(BaseTool):\n \"\"\"Search the web for information using various search engines.\"\"\"\n\n name: str = \"web_search\"\n description: str = \"\"\"Search the web for real-time information about any topic.\n This tool returns comprehensive search results with relevant information, URLs, titles, and descriptions.\n If the primary search engine fails, it automatically falls back to alternative engines.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\",\n \"description\": \"(required) The search query to submit to the search engine.\",\n },\n \"num_results\": {\n \"type\": \"integer\",\n \"description\": \"(optional) The number of search results to return. Default is 5.\",\n \"default\": 5,\n },\n \"lang\": {\n \"type\": \"string\",\n \"description\": \"(optional) Language code for search results (default: en).\",\n \"default\": \"en\",\n },\n \"country\": {\n \"type\": \"string\",\n \"description\": \"(optional) Country code for search results (default: us).\",\n \"default\": \"us\",\n },\n \"fetch_content\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to fetch full content from result pages. 
Default is false.\",\n \"default\": False,\n },\n },\n \"required\": [\"query\"],\n }\n _search_engine: dict[str, WebSearchEngine] = {\n \"google\": GoogleSearchEngine(),\n \"baidu\": BaiduSearchEngine(),\n \"duckduckgo\": DuckDuckGoSearchEngine(),\n \"bing\": BingSearchEngine(),\n }\n content_fetcher: WebContentFetcher = WebContentFetcher()\n\n async def execute(\n self,\n query: str,\n num_results: int = 5,\n lang: Optional[str] = None,\n country: Optional[str] = None,\n fetch_content: bool = False,\n ) -> SearchResponse:\n \"\"\"\n Execute a Web search and return detailed search results.\n\n Args:\n query: The search query to submit to the search engine\n num_results: The number of search results to return (default: 5)\n lang: Language code for search results (default from config)\n country: Country code for search results (default from config)\n fetch_content: Whether to fetch content from result pages (default: False)\n\n Returns:\n A structured response containing search results and metadata\n \"\"\"\n # Get settings from config\n retry_delay = (\n getattr(config.search_config, \"retry_delay\", 60)\n if config.search_config\n else 60\n )\n max_retries = (\n getattr(config.search_config, \"max_retries\", 3)\n if config.search_config\n else 3\n )\n\n # Use config values for lang and country if not specified\n if lang is None:\n lang = (\n getattr(config.search_config, \"lang\", \"en\")\n if config.search_config\n else \"en\"\n )\n\n if country is None:\n country = (\n getattr(config.search_config, \"country\", \"us\")\n if config.search_config\n else \"us\"\n )\n\n search_params = {\"lang\": lang, \"country\": country}\n\n # Try searching with retries when all engines fail\n for retry_count in range(max_retries + 1):\n results = await self._try_all_engines(query, num_results, search_params)\n\n if results:\n # Fetch content if requested\n if fetch_content:\n results = await self._fetch_content_for_results(results)\n\n # Return a successful structured response\n 
return SearchResponse(\n status=\"success\",\n query=query,\n results=results,\n metadata=SearchMetadata(\n total_results=len(results),\n language=lang,\n country=country,\n ),\n )\n\n if retry_count < max_retries:\n # All engines failed, wait and retry\n logger.warning(\n f\"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}...\"\n )\n await asyncio.sleep(retry_delay)\n else:\n logger.error(\n f\"All search engines failed after {max_retries} retries. Giving up.\"\n )\n\n # Return an error response\n return SearchResponse(\n query=query,\n error=\"All search engines failed to return results after multiple retries.\",\n results=[],\n )\n\n async def _try_all_engines(\n self, query: str, num_results: int, search_params: Dict[str, Any]\n ) -> List[SearchResult]:\n \"\"\"Try all search engines in the configured order.\"\"\"\n engine_order = self._get_engine_order()\n failed_engines = []\n\n for engine_name in engine_order:\n engine = self._search_engine[engine_name]\n logger.info(f\"🔎 Attempting search with {engine_name.capitalize()}...\")\n search_items = await self._perform_search_with_engine(\n engine, query, num_results, search_params\n )\n\n if not search_items:\n continue\n\n if failed_engines:\n logger.info(\n f\"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}\"\n )\n\n # Transform search items into structured results\n return [\n SearchResult(\n position=i + 1,\n url=item.url,\n title=item.title\n or f\"Result {i+1}\", # Ensure we always have a title\n description=item.description or \"\",\n source=engine_name,\n )\n for i, item in enumerate(search_items)\n ]\n\n if failed_engines:\n logger.error(f\"All search engines failed: {', '.join(failed_engines)}\")\n return []\n\n async def _fetch_content_for_results(\n self, results: List[SearchResult]\n ) -> List[SearchResult]:\n \"\"\"Fetch and add web content to search results.\"\"\"\n if not results:\n return []\n\n # 
Create tasks for each result\n tasks = [self._fetch_single_result_content(result) for result in results]\n\n # Type annotation to help type checker\n fetched_results = await asyncio.gather(*tasks)\n\n # Explicit validation of return type\n return [\n (\n result\n if isinstance(result, SearchResult)\n else SearchResult(**result.dict())\n )\n for result in fetched_results\n ]\n\n async def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:\n \"\"\"Fetch content for a single search result.\"\"\"\n if result.url:\n content = await self.content_fetcher.fetch_content(result.url)\n if content:\n result.raw_content = content\n return result\n\n def _get_engine_order(self) -> List[str]:\n \"\"\"Determines the order in which to try search engines.\"\"\"\n preferred = (\n getattr(config.search_config, \"engine\", \"google\").lower()\n if config.search_config\n else \"google\"\n )\n fallbacks = (\n [engine.lower() for engine in config.search_config.fallback_engines]\n if config.search_config\n and hasattr(config.search_config, \"fallback_engines\")\n else []\n )\n\n # Start with preferred engine, then fallbacks, then remaining engines\n engine_order = [preferred] if preferred in self._search_engine else []\n engine_order.extend(\n [\n fb\n for fb in fallbacks\n if fb in self._search_engine and fb not in engine_order\n ]\n )\n engine_order.extend([e for e in self._search_engine if e not in engine_order])\n\n return engine_order\n\n @retry(\n stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10)\n )\n async def _perform_search_with_engine(\n self,\n engine: WebSearchEngine,\n query: str,\n num_results: int,\n search_params: Dict[str, Any],\n ) -> List[SearchItem]:\n \"\"\"Execute search with the given engine and parameters.\"\"\"\n return await asyncio.get_event_loop().run_in_executor(\n None,\n lambda: list(\n engine.perform_search(\n query,\n num_results=num_results,\n lang=search_params.get(\"lang\"),\n 
country=search_params.get(\"country\"),\n )\n ),\n )\n\n\nif __name__ == \"__main__\":\n web_search = WebSearch()\n search_response = asyncio.run(\n web_search.execute(\n query=\"Python programming\", fetch_content=True, num_results=1\n )\n )\n print(search_response.to_tool_result())\n"], ["/OpenManus/app/agent/toolcall.py", "import asyncio\nimport json\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.react import ReActAgent\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger\nfrom app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice\nfrom app.tool import CreateChatCompletion, Terminate, ToolCollection\n\n\nTOOL_CALL_REQUIRED = \"Tool calls required but none provided\"\n\n\nclass ToolCallAgent(ReActAgent):\n \"\"\"Base agent class for handling tool/function calls with enhanced abstraction\"\"\"\n\n name: str = \"toolcall\"\n description: str = \"an agent that can execute tool calls.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n available_tools: ToolCollection = ToolCollection(\n CreateChatCompletion(), Terminate()\n )\n tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n tool_calls: List[ToolCall] = Field(default_factory=list)\n _current_base64_image: Optional[str] = None\n\n max_steps: int = 30\n max_observe: Optional[Union[int, bool]] = None\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools\"\"\"\n if self.next_step_prompt:\n user_msg = Message.user_message(self.next_step_prompt)\n self.messages += [user_msg]\n\n try:\n # Get response with tool options\n response = await self.llm.ask_tool(\n messages=self.messages,\n system_msgs=(\n [Message.system_message(self.system_prompt)]\n if self.system_prompt\n else None\n 
),\n tools=self.available_tools.to_params(),\n tool_choice=self.tool_choices,\n )\n except ValueError:\n raise\n except Exception as e:\n # Check if this is a RetryError containing TokenLimitExceeded\n if hasattr(e, \"__cause__\") and isinstance(e.__cause__, TokenLimitExceeded):\n token_limit_error = e.__cause__\n logger.error(\n f\"🚨 Token limit error (from RetryError): {token_limit_error}\"\n )\n self.memory.add_message(\n Message.assistant_message(\n f\"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}\"\n )\n )\n self.state = AgentState.FINISHED\n return False\n raise\n\n self.tool_calls = tool_calls = (\n response.tool_calls if response and response.tool_calls else []\n )\n content = response.content if response and response.content else \"\"\n\n # Log response info\n logger.info(f\"✨ {self.name}'s thoughts: {content}\")\n logger.info(\n f\"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use\"\n )\n if tool_calls:\n logger.info(\n f\"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}\"\n )\n logger.info(f\"🔧 Tool arguments: {tool_calls[0].function.arguments}\")\n\n try:\n if response is None:\n raise RuntimeError(\"No response received from the LLM\")\n\n # Handle different tool_choices modes\n if self.tool_choices == ToolChoice.NONE:\n if tool_calls:\n logger.warning(\n f\"🤔 Hmm, {self.name} tried to use tools when they weren't available!\"\n )\n if content:\n self.memory.add_message(Message.assistant_message(content))\n return True\n return False\n\n # Create and add assistant message\n assistant_msg = (\n Message.from_tool_calls(content=content, tool_calls=self.tool_calls)\n if self.tool_calls\n else Message.assistant_message(content)\n )\n self.memory.add_message(assistant_msg)\n\n if self.tool_choices == ToolChoice.REQUIRED and not self.tool_calls:\n return True # Will be handled in act()\n\n # For 'auto' mode, continue with content if no commands but content exists\n if 
self.tool_choices == ToolChoice.AUTO and not self.tool_calls:\n return bool(content)\n\n return bool(self.tool_calls)\n except Exception as e:\n logger.error(f\"🚨 Oops! The {self.name}'s thinking process hit a snag: {e}\")\n self.memory.add_message(\n Message.assistant_message(\n f\"Error encountered while processing: {str(e)}\"\n )\n )\n return False\n\n async def act(self) -> str:\n \"\"\"Execute tool calls and handle their results\"\"\"\n if not self.tool_calls:\n if self.tool_choices == ToolChoice.REQUIRED:\n raise ValueError(TOOL_CALL_REQUIRED)\n\n # Return last message content if no tool calls\n return self.messages[-1].content or \"No content or commands to execute\"\n\n results = []\n for command in self.tool_calls:\n # Reset base64_image for each tool call\n self._current_base64_image = None\n\n result = await self.execute_tool(command)\n\n if self.max_observe:\n result = result[: self.max_observe]\n\n logger.info(\n f\"🎯 Tool '{command.function.name}' completed its mission! Result: {result}\"\n )\n\n # Add tool response to memory\n tool_msg = Message.tool_message(\n content=result,\n tool_call_id=command.id,\n name=command.function.name,\n base64_image=self._current_base64_image,\n )\n self.memory.add_message(tool_msg)\n results.append(result)\n\n return \"\\n\\n\".join(results)\n\n async def execute_tool(self, command: ToolCall) -> str:\n \"\"\"Execute a single tool call with robust error handling\"\"\"\n if not command or not command.function or not command.function.name:\n return \"Error: Invalid command format\"\n\n name = command.function.name\n if name not in self.available_tools.tool_map:\n return f\"Error: Unknown tool '{name}'\"\n\n try:\n # Parse arguments\n args = json.loads(command.function.arguments or \"{}\")\n\n # Execute the tool\n logger.info(f\"🔧 Activating tool: '{name}'...\")\n result = await self.available_tools.execute(name=name, tool_input=args)\n\n # Handle special tools\n await self._handle_special_tool(name=name, 
result=result)\n\n # Check if result is a ToolResult with base64_image\n if hasattr(result, \"base64_image\") and result.base64_image:\n # Store the base64_image for later use in tool_message\n self._current_base64_image = result.base64_image\n\n # Format result for display (standard case)\n observation = (\n f\"Observed output of cmd `{name}` executed:\\n{str(result)}\"\n if result\n else f\"Cmd `{name}` completed with no output\"\n )\n\n return observation\n except json.JSONDecodeError:\n error_msg = f\"Error parsing arguments for {name}: Invalid JSON format\"\n logger.error(\n f\"📝 Oops! The arguments for '{name}' don't make sense - invalid JSON, arguments:{command.function.arguments}\"\n )\n return f\"Error: {error_msg}\"\n except Exception as e:\n error_msg = f\"⚠️ Tool '{name}' encountered a problem: {str(e)}\"\n logger.exception(error_msg)\n return f\"Error: {error_msg}\"\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs):\n \"\"\"Handle special tool execution and state changes\"\"\"\n if not self._is_special_tool(name):\n return\n\n if self._should_finish_execution(name=name, result=result, **kwargs):\n # Set agent state to finished\n logger.info(f\"🏁 Special tool '{name}' has completed the task!\")\n self.state = AgentState.FINISHED\n\n @staticmethod\n def _should_finish_execution(**kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n return True\n\n def _is_special_tool(self, name: str) -> bool:\n \"\"\"Check if tool name is in special tools list\"\"\"\n return name.lower() in [n.lower() for n in self.special_tool_names]\n\n async def cleanup(self):\n \"\"\"Clean up resources used by the agent's tools.\"\"\"\n logger.info(f\"🧹 Cleaning up resources for agent '{self.name}'...\")\n for tool_name, tool_instance in self.available_tools.tool_map.items():\n if hasattr(tool_instance, \"cleanup\") and asyncio.iscoroutinefunction(\n tool_instance.cleanup\n ):\n try:\n logger.debug(f\"🧼 Cleaning up tool: 
{tool_name}\")\n await tool_instance.cleanup()\n except Exception as e:\n logger.error(\n f\"🚨 Error cleaning up tool '{tool_name}': {e}\", exc_info=True\n )\n logger.info(f\"✨ Cleanup complete for agent '{self.name}'.\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n return await super().run(request)\n finally:\n await self.cleanup()\n"], ["/OpenManus/app/agent/browser.py", "import json\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import Message, ToolChoice\nfrom app.tool import BrowserUseTool, Terminate, ToolCollection\n\n\n# Avoid circular import if BrowserAgent needs BrowserContextHelper\nif TYPE_CHECKING:\n from app.agent.base import BaseAgent # Or wherever memory is defined\n\n\nclass BrowserContextHelper:\n def __init__(self, agent: \"BaseAgent\"):\n self.agent = agent\n self._current_base64_image: Optional[str] = None\n\n async def get_browser_state(self) -> Optional[dict]:\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if not browser_tool or not hasattr(browser_tool, \"get_current_state\"):\n logger.warning(\"BrowserUseTool not found or doesn't have get_current_state\")\n return None\n try:\n result = await browser_tool.get_current_state()\n if result.error:\n logger.debug(f\"Browser state error: {result.error}\")\n return None\n if hasattr(result, \"base64_image\") and result.base64_image:\n self._current_base64_image = result.base64_image\n else:\n self._current_base64_image = None\n return json.loads(result.output)\n except Exception as e:\n logger.debug(f\"Failed to get browser state: {str(e)}\")\n return None\n\n async def format_next_step_prompt(self) -> str:\n \"\"\"Gets browser state and formats the browser prompt.\"\"\"\n browser_state = 
await self.get_browser_state()\n url_info, tabs_info, content_above_info, content_below_info = \"\", \"\", \"\", \"\"\n results_info = \"\" # Or get from agent if needed elsewhere\n\n if browser_state and not browser_state.get(\"error\"):\n url_info = f\"\\n URL: {browser_state.get('url', 'N/A')}\\n Title: {browser_state.get('title', 'N/A')}\"\n tabs = browser_state.get(\"tabs\", [])\n if tabs:\n tabs_info = f\"\\n {len(tabs)} tab(s) available\"\n pixels_above = browser_state.get(\"pixels_above\", 0)\n pixels_below = browser_state.get(\"pixels_below\", 0)\n if pixels_above > 0:\n content_above_info = f\" ({pixels_above} pixels)\"\n if pixels_below > 0:\n content_below_info = f\" ({pixels_below} pixels)\"\n\n if self._current_base64_image:\n image_message = Message.user_message(\n content=\"Current browser screenshot:\",\n base64_image=self._current_base64_image,\n )\n self.agent.memory.add_message(image_message)\n self._current_base64_image = None # Consume the image after adding\n\n return NEXT_STEP_PROMPT.format(\n url_placeholder=url_info,\n tabs_placeholder=tabs_info,\n content_above_placeholder=content_above_info,\n content_below_placeholder=content_below_info,\n results_placeholder=results_info,\n )\n\n async def cleanup_browser(self):\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if browser_tool and hasattr(browser_tool, \"cleanup\"):\n await browser_tool.cleanup()\n\n\nclass BrowserAgent(ToolCallAgent):\n \"\"\"\n A browser agent that uses the browser_use library to control a browser.\n\n This agent can navigate web pages, interact with elements, fill forms,\n extract content, and perform other browser-based actions to accomplish tasks.\n \"\"\"\n\n name: str = \"browser\"\n description: str = \"A browser agent that can control a browser to accomplish tasks\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # Configure the available 
tools\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())\n )\n\n # Use Auto for tool choice to allow both tool usage and free-form responses\n tool_choices: ToolChoice = ToolChoice.AUTO\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"BrowserAgent\":\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools, with browser state info added\"\"\"\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n return await super().think()\n\n async def cleanup(self):\n \"\"\"Clean up browser agent resources by calling parent cleanup.\"\"\"\n await self.browser_context_helper.cleanup_browser()\n"], ["/OpenManus/app/tool/browser_use_tool.py", "import asyncio\nimport base64\nimport json\nfrom typing import Generic, Optional, TypeVar\n\nfrom browser_use import Browser as BrowserUseBrowser\nfrom browser_use import BrowserConfig\nfrom browser_use.browser.context import BrowserContext, BrowserContextConfig\nfrom browser_use.dom.service import DomService\nfrom pydantic import Field, field_validator\nfrom pydantic_core.core_schema import ValidationInfo\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.web_search import WebSearch\n\n\n_BROWSER_DESCRIPTION = \"\"\"\\\nA powerful browser automation tool that allows interaction with web pages through various actions.\n* This tool provides commands for controlling a browser session, navigating web pages, and extracting information\n* It maintains state across calls, keeping the browser session alive until explicitly closed\n* Use this when you need to browse websites, fill forms, click 
buttons, extract content, or perform web searches\n* Each action requires specific parameters as defined in the tool's dependencies\n\nKey capabilities include:\n* Navigation: Go to specific URLs, go back, search the web, or refresh pages\n* Interaction: Click elements, input text, select from dropdowns, send keyboard commands\n* Scrolling: Scroll up/down by pixel amount or scroll to specific text\n* Content extraction: Extract and analyze content from web pages based on specific goals\n* Tab management: Switch between tabs, open new tabs, or close tabs\n\nNote: When using element indices, refer to the numbered elements shown in the current browser state.\n\"\"\"\n\nContext = TypeVar(\"Context\")\n\n\nclass BrowserUseTool(BaseTool, Generic[Context]):\n name: str = \"browser_use\"\n description: str = _BROWSER_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"action\": {\n \"type\": \"string\",\n \"enum\": [\n \"go_to_url\",\n \"click_element\",\n \"input_text\",\n \"scroll_down\",\n \"scroll_up\",\n \"scroll_to_text\",\n \"send_keys\",\n \"get_dropdown_options\",\n \"select_dropdown_option\",\n \"go_back\",\n \"web_search\",\n \"wait\",\n \"extract_content\",\n \"switch_tab\",\n \"open_tab\",\n \"close_tab\",\n ],\n \"description\": \"The browser action to perform\",\n },\n \"url\": {\n \"type\": \"string\",\n \"description\": \"URL for 'go_to_url' or 'open_tab' actions\",\n },\n \"index\": {\n \"type\": \"integer\",\n \"description\": \"Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions\",\n },\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions\",\n },\n \"scroll_amount\": {\n \"type\": \"integer\",\n \"description\": \"Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions\",\n },\n \"tab_id\": {\n \"type\": \"integer\",\n \"description\": \"Tab ID 
for 'switch_tab' action\",\n },\n \"query\": {\n \"type\": \"string\",\n \"description\": \"Search query for 'web_search' action\",\n },\n \"goal\": {\n \"type\": \"string\",\n \"description\": \"Extraction goal for 'extract_content' action\",\n },\n \"keys\": {\n \"type\": \"string\",\n \"description\": \"Keys to send for 'send_keys' action\",\n },\n \"seconds\": {\n \"type\": \"integer\",\n \"description\": \"Seconds to wait for 'wait' action\",\n },\n },\n \"required\": [\"action\"],\n \"dependencies\": {\n \"go_to_url\": [\"url\"],\n \"click_element\": [\"index\"],\n \"input_text\": [\"index\", \"text\"],\n \"switch_tab\": [\"tab_id\"],\n \"open_tab\": [\"url\"],\n \"scroll_down\": [\"scroll_amount\"],\n \"scroll_up\": [\"scroll_amount\"],\n \"scroll_to_text\": [\"text\"],\n \"send_keys\": [\"keys\"],\n \"get_dropdown_options\": [\"index\"],\n \"select_dropdown_option\": [\"index\", \"text\"],\n \"go_back\": [],\n \"web_search\": [\"query\"],\n \"wait\": [\"seconds\"],\n \"extract_content\": [\"goal\"],\n },\n }\n\n lock: asyncio.Lock = Field(default_factory=asyncio.Lock)\n browser: Optional[BrowserUseBrowser] = Field(default=None, exclude=True)\n context: Optional[BrowserContext] = Field(default=None, exclude=True)\n dom_service: Optional[DomService] = Field(default=None, exclude=True)\n web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True)\n\n # Context for generic functionality\n tool_context: Optional[Context] = Field(default=None, exclude=True)\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n\n @field_validator(\"parameters\", mode=\"before\")\n def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict:\n if not v:\n raise ValueError(\"Parameters cannot be empty\")\n return v\n\n async def _ensure_browser_initialized(self) -> BrowserContext:\n \"\"\"Ensure browser and context are initialized.\"\"\"\n if self.browser is None:\n browser_config_kwargs = {\"headless\": False, \"disable_security\": True}\n\n if 
config.browser_config:\n from browser_use.browser.browser import ProxySettings\n\n # handle proxy settings.\n if config.browser_config.proxy and config.browser_config.proxy.server:\n browser_config_kwargs[\"proxy\"] = ProxySettings(\n server=config.browser_config.proxy.server,\n username=config.browser_config.proxy.username,\n password=config.browser_config.proxy.password,\n )\n\n browser_attrs = [\n \"headless\",\n \"disable_security\",\n \"extra_chromium_args\",\n \"chrome_instance_path\",\n \"wss_url\",\n \"cdp_url\",\n ]\n\n for attr in browser_attrs:\n value = getattr(config.browser_config, attr, None)\n if value is not None:\n if not isinstance(value, list) or value:\n browser_config_kwargs[attr] = value\n\n self.browser = BrowserUseBrowser(BrowserConfig(**browser_config_kwargs))\n\n if self.context is None:\n context_config = BrowserContextConfig()\n\n # if there is context config in the config, use it.\n if (\n config.browser_config\n and hasattr(config.browser_config, \"new_context_config\")\n and config.browser_config.new_context_config\n ):\n context_config = config.browser_config.new_context_config\n\n self.context = await self.browser.new_context(context_config)\n self.dom_service = DomService(await self.context.get_current_page())\n\n return self.context\n\n async def execute(\n self,\n action: str,\n url: Optional[str] = None,\n index: Optional[int] = None,\n text: Optional[str] = None,\n scroll_amount: Optional[int] = None,\n tab_id: Optional[int] = None,\n query: Optional[str] = None,\n goal: Optional[str] = None,\n keys: Optional[str] = None,\n seconds: Optional[int] = None,\n **kwargs,\n ) -> ToolResult:\n \"\"\"\n Execute a specified browser action.\n\n Args:\n action: The browser action to perform\n url: URL for navigation or new tab\n index: Element index for click or input actions\n text: Text for input action or search query\n scroll_amount: Pixels to scroll for scroll action\n tab_id: Tab ID for switch_tab action\n query: Search query for 
Google search\n goal: Extraction goal for content extraction\n keys: Keys to send for keyboard actions\n seconds: Seconds to wait\n **kwargs: Additional arguments\n\n Returns:\n ToolResult with the action's output or error\n \"\"\"\n async with self.lock:\n try:\n context = await self._ensure_browser_initialized()\n\n # Get max content length from config\n max_content_length = getattr(\n config.browser_config, \"max_content_length\", 2000\n )\n\n # Navigation actions\n if action == \"go_to_url\":\n if not url:\n return ToolResult(\n error=\"URL is required for 'go_to_url' action\"\n )\n page = await context.get_current_page()\n await page.goto(url)\n await page.wait_for_load_state()\n return ToolResult(output=f\"Navigated to {url}\")\n\n elif action == \"go_back\":\n await context.go_back()\n return ToolResult(output=\"Navigated back\")\n\n elif action == \"refresh\":\n await context.refresh_page()\n return ToolResult(output=\"Refreshed current page\")\n\n elif action == \"web_search\":\n if not query:\n return ToolResult(\n error=\"Query is required for 'web_search' action\"\n )\n # Execute the web search and return results directly without browser navigation\n search_response = await self.web_search_tool.execute(\n query=query, fetch_content=True, num_results=1\n )\n # Navigate to the first search result\n first_search_result = search_response.results[0]\n url_to_navigate = first_search_result.url\n\n page = await context.get_current_page()\n await page.goto(url_to_navigate)\n await page.wait_for_load_state()\n\n return search_response\n\n # Element interaction actions\n elif action == \"click_element\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'click_element' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n download_path = await context._click_element_node(element)\n output = f\"Clicked element at index {index}\"\n if 
download_path:\n output += f\" - Downloaded file to {download_path}\"\n return ToolResult(output=output)\n\n elif action == \"input_text\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'input_text' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n await context._input_text_element_node(element, text)\n return ToolResult(\n output=f\"Input '{text}' into element at index {index}\"\n )\n\n elif action == \"scroll_down\" or action == \"scroll_up\":\n direction = 1 if action == \"scroll_down\" else -1\n amount = (\n scroll_amount\n if scroll_amount is not None\n else context.config.browser_window_size[\"height\"]\n )\n await context.execute_javascript(\n f\"window.scrollBy(0, {direction * amount});\"\n )\n return ToolResult(\n output=f\"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels\"\n )\n\n elif action == \"scroll_to_text\":\n if not text:\n return ToolResult(\n error=\"Text is required for 'scroll_to_text' action\"\n )\n page = await context.get_current_page()\n try:\n locator = page.get_by_text(text, exact=False)\n await locator.scroll_into_view_if_needed()\n return ToolResult(output=f\"Scrolled to text: '{text}'\")\n except Exception as e:\n return ToolResult(error=f\"Failed to scroll to text: {str(e)}\")\n\n elif action == \"send_keys\":\n if not keys:\n return ToolResult(\n error=\"Keys are required for 'send_keys' action\"\n )\n page = await context.get_current_page()\n await page.keyboard.press(keys)\n return ToolResult(output=f\"Sent keys: {keys}\")\n\n elif action == \"get_dropdown_options\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'get_dropdown_options' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n 
options = await page.evaluate(\n \"\"\"\n (xpath) => {\n const select = document.evaluate(xpath, document, null,\n XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;\n if (!select) return null;\n return Array.from(select.options).map(opt => ({\n text: opt.text,\n value: opt.value,\n index: opt.index\n }));\n }\n \"\"\",\n element.xpath,\n )\n return ToolResult(output=f\"Dropdown options: {options}\")\n\n elif action == \"select_dropdown_option\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'select_dropdown_option' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n await page.select_option(element.xpath, label=text)\n return ToolResult(\n output=f\"Selected option '{text}' from dropdown at index {index}\"\n )\n\n # Content extraction actions\n elif action == \"extract_content\":\n if not goal:\n return ToolResult(\n error=\"Goal is required for 'extract_content' action\"\n )\n\n page = await context.get_current_page()\n import markdownify\n\n content = markdownify.markdownify(await page.content())\n\n prompt = f\"\"\"\\\nYour task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. If the goal is vague, summarize the page. 
Respond in json format.\nExtraction goal: {goal}\n\nPage content:\n{content[:max_content_length]}\n\"\"\"\n messages = [{\"role\": \"system\", \"content\": prompt}]\n\n # Define extraction function schema\n extraction_function = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"extract_content\",\n \"description\": \"Extract specific information from a webpage based on a goal\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"extracted_content\": {\n \"type\": \"object\",\n \"description\": \"The content extracted from the page according to the goal\",\n \"properties\": {\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text content extracted from the page\",\n },\n \"metadata\": {\n \"type\": \"object\",\n \"description\": \"Additional metadata about the extracted content\",\n \"properties\": {\n \"source\": {\n \"type\": \"string\",\n \"description\": \"Source of the extracted content\",\n }\n },\n },\n },\n }\n },\n \"required\": [\"extracted_content\"],\n },\n },\n }\n\n # Use LLM to extract content with required function calling\n response = await self.llm.ask_tool(\n messages,\n tools=[extraction_function],\n tool_choice=\"required\",\n )\n\n if response and response.tool_calls:\n args = json.loads(response.tool_calls[0].function.arguments)\n extracted_content = args.get(\"extracted_content\", {})\n return ToolResult(\n output=f\"Extracted from page:\\n{extracted_content}\\n\"\n )\n\n return ToolResult(output=\"No content was extracted from the page.\")\n\n # Tab management actions\n elif action == \"switch_tab\":\n if tab_id is None:\n return ToolResult(\n error=\"Tab ID is required for 'switch_tab' action\"\n )\n await context.switch_to_tab(tab_id)\n page = await context.get_current_page()\n await page.wait_for_load_state()\n return ToolResult(output=f\"Switched to tab {tab_id}\")\n\n elif action == \"open_tab\":\n if not url:\n return ToolResult(error=\"URL is required for 'open_tab' action\")\n await 
context.create_new_tab(url)\n return ToolResult(output=f\"Opened new tab with {url}\")\n\n elif action == \"close_tab\":\n await context.close_current_tab()\n return ToolResult(output=\"Closed current tab\")\n\n # Utility actions\n elif action == \"wait\":\n seconds_to_wait = seconds if seconds is not None else 3\n await asyncio.sleep(seconds_to_wait)\n return ToolResult(output=f\"Waited for {seconds_to_wait} seconds\")\n\n else:\n return ToolResult(error=f\"Unknown action: {action}\")\n\n except Exception as e:\n return ToolResult(error=f\"Browser action '{action}' failed: {str(e)}\")\n\n async def get_current_state(\n self, context: Optional[BrowserContext] = None\n ) -> ToolResult:\n \"\"\"\n Get the current browser state as a ToolResult.\n If context is not provided, uses self.context.\n \"\"\"\n try:\n # Use provided context or fall back to self.context\n ctx = context or self.context\n if not ctx:\n return ToolResult(error=\"Browser context not initialized\")\n\n state = await ctx.get_state()\n\n # Create a viewport_info dictionary if it doesn't exist\n viewport_height = 0\n if hasattr(state, \"viewport_info\") and state.viewport_info:\n viewport_height = state.viewport_info.height\n elif hasattr(ctx, \"config\") and hasattr(ctx.config, \"browser_window_size\"):\n viewport_height = ctx.config.browser_window_size.get(\"height\", 0)\n\n # Take a screenshot for the state\n page = await ctx.get_current_page()\n\n await page.bring_to_front()\n await page.wait_for_load_state()\n\n screenshot = await page.screenshot(\n full_page=True, animations=\"disabled\", type=\"jpeg\", quality=100\n )\n\n screenshot = base64.b64encode(screenshot).decode(\"utf-8\")\n\n # Build the state info with all required fields\n state_info = {\n \"url\": state.url,\n \"title\": state.title,\n \"tabs\": [tab.model_dump() for tab in state.tabs],\n \"help\": \"[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. 
Clicking on these indices will navigate to or interact with the respective content behind them.\",\n \"interactive_elements\": (\n state.element_tree.clickable_elements_to_string()\n if state.element_tree\n else \"\"\n ),\n \"scroll_info\": {\n \"pixels_above\": getattr(state, \"pixels_above\", 0),\n \"pixels_below\": getattr(state, \"pixels_below\", 0),\n \"total_height\": getattr(state, \"pixels_above\", 0)\n + getattr(state, \"pixels_below\", 0)\n + viewport_height,\n },\n \"viewport_height\": viewport_height,\n }\n\n return ToolResult(\n output=json.dumps(state_info, indent=4, ensure_ascii=False),\n base64_image=screenshot,\n )\n except Exception as e:\n return ToolResult(error=f\"Failed to get browser state: {str(e)}\")\n\n async def cleanup(self):\n \"\"\"Clean up browser resources.\"\"\"\n async with self.lock:\n if self.context is not None:\n await self.context.close()\n self.context = None\n self.dom_service = None\n if self.browser is not None:\n await self.browser.close()\n self.browser = None\n\n def __del__(self):\n \"\"\"Ensure cleanup when object is destroyed.\"\"\"\n if self.browser is not None or self.context is not None:\n try:\n asyncio.run(self.cleanup())\n except RuntimeError:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.cleanup())\n loop.close()\n\n @classmethod\n def create_with_context(cls, context: Context) -> \"BrowserUseTool[Context]\":\n \"\"\"Factory method to create a BrowserUseTool with a specific context.\"\"\"\n tool = cls()\n tool.tool_context = context\n return tool\n"], ["/OpenManus/app/llm.py", "import math\nfrom typing import Dict, List, Optional, Union\n\nimport tiktoken\nfrom openai import (\n APIError,\n AsyncAzureOpenAI,\n AsyncOpenAI,\n AuthenticationError,\n OpenAIError,\n RateLimitError,\n)\nfrom openai.types.chat import ChatCompletion, ChatCompletionMessage\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_random_exponential,\n)\n\nfrom app.bedrock import 
BedrockClient\nfrom app.config import LLMSettings, config\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger # Assuming a logger is set up in your app\nfrom app.schema import (\n ROLE_VALUES,\n TOOL_CHOICE_TYPE,\n TOOL_CHOICE_VALUES,\n Message,\n ToolChoice,\n)\n\n\nREASONING_MODELS = [\"o1\", \"o3-mini\"]\nMULTIMODAL_MODELS = [\n \"gpt-4-vision-preview\",\n \"gpt-4o\",\n \"gpt-4o-mini\",\n \"claude-3-opus-20240229\",\n \"claude-3-sonnet-20240229\",\n \"claude-3-haiku-20240307\",\n]\n\n\nclass TokenCounter:\n # Token constants\n BASE_MESSAGE_TOKENS = 4\n FORMAT_TOKENS = 2\n LOW_DETAIL_IMAGE_TOKENS = 85\n HIGH_DETAIL_TILE_TOKENS = 170\n\n # Image processing constants\n MAX_SIZE = 2048\n HIGH_DETAIL_TARGET_SHORT_SIDE = 768\n TILE_SIZE = 512\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n def count_text(self, text: str) -> int:\n \"\"\"Calculate tokens for a text string\"\"\"\n return 0 if not text else len(self.tokenizer.encode(text))\n\n def count_image(self, image_item: dict) -> int:\n \"\"\"\n Calculate tokens for an image based on detail level and dimensions\n\n For \"low\" detail: fixed 85 tokens\n For \"high\" detail:\n 1. Scale to fit in 2048x2048 square\n 2. Scale shortest side to 768px\n 3. Count 512px tiles (170 tokens each)\n 4. 
Add 85 tokens\n \"\"\"\n detail = image_item.get(\"detail\", \"medium\")\n\n # For low detail, always return fixed token count\n if detail == \"low\":\n return self.LOW_DETAIL_IMAGE_TOKENS\n\n # For medium detail (default in OpenAI), use high detail calculation\n # OpenAI doesn't specify a separate calculation for medium\n\n # For high detail, calculate based on dimensions if available\n if detail == \"high\" or detail == \"medium\":\n # If dimensions are provided in the image_item\n if \"dimensions\" in image_item:\n width, height = image_item[\"dimensions\"]\n return self._calculate_high_detail_tokens(width, height)\n\n return (\n self._calculate_high_detail_tokens(1024, 1024) if detail == \"high\" else 1024\n )\n\n def _calculate_high_detail_tokens(self, width: int, height: int) -> int:\n \"\"\"Calculate tokens for high detail images based on dimensions\"\"\"\n # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square\n if width > self.MAX_SIZE or height > self.MAX_SIZE:\n scale = self.MAX_SIZE / max(width, height)\n width = int(width * scale)\n height = int(height * scale)\n\n # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE\n scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height)\n scaled_width = int(width * scale)\n scaled_height = int(height * scale)\n\n # Step 3: Count number of 512px tiles\n tiles_x = math.ceil(scaled_width / self.TILE_SIZE)\n tiles_y = math.ceil(scaled_height / self.TILE_SIZE)\n total_tiles = tiles_x * tiles_y\n\n # Step 4: Calculate final token count\n return (\n total_tiles * self.HIGH_DETAIL_TILE_TOKENS\n ) + self.LOW_DETAIL_IMAGE_TOKENS\n\n def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int:\n \"\"\"Calculate tokens for message content\"\"\"\n if not content:\n return 0\n\n if isinstance(content, str):\n return self.count_text(content)\n\n token_count = 0\n for item in content:\n if isinstance(item, str):\n token_count += self.count_text(item)\n elif isinstance(item, dict):\n if \"text\" 
in item:\n token_count += self.count_text(item[\"text\"])\n elif \"image_url\" in item:\n token_count += self.count_image(item)\n return token_count\n\n def count_tool_calls(self, tool_calls: List[dict]) -> int:\n \"\"\"Calculate tokens for tool calls\"\"\"\n token_count = 0\n for tool_call in tool_calls:\n if \"function\" in tool_call:\n function = tool_call[\"function\"]\n token_count += self.count_text(function.get(\"name\", \"\"))\n token_count += self.count_text(function.get(\"arguments\", \"\"))\n return token_count\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n \"\"\"Calculate the total number of tokens in a message list\"\"\"\n total_tokens = self.FORMAT_TOKENS # Base format tokens\n\n for message in messages:\n tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message\n\n # Add role tokens\n tokens += self.count_text(message.get(\"role\", \"\"))\n\n # Add content tokens\n if \"content\" in message:\n tokens += self.count_content(message[\"content\"])\n\n # Add tool calls tokens\n if \"tool_calls\" in message:\n tokens += self.count_tool_calls(message[\"tool_calls\"])\n\n # Add name and tool_call_id tokens\n tokens += self.count_text(message.get(\"name\", \"\"))\n tokens += self.count_text(message.get(\"tool_call_id\", \"\"))\n\n total_tokens += tokens\n\n return total_tokens\n\n\nclass LLM:\n _instances: Dict[str, \"LLM\"] = {}\n\n def __new__(\n cls, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if config_name not in cls._instances:\n instance = super().__new__(cls)\n instance.__init__(config_name, llm_config)\n cls._instances[config_name] = instance\n return cls._instances[config_name]\n\n def __init__(\n self, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if not hasattr(self, \"client\"): # Only initialize if not already initialized\n llm_config = llm_config or config.llm\n llm_config = llm_config.get(config_name, llm_config[\"default\"])\n self.model = 
llm_config.model\n self.max_tokens = llm_config.max_tokens\n self.temperature = llm_config.temperature\n self.api_type = llm_config.api_type\n self.api_key = llm_config.api_key\n self.api_version = llm_config.api_version\n self.base_url = llm_config.base_url\n\n # Add token counting related attributes\n self.total_input_tokens = 0\n self.total_completion_tokens = 0\n self.max_input_tokens = (\n llm_config.max_input_tokens\n if hasattr(llm_config, \"max_input_tokens\")\n else None\n )\n\n # Initialize tokenizer\n try:\n self.tokenizer = tiktoken.encoding_for_model(self.model)\n except KeyError:\n # If the model is not in tiktoken's presets, use cl100k_base as default\n self.tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n\n if self.api_type == \"azure\":\n self.client = AsyncAzureOpenAI(\n base_url=self.base_url,\n api_key=self.api_key,\n api_version=self.api_version,\n )\n elif self.api_type == \"aws\":\n self.client = BedrockClient()\n else:\n self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)\n\n self.token_counter = TokenCounter(self.tokenizer)\n\n def count_tokens(self, text: str) -> int:\n \"\"\"Calculate the number of tokens in a text\"\"\"\n if not text:\n return 0\n return len(self.tokenizer.encode(text))\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n return self.token_counter.count_message_tokens(messages)\n\n def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None:\n \"\"\"Update token counts\"\"\"\n # Only track tokens if max_input_tokens is set\n self.total_input_tokens += input_tokens\n self.total_completion_tokens += completion_tokens\n logger.info(\n f\"Token usage: Input={input_tokens}, Completion={completion_tokens}, \"\n f\"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, \"\n f\"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}\"\n )\n\n def 
check_token_limit(self, input_tokens: int) -> bool:\n \"\"\"Check if token limits are exceeded\"\"\"\n if self.max_input_tokens is not None:\n return (self.total_input_tokens + input_tokens) <= self.max_input_tokens\n # If max_input_tokens is not set, always return True\n return True\n\n def get_limit_error_message(self, input_tokens: int) -> str:\n \"\"\"Generate error message for token limit exceeded\"\"\"\n if (\n self.max_input_tokens is not None\n and (self.total_input_tokens + input_tokens) > self.max_input_tokens\n ):\n return f\"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})\"\n\n return \"Token limit exceeded\"\n\n @staticmethod\n def format_messages(\n messages: List[Union[dict, Message]], supports_images: bool = False\n ) -> List[dict]:\n \"\"\"\n Format messages for LLM by converting them to OpenAI message format.\n\n Args:\n messages: List of messages that can be either dict or Message objects\n supports_images: Flag indicating if the target model supports image inputs\n\n Returns:\n List[dict]: List of formatted messages in OpenAI format\n\n Raises:\n ValueError: If messages are invalid or missing required fields\n TypeError: If unsupported message types are provided\n\n Examples:\n >>> msgs = [\n ... Message.system_message(\"You are a helpful assistant\"),\n ... {\"role\": \"user\", \"content\": \"Hello\"},\n ... Message.user_message(\"How are you?\")\n ... 
]\n >>> formatted = LLM.format_messages(msgs)\n \"\"\"\n formatted_messages = []\n\n for message in messages:\n # Convert Message objects to dictionaries\n if isinstance(message, Message):\n message = message.to_dict()\n\n if isinstance(message, dict):\n # If message is a dict, ensure it has required fields\n if \"role\" not in message:\n raise ValueError(\"Message dict must contain 'role' field\")\n\n # Process base64 images if present and model supports images\n if supports_images and message.get(\"base64_image\"):\n # Initialize or convert content to appropriate format\n if not message.get(\"content\"):\n message[\"content\"] = []\n elif isinstance(message[\"content\"], str):\n message[\"content\"] = [\n {\"type\": \"text\", \"text\": message[\"content\"]}\n ]\n elif isinstance(message[\"content\"], list):\n # Convert string items to proper text objects\n message[\"content\"] = [\n (\n {\"type\": \"text\", \"text\": item}\n if isinstance(item, str)\n else item\n )\n for item in message[\"content\"]\n ]\n\n # Add the image to content\n message[\"content\"].append(\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": f\"data:image/jpeg;base64,{message['base64_image']}\"\n },\n }\n )\n\n # Remove the base64_image field\n del message[\"base64_image\"]\n # If model doesn't support images but message has base64_image, handle gracefully\n elif not supports_images and message.get(\"base64_image\"):\n # Just remove the base64_image field and keep the text content\n del message[\"base64_image\"]\n\n if \"content\" in message or \"tool_calls\" in message:\n formatted_messages.append(message)\n # else: do not include the message\n else:\n raise TypeError(f\"Unsupported message type: {type(message)}\")\n\n # Validate all messages have required fields\n for msg in formatted_messages:\n if msg[\"role\"] not in ROLE_VALUES:\n raise ValueError(f\"Invalid role: {msg['role']}\")\n\n return formatted_messages\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n 
stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = True,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format system and user messages with image support check\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n params = {\n \"model\": self.model,\n \"messages\": messages,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n if not stream:\n # Non-streaming request\n response = await 
self.client.chat.completions.create(\n **params, stream=False\n )\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message.content\n\n # Streaming request, For streaming, update estimated token count before making the request\n self.update_token_count(input_tokens)\n\n response = await self.client.chat.completions.create(**params, stream=True)\n\n collected_messages = []\n completion_text = \"\"\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n completion_text += chunk_message\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n # estimate completion tokens for streaming response\n completion_tokens = self.count_tokens(completion_text)\n logger.info(\n f\"Estimated completion tokens for streaming response: {completion_tokens}\"\n )\n self.total_completion_tokens += completion_tokens\n\n return full_response\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError:\n logger.exception(f\"Validation error\")\n raise\n except OpenAIError as oe:\n logger.exception(f\"OpenAI API error\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception:\n logger.exception(f\"Unexpected error in ask\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_with_images(\n self,\n messages: List[Union[dict, Message]],\n images: List[Union[str, dict]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = False,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt with images to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n images: List of image URLs or image data dictionaries\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # For ask_with_images, we always set supports_images to True because\n # this method should only be called with models that support images\n if self.model not in MULTIMODAL_MODELS:\n raise ValueError(\n f\"Model {self.model} does not support images. 
Use a model from {MULTIMODAL_MODELS}\"\n )\n\n # Format messages with image support\n formatted_messages = self.format_messages(messages, supports_images=True)\n\n # Ensure the last message is from the user to attach images\n if not formatted_messages or formatted_messages[-1][\"role\"] != \"user\":\n raise ValueError(\n \"The last message must be from the user to attach images\"\n )\n\n # Process the last user message to include images\n last_message = formatted_messages[-1]\n\n # Convert content to multimodal format if needed\n content = last_message[\"content\"]\n multimodal_content = (\n [{\"type\": \"text\", \"text\": content}]\n if isinstance(content, str)\n else content\n if isinstance(content, list)\n else []\n )\n\n # Add images to content\n for image in images:\n if isinstance(image, str):\n multimodal_content.append(\n {\"type\": \"image_url\", \"image_url\": {\"url\": image}}\n )\n elif isinstance(image, dict) and \"url\" in image:\n multimodal_content.append({\"type\": \"image_url\", \"image_url\": image})\n elif isinstance(image, dict) and \"image_url\" in image:\n multimodal_content.append(image)\n else:\n raise ValueError(f\"Unsupported image format: {image}\")\n\n # Update the message with multimodal content\n last_message[\"content\"] = multimodal_content\n\n # Add system messages if provided\n if system_msgs:\n all_messages = (\n self.format_messages(system_msgs, supports_images=True)\n + formatted_messages\n )\n else:\n all_messages = formatted_messages\n\n # Calculate tokens and check limits\n input_tokens = self.count_message_tokens(all_messages)\n if not self.check_token_limit(input_tokens):\n raise TokenLimitExceeded(self.get_limit_error_message(input_tokens))\n\n # Set up API parameters\n params = {\n \"model\": self.model,\n \"messages\": all_messages,\n \"stream\": stream,\n }\n\n # Add model-specific parameters\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = 
self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n # Handle non-streaming request\n if not stream:\n response = await self.client.chat.completions.create(**params)\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n self.update_token_count(response.usage.prompt_tokens)\n return response.choices[0].message.content\n\n # Handle streaming request\n self.update_token_count(input_tokens)\n response = await self.client.chat.completions.create(**params)\n\n collected_messages = []\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n return full_response\n\n except TokenLimitExceeded:\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_with_images: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_with_images: {e}\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_tool(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n timeout: int = 300,\n tools: Optional[List[dict]] = None,\n tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore\n temperature: Optional[float] = None,\n **kwargs,\n ) -> ChatCompletionMessage | None:\n \"\"\"\n Ask LLM using functions/tools and return the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n timeout: Request timeout in seconds\n tools: List of tools to use\n tool_choice: Tool choice strategy\n temperature: Sampling temperature for the response\n **kwargs: Additional completion arguments\n\n Returns:\n ChatCompletionMessage: The model's response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If tools, tool_choice, or messages are invalid\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Validate tool_choice\n if tool_choice not in TOOL_CHOICE_VALUES:\n raise ValueError(f\"Invalid tool_choice: {tool_choice}\")\n\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format messages\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # If there are tools, 
calculate token count for tool descriptions\n tools_tokens = 0\n if tools:\n for tool in tools:\n tools_tokens += self.count_tokens(str(tool))\n\n input_tokens += tools_tokens\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n # Validate tools if provided\n if tools:\n for tool in tools:\n if not isinstance(tool, dict) or \"type\" not in tool:\n raise ValueError(\"Each tool must be a dict with 'type' field\")\n\n # Set up the completion request\n params = {\n \"model\": self.model,\n \"messages\": messages,\n \"tools\": tools,\n \"tool_choice\": tool_choice,\n \"timeout\": timeout,\n **kwargs,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n params[\"stream\"] = False # Always use non-streaming for tool requests\n response: ChatCompletion = await self.client.chat.completions.create(\n **params\n )\n\n # Check if response is valid\n if not response.choices or not response.choices[0].message:\n print(response)\n # raise ValueError(\"Invalid or empty response from LLM\")\n return None\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_tool: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_tool: {e}\")\n raise\n"], ["/OpenManus/app/agent/base.py", "from abc import ABC, abstractmethod\nfrom contextlib import asynccontextmanager\nfrom typing import List, Optional\n\nfrom pydantic import BaseModel, Field, model_validator\n\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.sandbox.client import SANDBOX_CLIENT\nfrom app.schema import ROLE_TYPE, AgentState, Memory, Message\n\n\nclass BaseAgent(BaseModel, ABC):\n \"\"\"Abstract base class for managing agent state and execution.\n\n Provides foundational functionality for state transitions, memory management,\n and a step-based execution loop. Subclasses must implement the `step` method.\n \"\"\"\n\n # Core attributes\n name: str = Field(..., description=\"Unique name of the agent\")\n description: Optional[str] = Field(None, description=\"Optional agent description\")\n\n # Prompts\n system_prompt: Optional[str] = Field(\n None, description=\"System-level instruction prompt\"\n )\n next_step_prompt: Optional[str] = Field(\n None, description=\"Prompt for determining next action\"\n )\n\n # Dependencies\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n memory: Memory = Field(default_factory=Memory, description=\"Agent's memory store\")\n state: AgentState = Field(\n default=AgentState.IDLE, description=\"Current agent state\"\n )\n\n # Execution control\n max_steps: int = Field(default=10, description=\"Maximum steps before termination\")\n current_step: int = Field(default=0, description=\"Current step in execution\")\n\n duplicate_threshold: int = 2\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\" # Allow extra fields for flexibility in subclasses\n\n @model_validator(mode=\"after\")\n def initialize_agent(self) -> \"BaseAgent\":\n \"\"\"Initialize agent 
with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n if not isinstance(self.memory, Memory):\n self.memory = Memory()\n return self\n\n @asynccontextmanager\n async def state_context(self, new_state: AgentState):\n \"\"\"Context manager for safe agent state transitions.\n\n Args:\n new_state: The state to transition to during the context.\n\n Yields:\n None: Allows execution within the new state.\n\n Raises:\n ValueError: If the new_state is invalid.\n \"\"\"\n if not isinstance(new_state, AgentState):\n raise ValueError(f\"Invalid state: {new_state}\")\n\n previous_state = self.state\n self.state = new_state\n try:\n yield\n except Exception as e:\n self.state = AgentState.ERROR # Transition to ERROR on failure\n raise e\n finally:\n self.state = previous_state # Revert to previous state\n\n def update_memory(\n self,\n role: ROLE_TYPE, # type: ignore\n content: str,\n base64_image: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Add a message to the agent's memory.\n\n Args:\n role: The role of the message sender (user, system, assistant, tool).\n content: The message content.\n base64_image: Optional base64 encoded image.\n **kwargs: Additional arguments (e.g., tool_call_id for tool messages).\n\n Raises:\n ValueError: If the role is unsupported.\n \"\"\"\n message_map = {\n \"user\": Message.user_message,\n \"system\": Message.system_message,\n \"assistant\": Message.assistant_message,\n \"tool\": lambda content, **kw: Message.tool_message(content, **kw),\n }\n\n if role not in message_map:\n raise ValueError(f\"Unsupported message role: {role}\")\n\n # Create message with appropriate parameters based on role\n kwargs = {\"base64_image\": base64_image, **(kwargs if role == \"tool\" else {})}\n self.memory.add_message(message_map[role](content, **kwargs))\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Execute the agent's main loop 
asynchronously.\n\n Args:\n request: Optional initial user request to process.\n\n Returns:\n A string summarizing the execution results.\n\n Raises:\n RuntimeError: If the agent is not in IDLE state at start.\n \"\"\"\n if self.state != AgentState.IDLE:\n raise RuntimeError(f\"Cannot run agent from state: {self.state}\")\n\n if request:\n self.update_memory(\"user\", request)\n\n results: List[str] = []\n async with self.state_context(AgentState.RUNNING):\n while (\n self.current_step < self.max_steps and self.state != AgentState.FINISHED\n ):\n self.current_step += 1\n logger.info(f\"Executing step {self.current_step}/{self.max_steps}\")\n step_result = await self.step()\n\n # Check for stuck state\n if self.is_stuck():\n self.handle_stuck_state()\n\n results.append(f\"Step {self.current_step}: {step_result}\")\n\n if self.current_step >= self.max_steps:\n self.current_step = 0\n self.state = AgentState.IDLE\n results.append(f\"Terminated: Reached max steps ({self.max_steps})\")\n await SANDBOX_CLIENT.cleanup()\n return \"\\n\".join(results) if results else \"No steps executed\"\n\n @abstractmethod\n async def step(self) -> str:\n \"\"\"Execute a single step in the agent's workflow.\n\n Must be implemented by subclasses to define specific behavior.\n \"\"\"\n\n def handle_stuck_state(self):\n \"\"\"Handle stuck state by adding a prompt to change strategy\"\"\"\n stuck_prompt = \"\\\n Observed duplicate responses. Consider new strategies and avoid repeating ineffective paths already attempted.\"\n self.next_step_prompt = f\"{stuck_prompt}\\n{self.next_step_prompt}\"\n logger.warning(f\"Agent detected stuck state. 
Added prompt: {stuck_prompt}\")\n\n def is_stuck(self) -> bool:\n \"\"\"Check if the agent is stuck in a loop by detecting duplicate content\"\"\"\n if len(self.memory.messages) < 2:\n return False\n\n last_message = self.memory.messages[-1]\n if not last_message.content:\n return False\n\n # Count identical content occurrences\n duplicate_count = sum(\n 1\n for msg in reversed(self.memory.messages[:-1])\n if msg.role == \"assistant\" and msg.content == last_message.content\n )\n\n return duplicate_count >= self.duplicate_threshold\n\n @property\n def messages(self) -> List[Message]:\n \"\"\"Retrieve a list of messages from the agent's memory.\"\"\"\n return self.memory.messages\n\n @messages.setter\n def messages(self, value: List[Message]):\n \"\"\"Set the list of messages in the agent's memory.\"\"\"\n self.memory.messages = value\n"], ["/OpenManus/app/tool/bash.py", "import asyncio\nimport os\nfrom typing import Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, CLIResult\n\n\n_BASH_DESCRIPTION = \"\"\"Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says \"Command timed out. 
Sending SIGINT to the process\", the assistant should retry running the command in the background.\n\"\"\"\n\n\nclass _BashSession:\n \"\"\"A session of a bash shell.\"\"\"\n\n _started: bool\n _process: asyncio.subprocess.Process\n\n command: str = \"/bin/bash\"\n _output_delay: float = 0.2 # seconds\n _timeout: float = 120.0 # seconds\n _sentinel: str = \"<>\"\n\n def __init__(self):\n self._started = False\n self._timed_out = False\n\n async def start(self):\n if self._started:\n return\n\n self._process = await asyncio.create_subprocess_shell(\n self.command,\n preexec_fn=os.setsid,\n shell=True,\n bufsize=0,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n self._started = True\n\n def stop(self):\n \"\"\"Terminate the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return\n self._process.terminate()\n\n async def run(self, command: str):\n \"\"\"Execute a command in the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return CLIResult(\n system=\"tool must be restarted\",\n error=f\"bash has exited with returncode {self._process.returncode}\",\n )\n if self._timed_out:\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n )\n\n # we know these are not None because we created the process with PIPEs\n assert self._process.stdin\n assert self._process.stdout\n assert self._process.stderr\n\n # send command to the process\n self._process.stdin.write(\n command.encode() + f\"; echo '{self._sentinel}'\\n\".encode()\n )\n await self._process.stdin.drain()\n\n # read output from the process, until the sentinel is found\n try:\n async with asyncio.timeout(self._timeout):\n while True:\n await asyncio.sleep(self._output_delay)\n # if we read directly from stdout/stderr, it will wait 
forever for\n # EOF. use the StreamReader buffer directly instead.\n output = (\n self._process.stdout._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if self._sentinel in output:\n # strip the sentinel and break\n output = output[: output.index(self._sentinel)]\n break\n except asyncio.TimeoutError:\n self._timed_out = True\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n ) from None\n\n if output.endswith(\"\\n\"):\n output = output[:-1]\n\n error = (\n self._process.stderr._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if error.endswith(\"\\n\"):\n error = error[:-1]\n\n # clear the buffers so that the next output can be read correctly\n self._process.stdout._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n self._process.stderr._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n\n return CLIResult(output=output, error=error)\n\n\nclass Bash(BaseTool):\n \"\"\"A tool for executing bash commands\"\"\"\n\n name: str = \"bash\"\n description: str = _BASH_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"description\": \"The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. 
Can be `ctrl+c` to interrupt the currently running process.\",\n },\n },\n \"required\": [\"command\"],\n }\n\n _session: Optional[_BashSession] = None\n\n async def execute(\n self, command: str | None = None, restart: bool = False, **kwargs\n ) -> CLIResult:\n if restart:\n if self._session:\n self._session.stop()\n self._session = _BashSession()\n await self._session.start()\n\n return CLIResult(system=\"tool has been restarted.\")\n\n if self._session is None:\n self._session = _BashSession()\n await self._session.start()\n\n if command is not None:\n return await self._session.run(command)\n\n raise ToolError(\"no command provided.\")\n\n\nif __name__ == \"__main__\":\n bash = Bash()\n rst = asyncio.run(bash.execute(\"ls -l\"))\n print(rst)\n"], ["/OpenManus/app/sandbox/core/sandbox.py", "import asyncio\nimport io\nimport os\nimport tarfile\nimport tempfile\nimport uuid\nfrom typing import Dict, Optional\n\nimport docker\nfrom docker.errors import NotFound\nfrom docker.models.containers import Container\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.exceptions import SandboxTimeoutError\nfrom app.sandbox.core.terminal import AsyncDockerizedTerminal\n\n\nclass DockerSandbox:\n \"\"\"Docker sandbox environment.\n\n Provides a containerized execution environment with resource limits,\n file operations, and command execution capabilities.\n\n Attributes:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n client: Docker client.\n container: Docker container instance.\n terminal: Container terminal interface.\n \"\"\"\n\n def __init__(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Initializes a sandbox instance.\n\n Args:\n config: Sandbox configuration. 
Default configuration used if None.\n volume_bindings: Volume mappings in {host_path: container_path} format.\n \"\"\"\n self.config = config or SandboxSettings()\n self.volume_bindings = volume_bindings or {}\n self.client = docker.from_env()\n self.container: Optional[Container] = None\n self.terminal: Optional[AsyncDockerizedTerminal] = None\n\n async def create(self) -> \"DockerSandbox\":\n \"\"\"Creates and starts the sandbox container.\n\n Returns:\n Current sandbox instance.\n\n Raises:\n docker.errors.APIError: If Docker API call fails.\n RuntimeError: If container creation or startup fails.\n \"\"\"\n try:\n # Prepare container config\n host_config = self.client.api.create_host_config(\n mem_limit=self.config.memory_limit,\n cpu_period=100000,\n cpu_quota=int(100000 * self.config.cpu_limit),\n network_mode=\"none\" if not self.config.network_enabled else \"bridge\",\n binds=self._prepare_volume_bindings(),\n )\n\n # Generate unique container name with sandbox_ prefix\n container_name = f\"sandbox_{uuid.uuid4().hex[:8]}\"\n\n # Create container\n container = await asyncio.to_thread(\n self.client.api.create_container,\n image=self.config.image,\n command=\"tail -f /dev/null\",\n hostname=\"sandbox\",\n working_dir=self.config.work_dir,\n host_config=host_config,\n name=container_name,\n tty=True,\n detach=True,\n )\n\n self.container = self.client.containers.get(container[\"Id\"])\n\n # Start container\n await asyncio.to_thread(self.container.start)\n\n # Initialize terminal\n self.terminal = AsyncDockerizedTerminal(\n container[\"Id\"],\n self.config.work_dir,\n env_vars={\"PYTHONUNBUFFERED\": \"1\"}\n # Ensure Python output is not buffered\n )\n await self.terminal.init()\n\n return self\n\n except Exception as e:\n await self.cleanup() # Ensure resources are cleaned up\n raise RuntimeError(f\"Failed to create sandbox: {e}\") from e\n\n def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]:\n \"\"\"Prepares volume binding configuration.\n\n 
Returns:\n Volume binding configuration dictionary.\n \"\"\"\n bindings = {}\n\n # Create and add working directory mapping\n work_dir = self._ensure_host_dir(self.config.work_dir)\n bindings[work_dir] = {\"bind\": self.config.work_dir, \"mode\": \"rw\"}\n\n # Add custom volume bindings\n for host_path, container_path in self.volume_bindings.items():\n bindings[host_path] = {\"bind\": container_path, \"mode\": \"rw\"}\n\n return bindings\n\n @staticmethod\n def _ensure_host_dir(path: str) -> str:\n \"\"\"Ensures directory exists on the host.\n\n Args:\n path: Directory path.\n\n Returns:\n Actual path on the host.\n \"\"\"\n host_path = os.path.join(\n tempfile.gettempdir(),\n f\"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}\",\n )\n os.makedirs(host_path, exist_ok=True)\n return host_path\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the sandbox.\n\n Args:\n cmd: Command to execute.\n timeout: Timeout in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If sandbox not initialized or command execution fails.\n TimeoutError: If command execution times out.\n \"\"\"\n if not self.terminal:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n return await self.terminal.run_command(\n cmd, timeout=timeout or self.config.timeout\n )\n except TimeoutError:\n raise SandboxTimeoutError(\n f\"Command execution timed out after {timeout or self.config.timeout} seconds\"\n )\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads a file from the container.\n\n Args:\n path: File path.\n\n Returns:\n File contents as string.\n\n Raises:\n FileNotFoundError: If file does not exist.\n RuntimeError: If read operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n # Get file archive\n resolved_path = self._safe_resolve_path(path)\n tar_stream, _ = await asyncio.to_thread(\n self.container.get_archive, resolved_path\n 
)\n\n # Read file content from tar stream\n content = await self._read_from_tar(tar_stream)\n return content.decode(\"utf-8\")\n\n except NotFound:\n raise FileNotFoundError(f\"File not found: {path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to read file: {e}\")\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to a file in the container.\n\n Args:\n path: Target path.\n content: File content.\n\n Raises:\n RuntimeError: If write operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n resolved_path = self._safe_resolve_path(path)\n parent_dir = os.path.dirname(resolved_path)\n\n # Create parent directory\n if parent_dir:\n await self.run_command(f\"mkdir -p {parent_dir}\")\n\n # Prepare file data\n tar_stream = await self._create_tar_stream(\n os.path.basename(path), content.encode(\"utf-8\")\n )\n\n # Write file\n await asyncio.to_thread(\n self.container.put_archive, parent_dir or \"/\", tar_stream\n )\n\n except Exception as e:\n raise RuntimeError(f\"Failed to write file: {e}\")\n\n def _safe_resolve_path(self, path: str) -> str:\n \"\"\"Safely resolves container path, preventing path traversal.\n\n Args:\n path: Original path.\n\n Returns:\n Resolved absolute path.\n\n Raises:\n ValueError: If path contains potentially unsafe patterns.\n \"\"\"\n # Check for path traversal attempts\n if \"..\" in path.split(\"/\"):\n raise ValueError(\"Path contains potentially unsafe patterns\")\n\n resolved = (\n os.path.join(self.config.work_dir, path)\n if not os.path.isabs(path)\n else path\n )\n return resolved\n\n async def copy_from(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file from the container.\n\n Args:\n src_path: Source file path (container).\n dst_path: Destination path (host).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n # Ensure destination file's parent 
directory exists\n parent_dir = os.path.dirname(dst_path)\n if parent_dir:\n os.makedirs(parent_dir, exist_ok=True)\n\n # Get file stream\n resolved_src = self._safe_resolve_path(src_path)\n stream, stat = await asyncio.to_thread(\n self.container.get_archive, resolved_src\n )\n\n # Create temporary directory to extract file\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Write stream to temporary file\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with open(tar_path, \"wb\") as f:\n for chunk in stream:\n f.write(chunk)\n\n # Extract file\n with tarfile.open(tar_path) as tar:\n members = tar.getmembers()\n if not members:\n raise FileNotFoundError(f\"Source file is empty: {src_path}\")\n\n # If destination is a directory, we should preserve relative path structure\n if os.path.isdir(dst_path):\n tar.extractall(dst_path)\n else:\n # If destination is a file, we only extract the source file's content\n if len(members) > 1:\n raise RuntimeError(\n f\"Source path is a directory but destination is a file: {src_path}\"\n )\n\n with open(dst_path, \"wb\") as dst:\n src_file = tar.extractfile(members[0])\n if src_file is None:\n raise RuntimeError(\n f\"Failed to extract file: {src_path}\"\n )\n dst.write(src_file.read())\n\n except docker.errors.NotFound:\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n async def copy_to(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file to the container.\n\n Args:\n src_path: Source file path (host).\n dst_path: Destination path (container).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n if not os.path.exists(src_path):\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n\n # Create destination directory in container\n resolved_dst = self._safe_resolve_path(dst_path)\n container_dir = os.path.dirname(resolved_dst)\n if 
container_dir:\n await self.run_command(f\"mkdir -p {container_dir}\")\n\n # Create tar file to upload\n with tempfile.TemporaryDirectory() as tmp_dir:\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with tarfile.open(tar_path, \"w\") as tar:\n # Handle directory source path\n if os.path.isdir(src_path):\n os.path.basename(src_path.rstrip(\"/\"))\n for root, _, files in os.walk(src_path):\n for file in files:\n file_path = os.path.join(root, file)\n arcname = os.path.join(\n os.path.basename(dst_path),\n os.path.relpath(file_path, src_path),\n )\n tar.add(file_path, arcname=arcname)\n else:\n # Add single file to tar\n tar.add(src_path, arcname=os.path.basename(dst_path))\n\n # Read tar file content\n with open(tar_path, \"rb\") as f:\n data = f.read()\n\n # Upload to container\n await asyncio.to_thread(\n self.container.put_archive,\n os.path.dirname(resolved_dst) or \"/\",\n data,\n )\n\n # Verify file was created successfully\n try:\n await self.run_command(f\"test -e {resolved_dst}\")\n except Exception:\n raise RuntimeError(f\"Failed to verify file creation: {dst_path}\")\n\n except FileNotFoundError:\n raise\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n @staticmethod\n async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO:\n \"\"\"Creates a tar file stream.\n\n Args:\n name: Filename.\n content: File content.\n\n Returns:\n Tar file stream.\n \"\"\"\n tar_stream = io.BytesIO()\n with tarfile.open(fileobj=tar_stream, mode=\"w\") as tar:\n tarinfo = tarfile.TarInfo(name=name)\n tarinfo.size = len(content)\n tar.addfile(tarinfo, io.BytesIO(content))\n tar_stream.seek(0)\n return tar_stream\n\n @staticmethod\n async def _read_from_tar(tar_stream) -> bytes:\n \"\"\"Reads file content from a tar stream.\n\n Args:\n tar_stream: Tar file stream.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If read operation fails.\n \"\"\"\n with tempfile.NamedTemporaryFile() as tmp:\n for chunk in tar_stream:\n 
tmp.write(chunk)\n tmp.seek(0)\n\n with tarfile.open(fileobj=tmp) as tar:\n member = tar.next()\n if not member:\n raise RuntimeError(\"Empty tar archive\")\n\n file_content = tar.extractfile(member)\n if not file_content:\n raise RuntimeError(\"Failed to extract file content\")\n\n return file_content.read()\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up sandbox resources.\"\"\"\n errors = []\n try:\n if self.terminal:\n try:\n await self.terminal.close()\n except Exception as e:\n errors.append(f\"Terminal cleanup error: {e}\")\n finally:\n self.terminal = None\n\n if self.container:\n try:\n await asyncio.to_thread(self.container.stop, timeout=5)\n except Exception as e:\n errors.append(f\"Container stop error: {e}\")\n\n try:\n await asyncio.to_thread(self.container.remove, force=True)\n except Exception as e:\n errors.append(f\"Container remove error: {e}\")\n finally:\n self.container = None\n\n except Exception as e:\n errors.append(f\"General cleanup error: {e}\")\n\n if errors:\n print(f\"Warning: Errors during cleanup: {', '.join(errors)}\")\n\n async def __aenter__(self) -> \"DockerSandbox\":\n \"\"\"Async context manager entry.\"\"\"\n return await self.create()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n"], ["/OpenManus/app/sandbox/core/manager.py", "import asyncio\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom typing import Dict, Optional, Set\n\nimport docker\nfrom docker.errors import APIError, ImageNotFound\n\nfrom app.config import SandboxSettings\nfrom app.logger import logger\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxManager:\n \"\"\"Docker sandbox manager.\n\n Manages multiple DockerSandbox instances lifecycle including creation,\n monitoring, and cleanup. 
Provides concurrent access control and automatic\n cleanup mechanisms for sandbox resources.\n\n Attributes:\n max_sandboxes: Maximum allowed number of sandboxes.\n idle_timeout: Sandbox idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n _sandboxes: Active sandbox instance mapping.\n _last_used: Last used time record for sandboxes.\n \"\"\"\n\n def __init__(\n self,\n max_sandboxes: int = 100,\n idle_timeout: int = 3600,\n cleanup_interval: int = 300,\n ):\n \"\"\"Initializes sandbox manager.\n\n Args:\n max_sandboxes: Maximum sandbox count limit.\n idle_timeout: Idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n \"\"\"\n self.max_sandboxes = max_sandboxes\n self.idle_timeout = idle_timeout\n self.cleanup_interval = cleanup_interval\n\n # Docker client\n self._client = docker.from_env()\n\n # Resource mappings\n self._sandboxes: Dict[str, DockerSandbox] = {}\n self._last_used: Dict[str, float] = {}\n\n # Concurrency control\n self._locks: Dict[str, asyncio.Lock] = {}\n self._global_lock = asyncio.Lock()\n self._active_operations: Set[str] = set()\n\n # Cleanup task\n self._cleanup_task: Optional[asyncio.Task] = None\n self._is_shutting_down = False\n\n # Start automatic cleanup\n self.start_cleanup_task()\n\n async def ensure_image(self, image: str) -> bool:\n \"\"\"Ensures Docker image is available.\n\n Args:\n image: Image name.\n\n Returns:\n bool: Whether image is available.\n \"\"\"\n try:\n self._client.images.get(image)\n return True\n except ImageNotFound:\n try:\n logger.info(f\"Pulling image {image}...\")\n await asyncio.get_event_loop().run_in_executor(\n None, self._client.images.pull, image\n )\n return True\n except (APIError, Exception) as e:\n logger.error(f\"Failed to pull image {image}: {e}\")\n return False\n\n @asynccontextmanager\n async def sandbox_operation(self, sandbox_id: str):\n \"\"\"Context manager for sandbox operations.\n\n Provides concurrency control and usage time 
updates.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Raises:\n KeyError: If sandbox not found.\n \"\"\"\n if sandbox_id not in self._locks:\n self._locks[sandbox_id] = asyncio.Lock()\n\n async with self._locks[sandbox_id]:\n if sandbox_id not in self._sandboxes:\n raise KeyError(f\"Sandbox {sandbox_id} not found\")\n\n self._active_operations.add(sandbox_id)\n try:\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n yield self._sandboxes[sandbox_id]\n finally:\n self._active_operations.remove(sandbox_id)\n\n async def create_sandbox(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> str:\n \"\"\"Creates a new sandbox instance.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n\n Returns:\n str: Sandbox ID.\n\n Raises:\n RuntimeError: If max sandbox count reached or creation fails.\n \"\"\"\n async with self._global_lock:\n if len(self._sandboxes) >= self.max_sandboxes:\n raise RuntimeError(\n f\"Maximum number of sandboxes ({self.max_sandboxes}) reached\"\n )\n\n config = config or SandboxSettings()\n if not await self.ensure_image(config.image):\n raise RuntimeError(f\"Failed to ensure Docker image: {config.image}\")\n\n sandbox_id = str(uuid.uuid4())\n try:\n sandbox = DockerSandbox(config, volume_bindings)\n await sandbox.create()\n\n self._sandboxes[sandbox_id] = sandbox\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n self._locks[sandbox_id] = asyncio.Lock()\n\n logger.info(f\"Created sandbox {sandbox_id}\")\n return sandbox_id\n\n except Exception as e:\n logger.error(f\"Failed to create sandbox: {e}\")\n if sandbox_id in self._sandboxes:\n await self.delete_sandbox(sandbox_id)\n raise RuntimeError(f\"Failed to create sandbox: {e}\")\n\n async def get_sandbox(self, sandbox_id: str) -> DockerSandbox:\n \"\"\"Gets a sandbox instance.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Returns:\n DockerSandbox: Sandbox instance.\n\n Raises:\n 
KeyError: If sandbox does not exist.\n \"\"\"\n async with self.sandbox_operation(sandbox_id) as sandbox:\n return sandbox\n\n def start_cleanup_task(self) -> None:\n \"\"\"Starts automatic cleanup task.\"\"\"\n\n async def cleanup_loop():\n while not self._is_shutting_down:\n try:\n await self._cleanup_idle_sandboxes()\n except Exception as e:\n logger.error(f\"Error in cleanup loop: {e}\")\n await asyncio.sleep(self.cleanup_interval)\n\n self._cleanup_task = asyncio.create_task(cleanup_loop())\n\n async def _cleanup_idle_sandboxes(self) -> None:\n \"\"\"Cleans up idle sandboxes.\"\"\"\n current_time = asyncio.get_event_loop().time()\n to_cleanup = []\n\n async with self._global_lock:\n for sandbox_id, last_used in self._last_used.items():\n if (\n sandbox_id not in self._active_operations\n and current_time - last_used > self.idle_timeout\n ):\n to_cleanup.append(sandbox_id)\n\n for sandbox_id in to_cleanup:\n try:\n await self.delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Error cleaning up sandbox {sandbox_id}: {e}\")\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up all resources.\"\"\"\n logger.info(\"Starting manager cleanup...\")\n self._is_shutting_down = True\n\n # Cancel cleanup task\n if self._cleanup_task:\n self._cleanup_task.cancel()\n try:\n await asyncio.wait_for(self._cleanup_task, timeout=1.0)\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n\n # Get all sandbox IDs to clean up\n async with self._global_lock:\n sandbox_ids = list(self._sandboxes.keys())\n\n # Concurrently clean up all sandboxes\n cleanup_tasks = []\n for sandbox_id in sandbox_ids:\n task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id))\n cleanup_tasks.append(task)\n\n if cleanup_tasks:\n # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting\n try:\n await asyncio.wait(cleanup_tasks, timeout=30.0)\n except asyncio.TimeoutError:\n logger.error(\"Sandbox cleanup timed out\")\n\n # Clean up remaining 
references\n self._sandboxes.clear()\n self._last_used.clear()\n self._locks.clear()\n self._active_operations.clear()\n\n logger.info(\"Manager cleanup completed\")\n\n async def _safe_delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Safely deletes a single sandbox.\n\n Args:\n sandbox_id: Sandbox ID to delete.\n \"\"\"\n try:\n if sandbox_id in self._active_operations:\n logger.warning(\n f\"Sandbox {sandbox_id} has active operations, waiting for completion\"\n )\n for _ in range(10): # Wait at most 10 times\n await asyncio.sleep(0.5)\n if sandbox_id not in self._active_operations:\n break\n else:\n logger.warning(\n f\"Timeout waiting for sandbox {sandbox_id} operations to complete\"\n )\n\n # Get reference to sandbox object\n sandbox = self._sandboxes.get(sandbox_id)\n if sandbox:\n await sandbox.cleanup()\n\n # Remove sandbox record from manager\n async with self._global_lock:\n self._sandboxes.pop(sandbox_id, None)\n self._last_used.pop(sandbox_id, None)\n self._locks.pop(sandbox_id, None)\n logger.info(f\"Deleted sandbox {sandbox_id}\")\n except Exception as e:\n logger.error(f\"Error during cleanup of sandbox {sandbox_id}: {e}\")\n\n async def delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Deletes specified sandbox.\n\n Args:\n sandbox_id: Sandbox ID.\n \"\"\"\n if sandbox_id not in self._sandboxes:\n return\n\n try:\n await self._safe_delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Failed to delete sandbox {sandbox_id}: {e}\")\n\n async def __aenter__(self) -> \"SandboxManager\":\n \"\"\"Async context manager entry.\"\"\"\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n\n def get_stats(self) -> Dict:\n \"\"\"Gets manager statistics.\n\n Returns:\n Dict: Statistics information.\n \"\"\"\n return {\n \"total_sandboxes\": len(self._sandboxes),\n \"active_operations\": len(self._active_operations),\n \"max_sandboxes\": 
self.max_sandboxes,\n \"idle_timeout\": self.idle_timeout,\n \"cleanup_interval\": self.cleanup_interval,\n \"is_shutting_down\": self._is_shutting_down,\n }\n"], ["/OpenManus/app/tool/crawl4ai.py", "\"\"\"\nCrawl4AI Web Crawler Tool for OpenManus\n\nThis tool integrates Crawl4AI, a high-performance web crawler designed for LLMs and AI agents,\nproviding fast, precise, and AI-ready data extraction with clean Markdown generation.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Union\nfrom urllib.parse import urlparse\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\n\n\nclass Crawl4aiTool(BaseTool):\n \"\"\"\n Web crawler tool powered by Crawl4AI.\n\n Provides clean markdown extraction optimized for AI processing.\n \"\"\"\n\n name: str = \"crawl4ai\"\n description: str = \"\"\"Web crawler that extracts clean, AI-ready content from web pages.\n\n Features:\n - Extracts clean markdown content optimized for LLMs\n - Handles JavaScript-heavy sites and dynamic content\n - Supports multiple URLs in a single request\n - Fast and reliable with built-in error handling\n\n Perfect for content analysis, research, and feeding web content to AI models.\"\"\"\n\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"urls\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"(required) List of URLs to crawl. Can be a single URL or multiple URLs.\",\n \"minItems\": 1,\n },\n \"timeout\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Timeout in seconds for each URL. Default is 30.\",\n \"default\": 30,\n \"minimum\": 5,\n \"maximum\": 120,\n },\n \"bypass_cache\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to bypass cache and fetch fresh content. Default is false.\",\n \"default\": False,\n },\n \"word_count_threshold\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Minimum word count for content blocks. 
Default is 10.\",\n \"default\": 10,\n \"minimum\": 1,\n },\n },\n \"required\": [\"urls\"],\n }\n\n async def execute(\n self,\n urls: Union[str, List[str]],\n timeout: int = 30,\n bypass_cache: bool = False,\n word_count_threshold: int = 10,\n ) -> ToolResult:\n \"\"\"\n Execute web crawling for the specified URLs.\n\n Args:\n urls: Single URL string or list of URLs to crawl\n timeout: Timeout in seconds for each URL\n bypass_cache: Whether to bypass cache\n word_count_threshold: Minimum word count for content blocks\n\n Returns:\n ToolResult with crawl results\n \"\"\"\n # Normalize URLs to list\n if isinstance(urls, str):\n url_list = [urls]\n else:\n url_list = urls\n\n # Validate URLs\n valid_urls = []\n for url in url_list:\n if self._is_valid_url(url):\n valid_urls.append(url)\n else:\n logger.warning(f\"Invalid URL skipped: {url}\")\n\n if not valid_urls:\n return ToolResult(error=\"No valid URLs provided\")\n\n try:\n # Import crawl4ai components\n from crawl4ai import (\n AsyncWebCrawler,\n BrowserConfig,\n CacheMode,\n CrawlerRunConfig,\n )\n\n # Configure browser settings\n browser_config = BrowserConfig(\n headless=True,\n verbose=False,\n browser_type=\"chromium\",\n ignore_https_errors=True,\n java_script_enabled=True,\n )\n\n # Configure crawler settings\n run_config = CrawlerRunConfig(\n cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,\n word_count_threshold=word_count_threshold,\n process_iframes=True,\n remove_overlay_elements=True,\n excluded_tags=[\"script\", \"style\"],\n page_timeout=timeout * 1000, # Convert to milliseconds\n verbose=False,\n wait_until=\"domcontentloaded\",\n )\n\n results = []\n successful_count = 0\n failed_count = 0\n\n # Process each URL\n async with AsyncWebCrawler(config=browser_config) as crawler:\n for url in valid_urls:\n try:\n logger.info(f\"🕷️ Crawling URL: {url}\")\n start_time = asyncio.get_event_loop().time()\n\n result = await crawler.arun(url=url, config=run_config)\n\n end_time = 
asyncio.get_event_loop().time()\n execution_time = end_time - start_time\n\n if result.success:\n # Count words in markdown\n word_count = 0\n if hasattr(result, \"markdown\") and result.markdown:\n word_count = len(result.markdown.split())\n\n # Count links\n links_count = 0\n if hasattr(result, \"links\") and result.links:\n internal_links = result.links.get(\"internal\", [])\n external_links = result.links.get(\"external\", [])\n links_count = len(internal_links) + len(external_links)\n\n # Count images\n images_count = 0\n if hasattr(result, \"media\") and result.media:\n images = result.media.get(\"images\", [])\n images_count = len(images)\n\n results.append(\n {\n \"url\": url,\n \"success\": True,\n \"status_code\": getattr(result, \"status_code\", 200),\n \"title\": result.metadata.get(\"title\")\n if result.metadata\n else None,\n \"markdown\": result.markdown\n if hasattr(result, \"markdown\")\n else None,\n \"word_count\": word_count,\n \"links_count\": links_count,\n \"images_count\": images_count,\n \"execution_time\": execution_time,\n }\n )\n successful_count += 1\n logger.info(\n f\"✅ Successfully crawled {url} in {execution_time:.2f}s\"\n )\n\n else:\n results.append(\n {\n \"url\": url,\n \"success\": False,\n \"error_message\": getattr(\n result, \"error_message\", \"Unknown error\"\n ),\n \"execution_time\": execution_time,\n }\n )\n failed_count += 1\n logger.warning(f\"❌ Failed to crawl {url}\")\n\n except Exception as e:\n error_msg = f\"Error crawling {url}: {str(e)}\"\n logger.error(error_msg)\n results.append(\n {\"url\": url, \"success\": False, \"error_message\": error_msg}\n )\n failed_count += 1\n\n # Format output\n output_lines = [f\"🕷️ Crawl4AI Results Summary:\"]\n output_lines.append(f\"📊 Total URLs: {len(valid_urls)}\")\n output_lines.append(f\"✅ Successful: {successful_count}\")\n output_lines.append(f\"❌ Failed: {failed_count}\")\n output_lines.append(\"\")\n\n for i, result in enumerate(results, 1):\n 
output_lines.append(f\"{i}. {result['url']}\")\n\n if result[\"success\"]:\n output_lines.append(\n f\" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})\"\n )\n if result.get(\"title\"):\n output_lines.append(f\" 📄 Title: {result['title']}\")\n\n if result.get(\"markdown\"):\n # Show first 300 characters of markdown content\n content_preview = result[\"markdown\"]\n if len(result[\"markdown\"]) > 300:\n content_preview += \"...\"\n output_lines.append(f\" 📝 Content: {content_preview}\")\n\n output_lines.append(\n f\" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images\"\n )\n\n if result.get(\"execution_time\"):\n output_lines.append(\n f\" ⏱️ Time: {result['execution_time']:.2f}s\"\n )\n else:\n output_lines.append(f\" ❌ Status: Failed\")\n if result.get(\"error_message\"):\n output_lines.append(f\" 🚫 Error: {result['error_message']}\")\n\n output_lines.append(\"\")\n\n return ToolResult(output=\"\\n\".join(output_lines))\n\n except ImportError as e:\n error_msg = \"Crawl4AI is not installed. 
Please install it with: pip install crawl4ai\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n except Exception as e:\n error_msg = f\"Crawl4AI execution failed: {str(e)}\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n\n def _is_valid_url(self, url: str) -> bool:\n \"\"\"Validate if a URL is properly formatted.\"\"\"\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc]) and result.scheme in [\n \"http\",\n \"https\",\n ]\n except Exception:\n return False\n"], ["/OpenManus/app/tool/str_replace_editor.py", "\"\"\"File and directory manipulation tool with sandbox support.\"\"\"\n\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, DefaultDict, List, Literal, Optional, get_args\n\nfrom app.config import config\nfrom app.exceptions import ToolError\nfrom app.tool import BaseTool\nfrom app.tool.base import CLIResult, ToolResult\nfrom app.tool.file_operators import (\n FileOperator,\n LocalFileOperator,\n PathLike,\n SandboxFileOperator,\n)\n\n\nCommand = Literal[\n \"view\",\n \"create\",\n \"str_replace\",\n \"insert\",\n \"undo_edit\",\n]\n\n# Constants\nSNIPPET_LINES: int = 4\nMAX_RESPONSE_LEN: int = 16000\nTRUNCATED_MESSAGE: str = (\n \"To save on context only part of this file has been shown to you. \"\n \"You should retry this tool after you have searched inside the file with `grep -n` \"\n \"in order to find the line numbers of what you are looking for.\"\n)\n\n# Tool description\n_STR_REPLACE_EDITOR_DESCRIPTION = \"\"\"Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. 
If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with ``\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n\"\"\"\n\n\ndef maybe_truncate(\n content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN\n) -> str:\n \"\"\"Truncate content and append a notice if content exceeds the specified length.\"\"\"\n if not truncate_after or len(content) <= truncate_after:\n return content\n return content[:truncate_after] + TRUNCATED_MESSAGE\n\n\nclass StrReplaceEditor(BaseTool):\n \"\"\"A tool for viewing, creating, and editing files with sandbox support.\"\"\"\n\n name: str = \"str_replace_editor\"\n description: str = _STR_REPLACE_EDITOR_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The commands to run. 
Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\",\n \"enum\": [\"view\", \"create\", \"str_replace\", \"insert\", \"undo_edit\"],\n \"type\": \"string\",\n },\n \"path\": {\n \"description\": \"Absolute path to file or directory.\",\n \"type\": \"string\",\n },\n \"file_text\": {\n \"description\": \"Required parameter of `create` command, with the content of the file to be created.\",\n \"type\": \"string\",\n },\n \"old_str\": {\n \"description\": \"Required parameter of `str_replace` command containing the string in `path` to replace.\",\n \"type\": \"string\",\n },\n \"new_str\": {\n \"description\": \"Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\",\n \"type\": \"string\",\n },\n \"insert_line\": {\n \"description\": \"Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\",\n \"type\": \"integer\",\n },\n \"view_range\": {\n \"description\": \"Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. 
Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\",\n \"items\": {\"type\": \"integer\"},\n \"type\": \"array\",\n },\n },\n \"required\": [\"command\", \"path\"],\n }\n _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list)\n _local_operator: LocalFileOperator = LocalFileOperator()\n _sandbox_operator: SandboxFileOperator = SandboxFileOperator()\n\n # def _get_operator(self, use_sandbox: bool) -> FileOperator:\n def _get_operator(self) -> FileOperator:\n \"\"\"Get the appropriate file operator based on execution mode.\"\"\"\n return (\n self._sandbox_operator\n if config.sandbox.use_sandbox\n else self._local_operator\n )\n\n async def execute(\n self,\n *,\n command: Command,\n path: str,\n file_text: str | None = None,\n view_range: list[int] | None = None,\n old_str: str | None = None,\n new_str: str | None = None,\n insert_line: int | None = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Execute a file operation command.\"\"\"\n # Get the appropriate file operator\n operator = self._get_operator()\n\n # Validate path and command combination\n await self.validate_path(command, Path(path), operator)\n\n # Execute the appropriate command\n if command == \"view\":\n result = await self.view(path, view_range, operator)\n elif command == \"create\":\n if file_text is None:\n raise ToolError(\"Parameter `file_text` is required for command: create\")\n await operator.write_file(path, file_text)\n self._file_history[path].append(file_text)\n result = ToolResult(output=f\"File created successfully at: {path}\")\n elif command == \"str_replace\":\n if old_str is None:\n raise ToolError(\n \"Parameter `old_str` is required for command: str_replace\"\n )\n result = await self.str_replace(path, old_str, new_str, operator)\n elif command == \"insert\":\n if insert_line is None:\n raise ToolError(\n \"Parameter `insert_line` is required for command: insert\"\n )\n if new_str is None:\n raise ToolError(\"Parameter `new_str` is 
required for command: insert\")\n result = await self.insert(path, insert_line, new_str, operator)\n elif command == \"undo_edit\":\n result = await self.undo_edit(path, operator)\n else:\n # This should be caught by type checking, but we include it for safety\n raise ToolError(\n f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {\", \".join(get_args(Command))}'\n )\n\n return str(result)\n\n async def validate_path(\n self, command: str, path: Path, operator: FileOperator\n ) -> None:\n \"\"\"Validate path and command combination based on execution environment.\"\"\"\n # Check if path is absolute\n if not path.is_absolute():\n raise ToolError(f\"The path {path} is not an absolute path\")\n\n # Only check if path exists for non-create commands\n if command != \"create\":\n if not await operator.exists(path):\n raise ToolError(\n f\"The path {path} does not exist. Please provide a valid path.\"\n )\n\n # Check if path is a directory\n is_dir = await operator.is_directory(path)\n if is_dir and command != \"view\":\n raise ToolError(\n f\"The path {path} is a directory and only the `view` command can be used on directories\"\n )\n\n # Check if file exists for create command\n elif command == \"create\":\n exists = await operator.exists(path)\n if exists:\n raise ToolError(\n f\"File already exists at: {path}. 
Cannot overwrite files using command `create`.\"\n )\n\n async def view(\n self,\n path: PathLike,\n view_range: Optional[List[int]] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Display file or directory content.\"\"\"\n # Determine if path is a directory\n is_dir = await operator.is_directory(path)\n\n if is_dir:\n # Directory handling\n if view_range:\n raise ToolError(\n \"The `view_range` parameter is not allowed when `path` points to a directory.\"\n )\n\n return await self._view_directory(path, operator)\n else:\n # File handling\n return await self._view_file(path, operator, view_range)\n\n @staticmethod\n async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult:\n \"\"\"Display directory contents.\"\"\"\n find_cmd = f\"find {path} -maxdepth 2 -not -path '*/\\\\.*'\"\n\n # Execute command using the operator\n returncode, stdout, stderr = await operator.run_command(find_cmd)\n\n if not stderr:\n stdout = (\n f\"Here's the files and directories up to 2 levels deep in {path}, \"\n f\"excluding hidden items:\\n{stdout}\\n\"\n )\n\n return CLIResult(output=stdout, error=stderr)\n\n async def _view_file(\n self,\n path: PathLike,\n operator: FileOperator,\n view_range: Optional[List[int]] = None,\n ) -> CLIResult:\n \"\"\"Display file content, optionally within a specified line range.\"\"\"\n # Read file content\n file_content = await operator.read_file(path)\n init_line = 1\n\n # Apply view range if specified\n if view_range:\n if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range):\n raise ToolError(\n \"Invalid `view_range`. It should be a list of two integers.\"\n )\n\n file_lines = file_content.split(\"\\n\")\n n_lines_file = len(file_lines)\n init_line, final_line = view_range\n\n # Validate view range\n if init_line < 1 or init_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. 
Its first element `{init_line}` should be \"\n f\"within the range of lines of the file: {[1, n_lines_file]}\"\n )\n if final_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"smaller than the number of lines in the file: `{n_lines_file}`\"\n )\n if final_line != -1 and final_line < init_line:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"larger or equal than its first `{init_line}`\"\n )\n\n # Apply range\n if final_line == -1:\n file_content = \"\\n\".join(file_lines[init_line - 1 :])\n else:\n file_content = \"\\n\".join(file_lines[init_line - 1 : final_line])\n\n # Format and return result\n return CLIResult(\n output=self._make_output(file_content, str(path), init_line=init_line)\n )\n\n async def str_replace(\n self,\n path: PathLike,\n old_str: str,\n new_str: Optional[str] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Replace a unique string in a file with a new string.\"\"\"\n # Read file content and expand tabs\n file_content = (await operator.read_file(path)).expandtabs()\n old_str = old_str.expandtabs()\n new_str = new_str.expandtabs() if new_str is not None else \"\"\n\n # Check if old_str is unique in the file\n occurrences = file_content.count(old_str)\n if occurrences == 0:\n raise ToolError(\n f\"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}.\"\n )\n elif occurrences > 1:\n # Find line numbers of occurrences\n file_content_lines = file_content.split(\"\\n\")\n lines = [\n idx + 1\n for idx, line in enumerate(file_content_lines)\n if old_str in line\n ]\n raise ToolError(\n f\"No replacement was performed. Multiple occurrences of old_str `{old_str}` \"\n f\"in lines {lines}. 
Please ensure it is unique\"\n )\n\n # Replace old_str with new_str\n new_file_content = file_content.replace(old_str, new_str)\n\n # Write the new content to the file\n await operator.write_file(path, new_file_content)\n\n # Save the original content to history\n self._file_history[path].append(file_content)\n\n # Create a snippet of the edited section\n replacement_line = file_content.split(old_str)[0].count(\"\\n\")\n start_line = max(0, replacement_line - SNIPPET_LINES)\n end_line = replacement_line + SNIPPET_LINES + new_str.count(\"\\n\")\n snippet = \"\\n\".join(new_file_content.split(\"\\n\")[start_line : end_line + 1])\n\n # Prepare the success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet, f\"a snippet of {path}\", start_line + 1\n )\n success_msg += \"Review the changes and make sure they are as expected. Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def insert(\n self,\n path: PathLike,\n insert_line: int,\n new_str: str,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Insert text at a specific line in a file.\"\"\"\n # Read and prepare content\n file_text = (await operator.read_file(path)).expandtabs()\n new_str = new_str.expandtabs()\n file_text_lines = file_text.split(\"\\n\")\n n_lines_file = len(file_text_lines)\n\n # Validate insert_line\n if insert_line < 0 or insert_line > n_lines_file:\n raise ToolError(\n f\"Invalid `insert_line` parameter: {insert_line}. 
It should be within \"\n f\"the range of lines of the file: {[0, n_lines_file]}\"\n )\n\n # Perform insertion\n new_str_lines = new_str.split(\"\\n\")\n new_file_text_lines = (\n file_text_lines[:insert_line]\n + new_str_lines\n + file_text_lines[insert_line:]\n )\n\n # Create a snippet for preview\n snippet_lines = (\n file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]\n + new_str_lines\n + file_text_lines[insert_line : insert_line + SNIPPET_LINES]\n )\n\n # Join lines and write to file\n new_file_text = \"\\n\".join(new_file_text_lines)\n snippet = \"\\n\".join(snippet_lines)\n\n await operator.write_file(path, new_file_text)\n self._file_history[path].append(file_text)\n\n # Prepare success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet,\n \"a snippet of the edited file\",\n max(1, insert_line - SNIPPET_LINES + 1),\n )\n success_msg += \"Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def undo_edit(\n self, path: PathLike, operator: FileOperator = None\n ) -> CLIResult:\n \"\"\"Revert the last edit made to a file.\"\"\"\n if not self._file_history[path]:\n raise ToolError(f\"No edit history found for {path}.\")\n\n old_text = self._file_history[path].pop()\n await operator.write_file(path, old_text)\n\n return CLIResult(\n output=f\"Last edit to {path} undone successfully. 
{self._make_output(old_text, str(path))}\"\n )\n\n def _make_output(\n self,\n file_content: str,\n file_descriptor: str,\n init_line: int = 1,\n expand_tabs: bool = True,\n ) -> str:\n \"\"\"Format file content for display with line numbers.\"\"\"\n file_content = maybe_truncate(file_content)\n if expand_tabs:\n file_content = file_content.expandtabs()\n\n # Add line numbers to each line\n file_content = \"\\n\".join(\n [\n f\"{i + init_line:6}\\t{line}\"\n for i, line in enumerate(file_content.split(\"\\n\"))\n ]\n )\n\n return (\n f\"Here's the result of running `cat -n` on {file_descriptor}:\\n\"\n + file_content\n + \"\\n\"\n )\n"], ["/OpenManus/app/tool/file_operators.py", "\"\"\"File operation interfaces and implementations for local and sandbox environments.\"\"\"\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import Optional, Protocol, Tuple, Union, runtime_checkable\n\nfrom app.config import SandboxSettings\nfrom app.exceptions import ToolError\nfrom app.sandbox.client import SANDBOX_CLIENT\n\n\nPathLike = Union[str, Path]\n\n\n@runtime_checkable\nclass FileOperator(Protocol):\n \"\"\"Interface for file operations in different environments.\"\"\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file.\"\"\"\n ...\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file.\"\"\"\n ...\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n ...\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n ...\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command and return (return_code, stdout, stderr).\"\"\"\n ...\n\n\nclass LocalFileOperator(FileOperator):\n \"\"\"File operations implementation for local filesystem.\"\"\"\n\n encoding: str = \"utf-8\"\n\n async def read_file(self, path: PathLike) -> str:\n 
\"\"\"Read content from a local file.\"\"\"\n try:\n return Path(path).read_text(encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to read {path}: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a local file.\"\"\"\n try:\n Path(path).write_text(content, encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path}: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n return Path(path).is_dir()\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n return Path(path).exists()\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command locally.\"\"\"\n process = await asyncio.create_subprocess_shell(\n cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n try:\n stdout, stderr = await asyncio.wait_for(\n process.communicate(), timeout=timeout\n )\n return (\n process.returncode or 0,\n stdout.decode(),\n stderr.decode(),\n )\n except asyncio.TimeoutError as exc:\n try:\n process.kill()\n except ProcessLookupError:\n pass\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds\"\n ) from exc\n\n\nclass SandboxFileOperator(FileOperator):\n \"\"\"File operations implementation for sandbox environment.\"\"\"\n\n def __init__(self):\n self.sandbox_client = SANDBOX_CLIENT\n\n async def _ensure_sandbox_initialized(self):\n \"\"\"Ensure sandbox is initialized.\"\"\"\n if not self.sandbox_client.sandbox:\n await self.sandbox_client.create(config=SandboxSettings())\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n return await self.sandbox_client.read_file(str(path))\n except Exception as e:\n raise 
ToolError(f\"Failed to read {path} in sandbox: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n await self.sandbox_client.write_file(str(path), content)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path} in sandbox: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -d {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -e {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a command in sandbox environment.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n stdout = await self.sandbox_client.run_command(\n cmd, timeout=int(timeout) if timeout else None\n )\n return (\n 0, # Always return 0 since we don't have explicit return code from sandbox\n stdout,\n \"\", # No stderr capture in the current sandbox implementation\n )\n except TimeoutError as exc:\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds in sandbox\"\n ) from exc\n except Exception as exc:\n return 1, \"\", f\"Error executing command in sandbox: {str(exc)}\"\n"], ["/OpenManus/app/tool/search/bing_search.py", "from typing import List, Optional, Tuple\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom app.logger import logger\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nABSTRACT_MAX_LENGTH = 300\n\nUSER_AGENTS = [\n 
\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3\",\n \"Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866\",\n]\n\nHEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": USER_AGENTS[0],\n \"Referer\": \"https://www.bing.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n}\n\nBING_HOST_URL = \"https://www.bing.com\"\nBING_SEARCH_URL = \"https://www.bing.com/search?q=\"\n\n\nclass BingSearchEngine(WebSearchEngine):\n session: Optional[requests.Session] = None\n\n def __init__(self, **data):\n \"\"\"Initialize the BingSearch tool with a requests session.\"\"\"\n super().__init__(**data)\n self.session = requests.Session()\n self.session.headers.update(HEADERS)\n\n def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:\n \"\"\"\n Synchronous Bing search implementation 
to retrieve search results.\n\n Args:\n query (str): The search query to submit to Bing.\n num_results (int, optional): Maximum number of results to return. Defaults to 10.\n\n Returns:\n List[SearchItem]: A list of search items with title, URL, and description.\n \"\"\"\n if not query:\n return []\n\n list_result = []\n first = 1\n next_url = BING_SEARCH_URL + query\n\n while len(list_result) < num_results:\n data, next_url = self._parse_html(\n next_url, rank_start=len(list_result), first=first\n )\n if data:\n list_result.extend(data)\n if not next_url:\n break\n first += 10\n\n return list_result[:num_results]\n\n def _parse_html(\n self, url: str, rank_start: int = 0, first: int = 1\n ) -> Tuple[List[SearchItem], str]:\n \"\"\"\n Parse Bing search result HTML to extract search results and the next page URL.\n\n Returns:\n tuple: (List of SearchItem objects, next page URL or None)\n \"\"\"\n try:\n res = self.session.get(url=url)\n res.encoding = \"utf-8\"\n root = BeautifulSoup(res.text, \"lxml\")\n\n list_data = []\n ol_results = root.find(\"ol\", id=\"b_results\")\n if not ol_results:\n return [], None\n\n for li in ol_results.find_all(\"li\", class_=\"b_algo\"):\n title = \"\"\n url = \"\"\n abstract = \"\"\n try:\n h2 = li.find(\"h2\")\n if h2:\n title = h2.text.strip()\n url = h2.a[\"href\"].strip()\n\n p = li.find(\"p\")\n if p:\n abstract = p.text.strip()\n\n if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:\n abstract = abstract[:ABSTRACT_MAX_LENGTH]\n\n rank_start += 1\n\n # Create a SearchItem object\n list_data.append(\n SearchItem(\n title=title or f\"Bing Result {rank_start}\",\n url=url,\n description=abstract,\n )\n )\n except Exception:\n continue\n\n next_btn = root.find(\"a\", title=\"Next page\")\n if not next_btn:\n return list_data, None\n\n next_url = BING_HOST_URL + next_btn[\"href\"]\n return list_data, next_url\n except Exception as e:\n logger.warning(f\"Error parsing HTML: {e}\")\n return [], None\n\n def 
perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Bing search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n return self._search_sync(query, num_results=num_results)\n"], ["/OpenManus/protocol/a2a/app/agent_executor.py", "import logging\n\nfrom a2a.server.agent_execution import AgentExecutor, RequestContext\nfrom a2a.server.events import Event, EventQueue\nfrom a2a.server.tasks import TaskUpdater\nfrom a2a.types import (\n InvalidParamsError,\n Part,\n Task,\n TextPart,\n UnsupportedOperationError,\n)\nfrom a2a.utils import (\n completed_task,\n new_artifact,\n)\nfrom .agent import A2AManus\nfrom a2a.utils.errors import ServerError\nfrom typing import Callable, Awaitable\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ManusExecutor(AgentExecutor):\n \"\"\"Currency Conversion AgentExecutor Example.\"\"\"\n\n def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):\n self.agent_factory = agent_factory\n\n async def execute(\n self,\n context: RequestContext,\n event_queue: EventQueue,\n ) -> None:\n error = self._validate_request(context)\n if error:\n raise ServerError(error=InvalidParamsError())\n\n query = context.get_user_input()\n try:\n self.agent = await self.agent_factory()\n result = await self.agent.invoke(query, context.context_id)\n print(f\"Final Result ===> {result}\")\n except Exception as e:\n print(\"Error invoking agent: %s\", e)\n raise ServerError(error=ValueError(f\"Error invoking agent: {e}\")) from e\n parts = [\n Part(\n root=TextPart(\n text=(\n result[\"content\"]\n if result[\"content\"]\n else \"failed to generate response\"\n )\n ),\n )\n ]\n event_queue.enqueue_event(\n completed_task(\n context.task_id,\n context.context_id,\n [new_artifact(parts, f\"task_{context.task_id}\")],\n [context.message],\n )\n )\n\n def _validate_request(self, context: RequestContext) -> bool:\n return 
False\n\n async def cancel(\n self, request: RequestContext, event_queue: EventQueue\n ) -> Task | None:\n raise ServerError(error=UnsupportedOperationError())\n"], ["/OpenManus/app/tool/create_chat_completion.py", "from typing import Any, List, Optional, Type, Union, get_args, get_origin\n\nfrom pydantic import BaseModel, Field\n\nfrom app.tool import BaseTool\n\n\nclass CreateChatCompletion(BaseTool):\n name: str = \"create_chat_completion\"\n description: str = (\n \"Creates a structured completion with specified output formatting.\"\n )\n\n # Type mapping for JSON schema\n type_mapping: dict = {\n str: \"string\",\n int: \"integer\",\n float: \"number\",\n bool: \"boolean\",\n dict: \"object\",\n list: \"array\",\n }\n response_type: Optional[Type] = None\n required: List[str] = Field(default_factory=lambda: [\"response\"])\n\n def __init__(self, response_type: Optional[Type] = str):\n \"\"\"Initialize with a specific response type.\"\"\"\n super().__init__()\n self.response_type = response_type\n self.parameters = self._build_parameters()\n\n def _build_parameters(self) -> dict:\n \"\"\"Build parameters schema based on response type.\"\"\"\n if self.response_type == str:\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"string\",\n \"description\": \"The response text that should be delivered to the user.\",\n },\n },\n \"required\": self.required,\n }\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n schema = self.response_type.model_json_schema()\n return {\n \"type\": \"object\",\n \"properties\": schema[\"properties\"],\n \"required\": schema.get(\"required\", self.required),\n }\n\n return self._create_type_schema(self.response_type)\n\n def _create_type_schema(self, type_hint: Type) -> dict:\n \"\"\"Create a JSON schema for the given type.\"\"\"\n origin = get_origin(type_hint)\n args = get_args(type_hint)\n\n # Handle primitive types\n if origin is None:\n return {\n 
\"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Response of type {type_hint.__name__}\",\n }\n },\n \"required\": self.required,\n }\n\n # Handle List type\n if origin is list:\n item_type = args[0] if args else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"array\",\n \"items\": self._get_type_info(item_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Dict type\n if origin is dict:\n value_type = args[1] if len(args) > 1 else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"object\",\n \"additionalProperties\": self._get_type_info(value_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Union type\n if origin is Union:\n return self._create_union_schema(args)\n\n return self._build_parameters()\n\n def _get_type_info(self, type_hint: Type) -> dict:\n \"\"\"Get type information for a single type.\"\"\"\n if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):\n return type_hint.model_json_schema()\n\n return {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Value of type {getattr(type_hint, '__name__', 'any')}\",\n }\n\n def _create_union_schema(self, types: tuple) -> dict:\n \"\"\"Create schema for Union types.\"\"\"\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\"anyOf\": [self._get_type_info(t) for t in types]}\n },\n \"required\": self.required,\n }\n\n async def execute(self, required: list | None = None, **kwargs) -> Any:\n \"\"\"Execute the chat completion with type conversion.\n\n Args:\n required: List of required field names or None\n **kwargs: Response data\n\n Returns:\n Converted response based on response_type\n \"\"\"\n required = required or self.required\n\n # Handle case when required is a list\n if isinstance(required, list) and len(required) > 0:\n if len(required) == 1:\n 
required_field = required[0]\n result = kwargs.get(required_field, \"\")\n else:\n # Return multiple fields as a dictionary\n return {field: kwargs.get(field, \"\") for field in required}\n else:\n required_field = \"response\"\n result = kwargs.get(required_field, \"\")\n\n # Type conversion logic\n if self.response_type == str:\n return result\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n return self.response_type(**kwargs)\n\n if get_origin(self.response_type) in (list, dict):\n return result # Assuming result is already in correct format\n\n try:\n return self.response_type(result)\n except (ValueError, TypeError):\n return result\n"], ["/OpenManus/app/tool/planning.py", "# tool/planning.py\nfrom typing import Dict, List, Literal, Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, ToolResult\n\n\n_PLANNING_TOOL_DESCRIPTION = \"\"\"\nA planning tool that allows the agent to create and manage plans for solving complex tasks.\nThe tool provides functionality for creating plans, updating plan steps, and tracking progress.\n\"\"\"\n\n\nclass PlanningTool(BaseTool):\n \"\"\"\n A planning tool that allows the agent to create and manage plans for solving complex tasks.\n The tool provides functionality for creating plans, updating plan steps, and tracking progress.\n \"\"\"\n\n name: str = \"planning\"\n description: str = _PLANNING_TOOL_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The command to execute. Available commands: create, update, list, get, set_active, mark_step, delete.\",\n \"enum\": [\n \"create\",\n \"update\",\n \"list\",\n \"get\",\n \"set_active\",\n \"mark_step\",\n \"delete\",\n ],\n \"type\": \"string\",\n },\n \"plan_id\": {\n \"description\": \"Unique identifier for the plan. Required for create, update, set_active, and delete commands. 
Optional for get and mark_step (uses active plan if not specified).\",\n \"type\": \"string\",\n },\n \"title\": {\n \"description\": \"Title for the plan. Required for create command, optional for update command.\",\n \"type\": \"string\",\n },\n \"steps\": {\n \"description\": \"List of plan steps. Required for create command, optional for update command.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"step_index\": {\n \"description\": \"Index of the step to update (0-based). Required for mark_step command.\",\n \"type\": \"integer\",\n },\n \"step_status\": {\n \"description\": \"Status to set for a step. Used with mark_step command.\",\n \"enum\": [\"not_started\", \"in_progress\", \"completed\", \"blocked\"],\n \"type\": \"string\",\n },\n \"step_notes\": {\n \"description\": \"Additional notes for a step. Optional for mark_step command.\",\n \"type\": \"string\",\n },\n },\n \"required\": [\"command\"],\n \"additionalProperties\": False,\n }\n\n plans: dict = {} # Dictionary to store plans by plan_id\n _current_plan_id: Optional[str] = None # Track the current active plan\n\n async def execute(\n self,\n *,\n command: Literal[\n \"create\", \"update\", \"list\", \"get\", \"set_active\", \"mark_step\", \"delete\"\n ],\n plan_id: Optional[str] = None,\n title: Optional[str] = None,\n steps: Optional[List[str]] = None,\n step_index: Optional[int] = None,\n step_status: Optional[\n Literal[\"not_started\", \"in_progress\", \"completed\", \"blocked\"]\n ] = None,\n step_notes: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Execute the planning tool with the given command and parameters.\n\n Parameters:\n - command: The operation to perform\n - plan_id: Unique identifier for the plan\n - title: Title for the plan (used with create command)\n - steps: List of steps for the plan (used with create command)\n - step_index: Index of the step to update (used with mark_step command)\n - step_status: Status to set for a step (used with mark_step 
command)\n - step_notes: Additional notes for a step (used with mark_step command)\n \"\"\"\n\n if command == \"create\":\n return self._create_plan(plan_id, title, steps)\n elif command == \"update\":\n return self._update_plan(plan_id, title, steps)\n elif command == \"list\":\n return self._list_plans()\n elif command == \"get\":\n return self._get_plan(plan_id)\n elif command == \"set_active\":\n return self._set_active_plan(plan_id)\n elif command == \"mark_step\":\n return self._mark_step(plan_id, step_index, step_status, step_notes)\n elif command == \"delete\":\n return self._delete_plan(plan_id)\n else:\n raise ToolError(\n f\"Unrecognized command: {command}. Allowed commands are: create, update, list, get, set_active, mark_step, delete\"\n )\n\n def _create_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Create a new plan with the given ID, title, and steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: create\")\n\n if plan_id in self.plans:\n raise ToolError(\n f\"A plan with ID '{plan_id}' already exists. 
Use 'update' to modify existing plans.\"\n )\n\n if not title:\n raise ToolError(\"Parameter `title` is required for command: create\")\n\n if (\n not steps\n or not isinstance(steps, list)\n or not all(isinstance(step, str) for step in steps)\n ):\n raise ToolError(\n \"Parameter `steps` must be a non-empty list of strings for command: create\"\n )\n\n # Create a new plan with initialized step statuses\n plan = {\n \"plan_id\": plan_id,\n \"title\": title,\n \"steps\": steps,\n \"step_statuses\": [\"not_started\"] * len(steps),\n \"step_notes\": [\"\"] * len(steps),\n }\n\n self.plans[plan_id] = plan\n self._current_plan_id = plan_id # Set as active plan\n\n return ToolResult(\n output=f\"Plan created successfully with ID: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _update_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Update an existing plan with new title or steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: update\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n\n if title:\n plan[\"title\"] = title\n\n if steps:\n if not isinstance(steps, list) or not all(\n isinstance(step, str) for step in steps\n ):\n raise ToolError(\n \"Parameter `steps` must be a list of strings for command: update\"\n )\n\n # Preserve existing step statuses for unchanged steps\n old_steps = plan[\"steps\"]\n old_statuses = plan[\"step_statuses\"]\n old_notes = plan[\"step_notes\"]\n\n # Create new step statuses and notes\n new_statuses = []\n new_notes = []\n\n for i, step in enumerate(steps):\n # If the step exists at the same position in old steps, preserve status and notes\n if i < len(old_steps) and step == old_steps[i]:\n new_statuses.append(old_statuses[i])\n new_notes.append(old_notes[i])\n else:\n new_statuses.append(\"not_started\")\n new_notes.append(\"\")\n\n plan[\"steps\"] = 
steps\n plan[\"step_statuses\"] = new_statuses\n plan[\"step_notes\"] = new_notes\n\n return ToolResult(\n output=f\"Plan updated successfully: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _list_plans(self) -> ToolResult:\n \"\"\"List all available plans.\"\"\"\n if not self.plans:\n return ToolResult(\n output=\"No plans available. Create a plan with the 'create' command.\"\n )\n\n output = \"Available plans:\\n\"\n for plan_id, plan in self.plans.items():\n current_marker = \" (active)\" if plan_id == self._current_plan_id else \"\"\n completed = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"completed\"\n )\n total = len(plan[\"steps\"])\n progress = f\"{completed}/{total} steps completed\"\n output += f\"• {plan_id}{current_marker}: {plan['title']} - {progress}\\n\"\n\n return ToolResult(output=output)\n\n def _get_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Get details of a specific plan.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. 
Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n return ToolResult(output=self._format_plan(plan))\n\n def _set_active_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Set a plan as the active plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: set_active\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n self._current_plan_id = plan_id\n return ToolResult(\n output=f\"Plan '{plan_id}' is now the active plan.\\n\\n{self._format_plan(self.plans[plan_id])}\"\n )\n\n def _mark_step(\n self,\n plan_id: Optional[str],\n step_index: Optional[int],\n step_status: Optional[str],\n step_notes: Optional[str],\n ) -> ToolResult:\n \"\"\"Mark a step with a specific status and optional notes.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n if step_index is None:\n raise ToolError(\"Parameter `step_index` is required for command: mark_step\")\n\n plan = self.plans[plan_id]\n\n if step_index < 0 or step_index >= len(plan[\"steps\"]):\n raise ToolError(\n f\"Invalid step_index: {step_index}. Valid indices range from 0 to {len(plan['steps'])-1}.\"\n )\n\n if step_status and step_status not in [\n \"not_started\",\n \"in_progress\",\n \"completed\",\n \"blocked\",\n ]:\n raise ToolError(\n f\"Invalid step_status: {step_status}. 
Valid statuses are: not_started, in_progress, completed, blocked\"\n )\n\n if step_status:\n plan[\"step_statuses\"][step_index] = step_status\n\n if step_notes:\n plan[\"step_notes\"][step_index] = step_notes\n\n return ToolResult(\n output=f\"Step {step_index} updated in plan '{plan_id}'.\\n\\n{self._format_plan(plan)}\"\n )\n\n def _delete_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Delete a plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: delete\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n del self.plans[plan_id]\n\n # If the deleted plan was the active plan, clear the active plan\n if self._current_plan_id == plan_id:\n self._current_plan_id = None\n\n return ToolResult(output=f\"Plan '{plan_id}' has been deleted.\")\n\n def _format_plan(self, plan: Dict) -> str:\n \"\"\"Format a plan for display.\"\"\"\n output = f\"Plan: {plan['title']} (ID: {plan['plan_id']})\\n\"\n output += \"=\" * len(output) + \"\\n\\n\"\n\n # Calculate progress statistics\n total_steps = len(plan[\"steps\"])\n completed = sum(1 for status in plan[\"step_statuses\"] if status == \"completed\")\n in_progress = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"in_progress\"\n )\n blocked = sum(1 for status in plan[\"step_statuses\"] if status == \"blocked\")\n not_started = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"not_started\"\n )\n\n output += f\"Progress: {completed}/{total_steps} steps completed \"\n if total_steps > 0:\n percentage = (completed / total_steps) * 100\n output += f\"({percentage:.1f}%)\\n\"\n else:\n output += \"(0%)\\n\"\n\n output += f\"Status: {completed} completed, {in_progress} in progress, {blocked} blocked, {not_started} not started\\n\\n\"\n output += \"Steps:\\n\"\n\n # Add each step with its status and notes\n for i, (step, status, notes) in enumerate(\n zip(plan[\"steps\"], plan[\"step_statuses\"], 
plan[\"step_notes\"])\n ):\n status_symbol = {\n \"not_started\": \"[ ]\",\n \"in_progress\": \"[→]\",\n \"completed\": \"[✓]\",\n \"blocked\": \"[!]\",\n }.get(status, \"[ ]\")\n\n output += f\"{i}. {status_symbol} {step}\\n\"\n if notes:\n output += f\" Notes: {notes}\\n\"\n\n return output\n"], ["/OpenManus/app/bedrock.py", "import json\nimport sys\nimport time\nimport uuid\nfrom datetime import datetime\nfrom typing import Dict, List, Literal, Optional\n\nimport boto3\n\n\n# Global variables to track the current tool use ID across function calls\n# Tmp solution\nCURRENT_TOOLUSE_ID = None\n\n\n# Class to handle OpenAI-style response formatting\nclass OpenAIResponse:\n def __init__(self, data):\n # Recursively convert nested dicts and lists to OpenAIResponse objects\n for key, value in data.items():\n if isinstance(value, dict):\n value = OpenAIResponse(value)\n elif isinstance(value, list):\n value = [\n OpenAIResponse(item) if isinstance(item, dict) else item\n for item in value\n ]\n setattr(self, key, value)\n\n def model_dump(self, *args, **kwargs):\n # Convert object to dict and add timestamp\n data = self.__dict__\n data[\"created_at\"] = datetime.now().isoformat()\n return data\n\n\n# Main client class for interacting with Amazon Bedrock\nclass BedrockClient:\n def __init__(self):\n # Initialize Bedrock client, you need to configure AWS env first\n try:\n self.client = boto3.client(\"bedrock-runtime\")\n self.chat = Chat(self.client)\n except Exception as e:\n print(f\"Error initializing Bedrock client: {e}\")\n sys.exit(1)\n\n\n# Chat interface class\nclass Chat:\n def __init__(self, client):\n self.completions = ChatCompletions(client)\n\n\n# Core class handling chat completions functionality\nclass ChatCompletions:\n def __init__(self, client):\n self.client = client\n\n def _convert_openai_tools_to_bedrock_format(self, tools):\n # Convert OpenAI function calling format to Bedrock tool format\n bedrock_tools = []\n for tool in tools:\n if 
tool.get(\"type\") == \"function\":\n function = tool.get(\"function\", {})\n bedrock_tool = {\n \"toolSpec\": {\n \"name\": function.get(\"name\", \"\"),\n \"description\": function.get(\"description\", \"\"),\n \"inputSchema\": {\n \"json\": {\n \"type\": \"object\",\n \"properties\": function.get(\"parameters\", {}).get(\n \"properties\", {}\n ),\n \"required\": function.get(\"parameters\", {}).get(\n \"required\", []\n ),\n }\n },\n }\n }\n bedrock_tools.append(bedrock_tool)\n return bedrock_tools\n\n def _convert_openai_messages_to_bedrock_format(self, messages):\n # Convert OpenAI message format to Bedrock message format\n bedrock_messages = []\n system_prompt = []\n for message in messages:\n if message.get(\"role\") == \"system\":\n system_prompt = [{\"text\": message.get(\"content\")}]\n elif message.get(\"role\") == \"user\":\n bedrock_message = {\n \"role\": message.get(\"role\", \"user\"),\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"assistant\":\n bedrock_message = {\n \"role\": \"assistant\",\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n openai_tool_calls = message.get(\"tool_calls\", [])\n if openai_tool_calls:\n bedrock_tool_use = {\n \"toolUseId\": openai_tool_calls[0][\"id\"],\n \"name\": openai_tool_calls[0][\"function\"][\"name\"],\n \"input\": json.loads(\n openai_tool_calls[0][\"function\"][\"arguments\"]\n ),\n }\n bedrock_message[\"content\"].append({\"toolUse\": bedrock_tool_use})\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = openai_tool_calls[0][\"id\"]\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"tool\":\n bedrock_message = {\n \"role\": \"user\",\n \"content\": [\n {\n \"toolResult\": {\n \"toolUseId\": CURRENT_TOOLUSE_ID,\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n }\n ],\n }\n bedrock_messages.append(bedrock_message)\n else:\n raise ValueError(f\"Invalid role: 
{message.get('role')}\")\n return system_prompt, bedrock_messages\n\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n # Convert Bedrock response format to OpenAI format\n content = \"\"\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n content_array = bedrock_response[\"output\"][\"message\"][\"content\"]\n content = \"\".join(item.get(\"text\", \"\") for item in content_array)\n if content == \"\":\n content = \".\"\n\n # Handle tool calls in response\n openai_tool_calls = []\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n for content_item in bedrock_response[\"output\"][\"message\"][\"content\"]:\n if content_item.get(\"toolUse\"):\n bedrock_tool_use = content_item[\"toolUse\"]\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n openai_tool_call = {\n \"id\": CURRENT_TOOLUSE_ID,\n \"type\": \"function\",\n \"function\": {\n \"name\": bedrock_tool_use[\"name\"],\n \"arguments\": json.dumps(bedrock_tool_use[\"input\"]),\n },\n }\n openai_tool_calls.append(openai_tool_call)\n\n # Construct final OpenAI format response\n openai_format = {\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"created\": int(time.time()),\n \"object\": \"chat.completion\",\n \"system_fingerprint\": None,\n \"choices\": [\n {\n \"finish_reason\": bedrock_response.get(\"stopReason\", \"end_turn\"),\n \"index\": 0,\n \"message\": {\n \"content\": content,\n \"role\": bedrock_response.get(\"output\", {})\n .get(\"message\", {})\n .get(\"role\", \"assistant\"),\n \"tool_calls\": openai_tool_calls\n if openai_tool_calls != []\n else None,\n \"function_call\": None,\n },\n }\n ],\n \"usage\": {\n \"completion_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"outputTokens\", 0\n ),\n \"prompt_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"inputTokens\", 0\n ),\n \"total_tokens\": bedrock_response.get(\"usage\", {}).get(\"totalTokens\", 0),\n },\n }\n return 
OpenAIResponse(openai_format)\n\n async def _invoke_bedrock(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Non-streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n openai_response = self._convert_bedrock_response_to_openai_format(response)\n return openai_response\n\n async def _invoke_bedrock_stream(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Streaming invocation of Bedrock model\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse_stream(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n\n # Initialize response structure\n bedrock_response = {\n \"output\": {\"message\": {\"role\": \"\", \"content\": []}},\n \"stopReason\": \"\",\n \"usage\": {},\n \"metrics\": {},\n }\n bedrock_response_text = \"\"\n bedrock_response_tool_input = \"\"\n\n # Process streaming response\n stream = response.get(\"stream\")\n if stream:\n for event in stream:\n if event.get(\"messageStart\", {}).get(\"role\"):\n bedrock_response[\"output\"][\"message\"][\"role\"] = event[\n \"messageStart\"\n ][\"role\"]\n if 
event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"text\"):\n bedrock_response_text += event[\"contentBlockDelta\"][\"delta\"][\"text\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"text\"], end=\"\", flush=True\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 0:\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"text\": bedrock_response_text}\n )\n if event.get(\"contentBlockStart\", {}).get(\"start\", {}).get(\"toolUse\"):\n bedrock_tool_use = event[\"contentBlockStart\"][\"start\"][\"toolUse\"]\n tool_use = {\n \"toolUseId\": bedrock_tool_use[\"toolUseId\"],\n \"name\": bedrock_tool_use[\"name\"],\n }\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"toolUse\": tool_use}\n )\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"toolUse\"):\n bedrock_response_tool_input += event[\"contentBlockDelta\"][\"delta\"][\n \"toolUse\"\n ][\"input\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"toolUse\"][\"input\"],\n end=\"\",\n flush=True,\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 1:\n bedrock_response[\"output\"][\"message\"][\"content\"][1][\"toolUse\"][\n \"input\"\n ] = json.loads(bedrock_response_tool_input)\n print()\n openai_response = self._convert_bedrock_response_to_openai_format(\n bedrock_response\n )\n return openai_response\n\n def create(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n stream: Optional[bool] = True,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n # Main entry point for chat completion\n bedrock_tools = []\n if tools is not None:\n bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)\n if stream:\n return self._invoke_bedrock_stream(\n model,\n messages,\n 
max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n else:\n return self._invoke_bedrock(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n"], ["/OpenManus/app/tool/tool_collection.py", "\"\"\"Collection classes for managing multiple tools.\"\"\"\nfrom typing import Any, Dict, List\n\nfrom app.exceptions import ToolError\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolFailure, ToolResult\n\n\nclass ToolCollection:\n \"\"\"A collection of defined tools.\"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, *tools: BaseTool):\n self.tools = tools\n self.tool_map = {tool.name: tool for tool in tools}\n\n def __iter__(self):\n return iter(self.tools)\n\n def to_params(self) -> List[Dict[str, Any]]:\n return [tool.to_param() for tool in self.tools]\n\n async def execute(\n self, *, name: str, tool_input: Dict[str, Any] = None\n ) -> ToolResult:\n tool = self.tool_map.get(name)\n if not tool:\n return ToolFailure(error=f\"Tool {name} is invalid\")\n try:\n result = await tool(**tool_input)\n return result\n except ToolError as e:\n return ToolFailure(error=e.message)\n\n async def execute_all(self) -> List[ToolResult]:\n \"\"\"Execute all tools in the collection sequentially.\"\"\"\n results = []\n for tool in self.tools:\n try:\n result = await tool()\n results.append(result)\n except ToolError as e:\n results.append(ToolFailure(error=e.message))\n return results\n\n def get_tool(self, name: str) -> BaseTool:\n return self.tool_map.get(name)\n\n def add_tool(self, tool: BaseTool):\n \"\"\"Add a single tool to the collection.\n\n If a tool with the same name already exists, it will be skipped and a warning will be logged.\n \"\"\"\n if tool.name in self.tool_map:\n logger.warning(f\"Tool {tool.name} already exists in collection, skipping\")\n return self\n\n self.tools += (tool,)\n self.tool_map[tool.name] = tool\n return self\n\n def 
add_tools(self, *tools: BaseTool):\n \"\"\"Add multiple tools to the collection.\n\n If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged.\n \"\"\"\n for tool in tools:\n self.add_tool(tool)\n return self\n"], ["/OpenManus/app/sandbox/client.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, Optional, Protocol\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxFileOperations(Protocol):\n \"\"\"Protocol for sandbox file operations.\"\"\"\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n \"\"\"\n ...\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n \"\"\"\n ...\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file content from container.\n\n Args:\n path: File path in container.\n\n Returns:\n str: File content.\n \"\"\"\n ...\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to file in container.\n\n Args:\n path: File path in container.\n content: Content to write.\n \"\"\"\n ...\n\n\nclass BaseSandboxClient(ABC):\n \"\"\"Base sandbox client interface.\"\"\"\n\n @abstractmethod\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates sandbox.\"\"\"\n\n @abstractmethod\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes command.\"\"\"\n\n @abstractmethod\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container.\"\"\"\n\n @abstractmethod\n async def copy_to(self, 
local_path: str, container_path: str) -> None:\n \"\"\"Copies file to container.\"\"\"\n\n @abstractmethod\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file.\"\"\"\n\n @abstractmethod\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file.\"\"\"\n\n @abstractmethod\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n\n\nclass LocalSandboxClient(BaseSandboxClient):\n \"\"\"Local sandbox client implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes local sandbox client.\"\"\"\n self.sandbox: Optional[DockerSandbox] = None\n\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates a sandbox.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mappings.\n\n Raises:\n RuntimeError: If sandbox creation fails.\n \"\"\"\n self.sandbox = DockerSandbox(config, volume_bindings)\n await self.sandbox.create()\n\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs command in sandbox.\n\n Args:\n command: Command to execute.\n timeout: Execution timeout in seconds.\n\n Returns:\n Command output.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.run_command(command, timeout)\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_from(container_path, local_path)\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file 
path.\n container_path: Destination path in container.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_to(local_path, container_path)\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file from container.\n\n Args:\n path: File path in container.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.read_file(path)\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file to container.\n\n Args:\n path: File path in container.\n content: File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.write_file(path, content)\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n if self.sandbox:\n await self.sandbox.cleanup()\n self.sandbox = None\n\n\ndef create_sandbox_client() -> LocalSandboxClient:\n \"\"\"Creates a sandbox client.\n\n Returns:\n LocalSandboxClient: Sandbox client instance.\n \"\"\"\n return LocalSandboxClient()\n\n\nSANDBOX_CLIENT = create_sandbox_client()\n"], ["/OpenManus/app/tool/python_execute.py", "import multiprocessing\nimport sys\nfrom io import StringIO\nfrom typing import Dict\n\nfrom app.tool.base import BaseTool\n\n\nclass PythonExecute(BaseTool):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
    async def execute(
        self,
        code: str,
        timeout: int = 5,
    ) -> Dict:
        """
        Executes the provided Python code with a timeout.

        The code runs in a separate process so a hard timeout can be
        enforced by terminating the process; a Manager dict carries the
        result back across the process boundary.

        Args:
            code (str): The Python code to execute.
            timeout (int): Execution timeout in seconds.

        Returns:
            Dict: Contains 'observation' with execution output or error
            message and 'success' status.
        """

        with multiprocessing.Manager() as manager:
            result = manager.dict({"observation": "", "success": False})
            # __builtins__ may be a dict or a module depending on import
            # context; normalize both forms to a plain dict for exec().
            if isinstance(__builtins__, dict):
                safe_globals = {"__builtins__": __builtins__}
            else:
                safe_globals = {"__builtins__": __builtins__.__dict__.copy()}
            proc = multiprocessing.Process(
                target=self._run_code, args=(code, result, safe_globals)
            )
            proc.start()
            proc.join(timeout)

            # timeout process: still alive after `timeout` seconds -> kill it
            if proc.is_alive():
                proc.terminate()
                proc.join(1)
                return {
                    "observation": f"Execution timeout after {timeout} seconds",
                    "success": False,
                }
            # Copy out of the manager proxy before the context closes.
            return dict(result)
class A2AManus(Manus):
    """Manus agent adapted to the A2A (agent-to-agent) protocol surface."""

    # Content types this agent can produce for A2A responses.
    SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = ["text", "text/plain"]

    async def invoke(self, query: str, sessionId: str) -> Dict[str, Any]:
        """Runs the agent on `query` and wraps the result as an A2A payload.

        Fixed: the return annotation previously claimed `str`, but the
        method returns the dict built by `get_agent_response`.

        Args:
            query: User request to run through the agent.
            sessionId: Conversation/session identifier.

        Returns:
            A task-status dict (see `get_agent_response`).
        """
        # LangGraph-style thread config; currently unused downstream but
        # kept so the call shape matches other A2A agent implementations.
        config = {"configurable": {"thread_id": sessionId}}
        response = await self.run(query)
        return self.get_agent_response(config, response)

    async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:
        """Streaming is not supported by Manus."""
        raise NotImplementedError("Streaming is not supported by Manus yet.")

    def get_agent_response(self, config, agent_response) -> Dict[str, Any]:
        """Formats a raw agent response into the A2A task payload.

        Args:
            config: Session config (accepted for interface parity; unused).
            agent_response: Raw output of `self.run`.
        """
        return {
            "is_task_complete": True,
            "require_user_input": False,
            "content": agent_response,
        }
restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"\"\"Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, data process / data report / others\",\n \"type\": \"string\",\n \"default\": \"process\",\n \"enum\": [\"process\", \"report\", \"others\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code to execute.\n# Note\n1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights.\n2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also\n3. Save any report / processed files / each analysis result in worksapce directory: {directory}\n4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization.\n5. 
class BaseTool(ABC, BaseModel):
    # Identity and JSON-schema calling convention exposed to the LLM.
    name: str
    description: str
    parameters: Optional[dict] = None

    class Config:
        arbitrary_types_allowed = True

    async def __call__(self, **kwargs) -> Any:
        """Invoke the tool directly; delegates to `execute`."""
        return await self.execute(**kwargs)

    @abstractmethod
    async def execute(self, **kwargs) -> Any:
        """Run the tool with the given parameters."""

    def to_param(self) -> Dict:
        """Render this tool as an OpenAI function-call descriptor."""
        spec = {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
        }
        return {"type": "function", "function": spec}
base64_image=combine_fields(self.base64_image, other.base64_image, False),\n system=combine_fields(self.system, other.system),\n )\n\n def __str__(self):\n return f\"Error: {self.error}\" if self.error else self.output\n\n def replace(self, **kwargs):\n \"\"\"Returns a new ToolResult with the given fields replaced.\"\"\"\n # return self.copy(update=kwargs)\n return type(self)(**{**self.dict(), **kwargs})\n\n\nclass CLIResult(ToolResult):\n \"\"\"A ToolResult that can be rendered as a CLI output.\"\"\"\n\n\nclass ToolFailure(ToolResult):\n \"\"\"A ToolResult that represents a failure.\"\"\"\n"], ["/OpenManus/app/schema.py", "from enum import Enum\nfrom typing import Any, List, Literal, Optional, Union\n\nfrom pydantic import BaseModel, Field\n\n\nclass Role(str, Enum):\n \"\"\"Message role options\"\"\"\n\n SYSTEM = \"system\"\n USER = \"user\"\n ASSISTANT = \"assistant\"\n TOOL = \"tool\"\n\n\nROLE_VALUES = tuple(role.value for role in Role)\nROLE_TYPE = Literal[ROLE_VALUES] # type: ignore\n\n\nclass ToolChoice(str, Enum):\n \"\"\"Tool choice options\"\"\"\n\n NONE = \"none\"\n AUTO = \"auto\"\n REQUIRED = \"required\"\n\n\nTOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)\nTOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore\n\n\nclass AgentState(str, Enum):\n \"\"\"Agent execution states\"\"\"\n\n IDLE = \"IDLE\"\n RUNNING = \"RUNNING\"\n FINISHED = \"FINISHED\"\n ERROR = \"ERROR\"\n\n\nclass Function(BaseModel):\n name: str\n arguments: str\n\n\nclass ToolCall(BaseModel):\n \"\"\"Represents a tool/function call in a message\"\"\"\n\n id: str\n type: str = \"function\"\n function: Function\n\n\nclass Message(BaseModel):\n \"\"\"Represents a chat message in the conversation\"\"\"\n\n role: ROLE_TYPE = Field(...) 
# type: ignore\n content: Optional[str] = Field(default=None)\n tool_calls: Optional[List[ToolCall]] = Field(default=None)\n name: Optional[str] = Field(default=None)\n tool_call_id: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n\n def __add__(self, other) -> List[\"Message\"]:\n \"\"\"支持 Message + list 或 Message + Message 的操作\"\"\"\n if isinstance(other, list):\n return [self] + other\n elif isinstance(other, Message):\n return [self, other]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'\"\n )\n\n def __radd__(self, other) -> List[\"Message\"]:\n \"\"\"支持 list + Message 的操作\"\"\"\n if isinstance(other, list):\n return other + [self]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'\"\n )\n\n def to_dict(self) -> dict:\n \"\"\"Convert message to dictionary format\"\"\"\n message = {\"role\": self.role}\n if self.content is not None:\n message[\"content\"] = self.content\n if self.tool_calls is not None:\n message[\"tool_calls\"] = [tool_call.dict() for tool_call in self.tool_calls]\n if self.name is not None:\n message[\"name\"] = self.name\n if self.tool_call_id is not None:\n message[\"tool_call_id\"] = self.tool_call_id\n if self.base64_image is not None:\n message[\"base64_image\"] = self.base64_image\n return message\n\n @classmethod\n def user_message(\n cls, content: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a user message\"\"\"\n return cls(role=Role.USER, content=content, base64_image=base64_image)\n\n @classmethod\n def system_message(cls, content: str) -> \"Message\":\n \"\"\"Create a system message\"\"\"\n return cls(role=Role.SYSTEM, content=content)\n\n @classmethod\n def assistant_message(\n cls, content: Optional[str] = None, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create an assistant message\"\"\"\n return 
cls(role=Role.ASSISTANT, content=content, base64_image=base64_image)\n\n @classmethod\n def tool_message(\n cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a tool message\"\"\"\n return cls(\n role=Role.TOOL,\n content=content,\n name=name,\n tool_call_id=tool_call_id,\n base64_image=base64_image,\n )\n\n @classmethod\n def from_tool_calls(\n cls,\n tool_calls: List[Any],\n content: Union[str, List[str]] = \"\",\n base64_image: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Create ToolCallsMessage from raw tool calls.\n\n Args:\n tool_calls: Raw tool calls from LLM\n content: Optional message content\n base64_image: Optional base64 encoded image\n \"\"\"\n formatted_calls = [\n {\"id\": call.id, \"function\": call.function.model_dump(), \"type\": \"function\"}\n for call in tool_calls\n ]\n return cls(\n role=Role.ASSISTANT,\n content=content,\n tool_calls=formatted_calls,\n base64_image=base64_image,\n **kwargs,\n )\n\n\nclass Memory(BaseModel):\n messages: List[Message] = Field(default_factory=list)\n max_messages: int = Field(default=100)\n\n def add_message(self, message: Message) -> None:\n \"\"\"Add a message to memory\"\"\"\n self.messages.append(message)\n # Optional: Implement message limit\n if len(self.messages) > self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def add_messages(self, messages: List[Message]) -> None:\n \"\"\"Add multiple messages to memory\"\"\"\n self.messages.extend(messages)\n # Optional: Implement message limit\n if len(self.messages) > self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def clear(self) -> None:\n \"\"\"Clear all messages\"\"\"\n self.messages.clear()\n\n def get_recent_messages(self, n: int) -> List[Message]:\n \"\"\"Get n most recent messages\"\"\"\n return self.messages[-n:]\n\n def to_dict_list(self) -> List[dict]:\n \"\"\"Convert messages to list of dicts\"\"\"\n return [msg.to_dict() for msg 
class BaseFlow(BaseModel, ABC):
    """Base class for execution flows supporting multiple agents"""

    # Registry of participating agents keyed by a caller-chosen name.
    agents: Dict[str, BaseAgent]
    tools: Optional[List] = None
    # Key into `agents` identifying the agent that drives the flow.
    primary_agent_key: Optional[str] = None

    class Config:
        arbitrary_types_allowed = True

    def __init__(
        self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data
    ):
        """Normalizes `agents` into a dict and selects a primary agent.

        Accepts a single agent (stored under "default"), a list
        (auto-named agent_0, agent_1, ...), or an explicit mapping.
        If `primary_agent_key` is not supplied, the first agent wins.
        """
        # Handle different ways of providing agents
        if isinstance(agents, BaseAgent):
            agents_dict = {"default": agents}
        elif isinstance(agents, list):
            agents_dict = {f"agent_{i}": agent for i, agent in enumerate(agents)}
        else:
            agents_dict = agents

        # If primary agent not specified, use first agent
        primary_key = data.get("primary_agent_key")
        if not primary_key and agents_dict:
            primary_key = next(iter(agents_dict))
            data["primary_agent_key"] = primary_key

        # Set the agents dictionary
        data["agents"] = agents_dict

        # Initialize using BaseModel's init (validates the fields above)
        super().__init__(**data)

    @property
    def primary_agent(self) -> Optional[BaseAgent]:
        """Get the primary agent for the flow"""
        return self.agents.get(self.primary_agent_key)

    def get_agent(self, key: str) -> Optional[BaseAgent]:
        """Get a specific agent by key"""
        return self.agents.get(key)

    def add_agent(self, key: str, agent: BaseAgent) -> None:
        """Add a new agent to the flow"""
        self.agents[key] = agent

    @abstractmethod
    async def execute(self, input_text: str) -> str:
        """Execute the flow with given input"""
parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/tool/search/base.py", "from typing import List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass SearchItem(BaseModel):\n \"\"\"Represents a single search result item\"\"\"\n\n title: str = Field(description=\"The title of the search result\")\n url: str = Field(description=\"The URL of the search result\")\n description: Optional[str] = Field(\n default=None, description=\"A description or snippet of the search result\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result item.\"\"\"\n return f\"{self.title} - {self.url}\"\n\n\nclass WebSearchEngine(BaseModel):\n \"\"\"Base class for web search engines.\"\"\"\n\n model_config = {\"arbitrary_types_allowed\": True}\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Perform a web search and return a list of search items.\n\n Args:\n query (str): The search query to submit to the search engine.\n num_results (int, optional): The number of search results to return. Default is 10.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n List[SearchItem]: A list of SearchItem objects matching the search query.\n \"\"\"\n raise NotImplementedError\n"], ["/OpenManus/app/prompt/browser.py", "SYSTEM_PROMPT = \"\"\"\\\nYou are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.\n\n# Input Format\nTask\nPrevious steps\nCurrent URL\nOpen Tabs\nInteractive Elements\n[index]text\n- index: Numeric identifier for interaction\n- type: HTML element type (button, input, etc.)\n- text: Element description\nExample:\n[33]\n\n- Only elements with numeric indexes in [] are interactive\n- elements without [] provide only context\n\n# Response Rules\n1. 
RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:\n{{\"current_state\": {{\"evaluation_previous_goal\": \"Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not\",\n\"memory\": \"Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz\",\n\"next_goal\": \"What needs to be done with the next immediate action\"}},\n\"action\":[{{\"one_action_name\": {{// action-specific parameter}}}}, // ... more actions in sequence]}}\n\n2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence.\nCommon action sequences:\n- Form filling: [{{\"input_text\": {{\"index\": 1, \"text\": \"username\"}}}}, {{\"input_text\": {{\"index\": 2, \"text\": \"password\"}}}}, {{\"click_element\": {{\"index\": 3}}}}]\n- Navigation and extraction: [{{\"go_to_url\": {{\"url\": \"https://example.com\"}}}}, {{\"extract_content\": {{\"goal\": \"extract the names\"}}}}]\n- Actions are executed in the given order\n- If the page changes after an action, the sequence is interrupted and you get the new state.\n- Only provide the action sequence until an action which changes the page state significantly.\n- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page\n- only use multiple actions if it makes sense.\n\n3. ELEMENT INTERACTION:\n- Only use indexes of the interactive elements\n- Elements marked with \"[]Non-interactive text\" are non-interactive\n\n4. 
NAVIGATION & ERROR HANDLING:\n- If no suitable elements exist, use other functions to complete the task\n- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.\n- Handle popups/cookies by accepting or closing them\n- Use scroll to find elements you are looking for\n- If you want to research something, open a new tab instead of using the current tab\n- If captcha pops up, try to solve it - else try a different approach\n- If the page is not fully loaded, use wait action\n\n5. TASK COMPLETION:\n- Use the done action as the last action as soon as the ultimate task is complete\n- Dont use \"done\" before you are done with everything the user asked you, except you reach the last step of max_steps.\n- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. If not everything the user asked for is completed set success in done to false!\n- If you have to do something repeatedly for example the task says for \"each\", or \"for all\", or \"x times\", count always inside \"memory\" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.\n- Don't hallucinate actions\n- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.\n\n6. VISUAL CONTEXT:\n- When an image is provided, use it to understand the page layout\n- Bounding boxes with labels on their top right corner correspond to element indexes\n\n7. Form filling:\n- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.\n\n8. Long tasks:\n- Keep track of the status and subresults in the memory.\n\n9. 
Extraction:\n- If your task is to find information - call extract_content on the specific pages to get and store the information.\nYour responses must be always JSON with the specified format.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nWhat should I do next to achieve my goal?\n\nWhen you see [Current state starts here], focus on the following:\n- Current URL and page title{url_placeholder}\n- Available tabs{tabs_placeholder}\n- Interactive elements and their indices\n- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)\n- Any action results or errors{results_placeholder}\n\nFor browser interactions:\n- To navigate: browser_use with action=\"go_to_url\", url=\"...\"\n- To click: browser_use with action=\"click_element\", index=N\n- To type: browser_use with action=\"input_text\", index=N, text=\"...\"\n- To extract: browser_use with action=\"extract_content\", goal=\"...\"\n- To scroll: browser_use with action=\"scroll_down\" or \"scroll_up\"\n\nConsider both what's visible and what might be beyond the current viewport.\nBe methodical - remember your progress and what you've learned so far.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/tool/search/baidu_search.py", "from typing import List\n\nfrom baidusearch.baidusearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass BaiduSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Baidu search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results)\n\n # Convert raw results to SearchItem format\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(title=f\"Baidu Result {i+1}\", url=item, 
description=None)\n )\n elif isinstance(item, dict):\n # If it's a dictionary with details\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"Baidu Result {i+1}\"),\n url=item.get(\"url\", \"\"),\n description=item.get(\"abstract\", None),\n )\n )\n else:\n # Try to get attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"Baidu Result {i+1}\"),\n url=getattr(item, \"url\", \"\"),\n description=getattr(item, \"abstract\", None),\n )\n )\n except Exception:\n # Fallback to a basic result\n results.append(\n SearchItem(\n title=f\"Baidu Result {i+1}\", url=str(item), description=None\n )\n )\n\n return results\n"], ["/OpenManus/app/tool/search/duckduckgo_search.py", "from typing import List\n\nfrom duckduckgo_search import DDGS\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass DuckDuckGoSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n DuckDuckGo search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = DDGS().text(query, max_results=num_results)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\", url=item, description=None\n )\n )\n elif isinstance(item, dict):\n # Extract data from the dictionary\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=item.get(\"href\", \"\"),\n description=item.get(\"body\", None),\n )\n )\n else:\n # Try to extract attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=getattr(item, \"href\", \"\"),\n description=getattr(item, \"body\", None),\n )\n )\n except Exception:\n # Fallback\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\",\n 
class FlowFactory:
    """Factory for creating different types of flows with support for multiple agents"""

    @staticmethod
    def create_flow(
        flow_type: FlowType,
        agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],
        **kwargs,
    ) -> BaseFlow:
        """Instantiate the flow implementation registered for `flow_type`.

        Raises:
            ValueError: If `flow_type` has no registered implementation.
        """
        registry = {
            FlowType.PLANNING: PlanningFlow,
        }
        try:
            flow_class = registry[flow_type]
        except KeyError:
            raise ValueError(f"Unknown flow type: {flow_type}") from None
        return flow_class(agents, **kwargs)
class AskHuman(BaseTool):
    """Add a tool to ask human for help."""

    name: str = "ask_human"
    description: str = "Use this tool to ask human for help."
    # Fixed: this JSON-schema mapping was annotated `str`; it is a dict,
    # matching `BaseTool.parameters` and every sibling tool.
    parameters: dict = {
        "type": "object",
        "properties": {
            "inquire": {
                "type": "string",
                "description": "The question you want to ask human.",
            }
        },
        "required": ["inquire"],
    }

    async def execute(self, inquire: str) -> str:
        """Prompt the human on stdin and return the stripped reply.

        NOTE(review): `input()` blocks the event loop; fine for a CLI
        session, but confirm no concurrent tasks depend on this agent.
        """
        return input(f"""Bot: {inquire}\n\nYou: """).strip()
\"\"\"\n\n name: str = \"Data_Analysis\"\n description: str = \"An analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 15000\n max_steps: int = 20\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n NormalPythonExecute(),\n VisualizationPrepare(),\n DataVisualization(),\n Terminate(),\n )\n )\n"], ["/OpenManus/app/tool/terminate.py", "from app.tool.base import BaseTool\n\n\n_TERMINATE_DESCRIPTION = \"\"\"Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task.\nWhen you have finished all the tasks, call this tool to end the work.\"\"\"\n\n\nclass Terminate(BaseTool):\n name: str = \"terminate\"\n description: str = _TERMINATE_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"status\": {\n \"type\": \"string\",\n \"description\": \"The finish status of the interaction.\",\n \"enum\": [\"success\", \"failure\"],\n }\n },\n \"required\": [\"status\"],\n }\n\n async def execute(self, status: str) -> str:\n \"\"\"Finish the current execution\"\"\"\n return f\"The interaction has been completed with status: {status}\"\n"], ["/OpenManus/app/tool/search/google_search.py", "from typing import List\n\nfrom googlesearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass GoogleSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Google search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results, advanced=True)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If 
it's just a URL\n results.append(\n {\"title\": f\"Google Result {i+1}\", \"url\": item, \"description\": \"\"}\n )\n else:\n results.append(\n SearchItem(\n title=item.title, url=item.url, description=item.description\n )\n )\n\n return results\n"], ["/OpenManus/app/agent/__init__.py", "from app.agent.base import BaseAgent\nfrom app.agent.browser import BrowserAgent\nfrom app.agent.mcp import MCPAgent\nfrom app.agent.react import ReActAgent\nfrom app.agent.swe import SWEAgent\nfrom app.agent.toolcall import ToolCallAgent\n\n\n__all__ = [\n \"BaseAgent\",\n \"BrowserAgent\",\n \"ReActAgent\",\n \"SWEAgent\",\n \"ToolCallAgent\",\n \"MCPAgent\",\n]\n"], ["/OpenManus/app/agent/swe.py", "from typing import List\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.prompt.swe import SYSTEM_PROMPT\nfrom app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection\n\n\nclass SWEAgent(ToolCallAgent):\n \"\"\"An agent that implements the SWEAgent paradigm for executing code and natural conversations.\"\"\"\n\n name: str = \"swe\"\n description: str = \"an autonomous AI programmer that interacts directly with the computer to solve tasks.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = \"\"\n\n available_tools: ToolCollection = ToolCollection(\n Bash(), StrReplaceEditor(), Terminate()\n )\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n max_steps: int = 20\n"], ["/OpenManus/app/__init__.py", "# Python version check: 3.11-3.13\nimport sys\n\n\nif sys.version_info < (3, 11) or sys.version_info > (3, 13):\n print(\n \"Warning: Unsupported Python version {ver}, please use 3.11-3.13\".format(\n ver=\".\".join(map(str, sys.version_info))\n )\n )\n"], ["/OpenManus/app/prompt/mcp.py", "\"\"\"Prompts for the MCP Agent.\"\"\"\n\nSYSTEM_PROMPT = \"\"\"You are an AI assistant with access to a Model Context Protocol (MCP) server.\nYou can use the tools provided by the MCP server to complete 
tasks.\nThe MCP server will dynamically expose tools that you can use - always check the available tools first.\n\nWhen using an MCP tool:\n1. Choose the appropriate tool based on your task requirements\n2. Provide properly formatted arguments as required by the tool\n3. Observe the results and use them to determine next steps\n4. Tools may change during operation - new tools might appear or existing ones might disappear\n\nFollow these guidelines:\n- Call tools with valid parameters as documented in their schemas\n- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters\n- For multimedia responses (like images), you'll receive a description of the content\n- Complete user requests step by step, using the most appropriate tools\n- If multiple tools need to be called in sequence, make one call at a time and wait for results\n\nRemember to clearly explain your reasoning and actions to the user.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on the current state and available tools, what should be done next?\nThink step by step about the problem and identify which MCP tool would be most helpful for the current stage.\nIf you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.\n\"\"\"\n\n# Additional specialized prompts\nTOOL_ERROR_PROMPT = \"\"\"You encountered an error with the tool '{tool_name}'.\nTry to understand what went wrong and correct your approach.\nCommon issues include:\n- Missing or incorrect parameters\n- Invalid parameter formats\n- Using a tool that's no longer available\n- Attempting an operation that's not supported\n\nPlease check the tool specifications and try again with corrected parameters.\n\"\"\"\n\nMULTIMEDIA_RESPONSE_PROMPT = \"\"\"You've received a multimedia response (image, audio, etc.) 
from the tool '{tool_name}'.\nThis content has been processed and described for you.\nUse this information to continue the task or provide insights to the user.\n\"\"\"\n"], ["/OpenManus/app/prompt/swe.py", "SYSTEM_PROMPT = \"\"\"SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.\n\nThe special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time.\nIn addition to typical bash commands, you can also use specific commands to help you navigate and edit files.\nTo call a command, you need to invoke it with a function call/tool call.\n\nPlease note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.\nIf you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.\n\nRESPONSE FORMAT:\nYour shell prompt is formatted as follows:\n(Open file: )\n(Current directory: )\nbash-$\n\nFirst, you should _always_ include a general thought about what you're going to do next.\nThen, for every response, you must include exactly _ONE_ tool call/function call.\n\nRemember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference.\nIf you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call.\nNote that the environment does NOT support interactive session commands (e.g. 
python, vim), so please do not invoke them.\n\"\"\"\n"], ["/OpenManus/app/prompt/planning.py", "PLANNING_SYSTEM_PROMPT = \"\"\"\nYou are an expert Planning Agent tasked with solving problems efficiently through structured plans.\nYour job is:\n1. Analyze requests to understand the task scope\n2. Create a clear, actionable plan that makes meaningful progress with the `planning` tool\n3. Execute steps using available tools as needed\n4. Track progress and adapt plans when necessary\n5. Use `finish` to conclude immediately when the task is complete\n\n\nAvailable tools will vary by task but may include:\n- `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.)\n- `finish`: End the task when complete\nBreak tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps.\nThink about dependencies and verification methods.\nKnow when to conclude - don't continue thinking once objectives are met.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on the current state, what's your next action?\nChoose the most efficient path forward:\n1. Is the plan sufficient, or does it need refinement?\n2. Can you execute the next step immediately?\n3. Is the task complete? If so, use `finish` right away.\n\nBe concise in your reasoning, then select the appropriate tool or action.\n\"\"\"\n"], ["/OpenManus/app/prompt/visualization.py", "SYSTEM_PROMPT = \"\"\"You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests.\n# Note:\n1. The workspace directory is: {directory}; Read / write file in workspace\n2. Generate analysis conclusion report in the end\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on user needs, break down the problem and use different tools step by step to solve it.\n# Note\n1. Each step select the most appropriate tool proactively (ONLY ONE).\n2. 
After using each tool, clearly explain the execution results and suggest the next steps.\n3. When observation with Error, review and fix it.\"\"\"\n"], ["/OpenManus/app/prompt/manus.py", "SYSTEM_PROMPT = (\n \"You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all.\"\n \"The initial directory is: {directory}\"\n)\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/exceptions.py", "class ToolError(Exception):\n \"\"\"Raised when a tool encounters an error.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n\nclass OpenManusError(Exception):\n \"\"\"Base exception for all OpenManus errors\"\"\"\n\n\nclass TokenLimitExceeded(OpenManusError):\n \"\"\"Exception raised when the token limit is exceeded\"\"\"\n"], ["/OpenManus/app/sandbox/__init__.py", "\"\"\"\nDocker Sandbox Module\n\nProvides secure containerized execution environment with resource limits\nand isolation for running untrusted code.\n\"\"\"\nfrom app.sandbox.client import (\n BaseSandboxClient,\n LocalSandboxClient,\n create_sandbox_client,\n)\nfrom app.sandbox.core.exceptions import (\n SandboxError,\n SandboxResourceError,\n SandboxTimeoutError,\n)\nfrom app.sandbox.core.manager import SandboxManager\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\n__all__ = [\n \"DockerSandbox\",\n \"SandboxManager\",\n 
\"BaseSandboxClient\",\n \"LocalSandboxClient\",\n \"create_sandbox_client\",\n \"SandboxError\",\n \"SandboxTimeoutError\",\n \"SandboxResourceError\",\n]\n"], ["/OpenManus/app/tool/__init__.py", "from app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.create_chat_completion import CreateChatCompletion\nfrom app.tool.planning import PlanningTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\nfrom app.tool.tool_collection import ToolCollection\nfrom app.tool.web_search import WebSearch\nfrom app.tool.crawl4ai import Crawl4aiTool\n\n\n__all__ = [\n \"BaseTool\",\n \"Bash\",\n \"BrowserUseTool\",\n \"Terminate\",\n \"StrReplaceEditor\",\n \"WebSearch\",\n \"ToolCollection\",\n \"CreateChatCompletion\",\n \"PlanningTool\",\n \"Crawl4aiTool\"\n]\n"], ["/OpenManus/app/tool/search/__init__.py", "from app.tool.search.baidu_search import BaiduSearchEngine\nfrom app.tool.search.base import WebSearchEngine\nfrom app.tool.search.bing_search import BingSearchEngine\nfrom app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine\nfrom app.tool.search.google_search import GoogleSearchEngine\n\n\n__all__ = [\n \"WebSearchEngine\",\n \"BaiduSearchEngine\",\n \"DuckDuckGoSearchEngine\",\n \"GoogleSearchEngine\",\n \"BingSearchEngine\",\n]\n"], ["/OpenManus/app/tool/chart_visualization/__init__.py", "from app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\n__all__ = [\"DataVisualization\", \"VisualizationPrepare\", \"NormalPythonExecute\"]\n"], ["/OpenManus/app/sandbox/core/exceptions.py", "\"\"\"Exception classes for the sandbox system.\n\nThis module defines custom exceptions used throughout the sandbox system to\nhandle various error conditions in a structured 
way.\n\"\"\"\n\n\nclass SandboxError(Exception):\n \"\"\"Base exception for sandbox-related errors.\"\"\"\n\n\nclass SandboxTimeoutError(SandboxError):\n \"\"\"Exception raised when a sandbox operation times out.\"\"\"\n\n\nclass SandboxResourceError(SandboxError):\n \"\"\"Exception raised for resource-related errors.\"\"\"\n"], ["/OpenManus/app/prompt/toolcall.py", "SYSTEM_PROMPT = \"You are an agent that can execute tool calls\"\n\nNEXT_STEP_PROMPT = (\n \"If you want to stop interaction, use `terminate` tool/function call.\"\n)\n"], ["/OpenManus/examples/benchmarks/__init__.py", "\"\"\"\nOpenManus benchmark system for standardized agent evaluation.\n\"\"\"\n"], ["/OpenManus/app/mcp/__init__.py", ""], ["/OpenManus/app/flow/__init__.py", ""], ["/OpenManus/app/prompt/__init__.py", ""], ["/OpenManus/protocol/a2a/app/__init__.py", ""], ["/OpenManus/protocol/a2a/__init__.py", ""]], "task_instance_info": {"created_time": "2025-08-20 15:01:10", "created_task_model": "DeepSeek-R1", "class_skeleton": "class MCPRunner:\n def __init__(self):\n pass", "code_description": "This code segment functions to: Coordinate the lifecycle of an agent service by establishing communication channels, managing runtime execution modes, and handling user interactions through multiple input pathways while maintaining session state."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass MCPRunner:\n def __init__(self):\n self.agent = None\n self.connection_type = None\n self.server_url = None\n\n async def initialize(self, connection: str, server_url: str = None) -> None:\n \"\"\"Initialize MCP agent with specified connection\"\"\"\n self.connection_type = connection\n self.server_url = server_url\n \n if connection == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL required for SSE connection\")\n self.agent = MCPAgent(connection=connection, server_url=server_url)\n else:\n self.agent = MCPAgent(connection=connection)\n \n 
await self.agent.initialize()\n\n async def run_single_prompt(self, prompt: str) -> None:\n \"\"\"Execute a single prompt and exit\"\"\"\n response = await self.agent.execute(prompt)\n print(response)\n sys.exit(0)\n\n async def run_interactive(self) -> None:\n \"\"\"Run in interactive REPL mode\"\"\"\n while True:\n try:\n prompt = input(\"> \")\n if prompt.lower() in (\"exit\", \"quit\"):\n return\n response = await self.agent.execute(prompt)\n print(response)\n except KeyboardInterrupt:\n return\n\n async def run_default(self) -> None:\n \"\"\"Run default continuous processing mode\"\"\"\n async for message in self.agent.stream_messages():\n print(message)\n\n async def cleanup(self) -> None:\n \"\"\"Clean up resources\"\"\"\n if self.agent:\n await self.agent.close()\n[TASK_END]\n```", "inference_time": "2025-08-20 15-01-43"}, "editdistance_info": {"edit_distance": 28.4884, "calculate_time": "2025-08-20 15:01:43", "true_code_clean": "class MCPRunner:\n def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()\n async def initialize(\n self,\n connection_type: str,\n server_url: str | None = None,\n ) -> None:\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: \n await self.agent.initialize(connection_type=\"sse\", server_url=server_url)\n logger.info(f\"Connected to MCP server via {connection_type}\")\n async def run_interactive(self) -> None:\n print(\"\\nMCP Agent Interactive Mode (type 'exit' to quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n print(f\"\\nAgent: {response}\")\n async def run_single_prompt(self, prompt: str) -> None:\n await 
self.agent.run(prompt)\n async def run_default(self) -> None:\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")\n async def cleanup(self) -> None:\n await self.agent.cleanup()\n logger.info(\"Session ended\")", "predict_code_clean": "class MCPRunner:\n def __init__(self):\n self.agent = None\n self.connection_type = None\n self.server_url = None\n async def initialize(self, connection: str, server_url: str = None) -> None:\n self.connection_type = connection\n self.server_url = server_url\n if connection == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL required for SSE connection\")\n self.agent = MCPAgent(connection=connection, server_url=server_url)\n else:\n self.agent = MCPAgent(connection=connection)\n await self.agent.initialize()\n async def run_single_prompt(self, prompt: str) -> None:\n response = await self.agent.execute(prompt)\n print(response)\n sys.exit(0)\n async def run_interactive(self) -> None:\n while True:\n try:\n prompt = input(\"> \")\n if prompt.lower() in (\"exit\", \"quit\"):\n return\n response = await self.agent.execute(prompt)\n print(response)\n except KeyboardInterrupt:\n return\n async def run_default(self) -> None:\n async for message in self.agent.stream_messages():\n print(message)\n async def cleanup(self) -> None:\n if self.agent:\n await self.agent.close()"}} {"repo_name": "OpenManus", "file_name": "/OpenManus/app/bedrock.py", "inference_info": {"prefix_code": "import json\nimport sys\nimport time\nimport uuid\nfrom datetime import datetime\nfrom typing import Dict, List, Literal, Optional\n\nimport boto3\n\n\n# Global variables to track the current tool use ID across function calls\n# Tmp solution\nCURRENT_TOOLUSE_ID = None\n\n\n# Class to handle OpenAI-style response formatting\nclass OpenAIResponse:\n def 
__init__(self, data):\n # Recursively convert nested dicts and lists to OpenAIResponse objects\n for key, value in data.items():\n if isinstance(value, dict):\n value = OpenAIResponse(value)\n elif isinstance(value, list):\n value = [\n OpenAIResponse(item) if isinstance(item, dict) else item\n for item in value\n ]\n setattr(self, key, value)\n\n def model_dump(self, *args, **kwargs):\n # Convert object to dict and add timestamp\n data = self.__dict__\n data[\"created_at\"] = datetime.now().isoformat()\n return data\n\n\n# Main client class for interacting with Amazon Bedrock\nclass BedrockClient:\n def __init__(self):\n # Initialize Bedrock client, you need to configure AWS env first\n try:\n self.client = boto3.client(\"bedrock-runtime\")\n self.chat = Chat(self.client)\n except Exception as e:\n print(f\"Error initializing Bedrock client: {e}\")\n sys.exit(1)\n\n\n# Chat interface class\nclass Chat:\n def __init__(self, client):\n self.completions = ChatCompletions(client)\n\n\n# Core class handling chat completions functionality\n", "suffix_code": "\n", "middle_code": "class ChatCompletions:\n def __init__(self, client):\n self.client = client\n def _convert_openai_tools_to_bedrock_format(self, tools):\n bedrock_tools = []\n for tool in tools:\n if tool.get(\"type\") == \"function\":\n function = tool.get(\"function\", {})\n bedrock_tool = {\n \"toolSpec\": {\n \"name\": function.get(\"name\", \"\"),\n \"description\": function.get(\"description\", \"\"),\n \"inputSchema\": {\n \"json\": {\n \"type\": \"object\",\n \"properties\": function.get(\"parameters\", {}).get(\n \"properties\", {}\n ),\n \"required\": function.get(\"parameters\", {}).get(\n \"required\", []\n ),\n }\n },\n }\n }\n bedrock_tools.append(bedrock_tool)\n return bedrock_tools\n def _convert_openai_messages_to_bedrock_format(self, messages):\n bedrock_messages = []\n system_prompt = []\n for message in messages:\n if message.get(\"role\") == \"system\":\n system_prompt = [{\"text\": 
message.get(\"content\")}]\n elif message.get(\"role\") == \"user\":\n bedrock_message = {\n \"role\": message.get(\"role\", \"user\"),\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"assistant\":\n bedrock_message = {\n \"role\": \"assistant\",\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n openai_tool_calls = message.get(\"tool_calls\", [])\n if openai_tool_calls:\n bedrock_tool_use = {\n \"toolUseId\": openai_tool_calls[0][\"id\"],\n \"name\": openai_tool_calls[0][\"function\"][\"name\"],\n \"input\": json.loads(\n openai_tool_calls[0][\"function\"][\"arguments\"]\n ),\n }\n bedrock_message[\"content\"].append({\"toolUse\": bedrock_tool_use})\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = openai_tool_calls[0][\"id\"]\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"tool\":\n bedrock_message = {\n \"role\": \"user\",\n \"content\": [\n {\n \"toolResult\": {\n \"toolUseId\": CURRENT_TOOLUSE_ID,\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n }\n ],\n }\n bedrock_messages.append(bedrock_message)\n else:\n raise ValueError(f\"Invalid role: {message.get('role')}\")\n return system_prompt, bedrock_messages\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n content = \"\"\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n content_array = bedrock_response[\"output\"][\"message\"][\"content\"]\n content = \"\".join(item.get(\"text\", \"\") for item in content_array)\n if content == \"\":\n content = \".\"\n openai_tool_calls = []\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n for content_item in bedrock_response[\"output\"][\"message\"][\"content\"]:\n if content_item.get(\"toolUse\"):\n bedrock_tool_use = content_item[\"toolUse\"]\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n openai_tool_call = 
{\n \"id\": CURRENT_TOOLUSE_ID,\n \"type\": \"function\",\n \"function\": {\n \"name\": bedrock_tool_use[\"name\"],\n \"arguments\": json.dumps(bedrock_tool_use[\"input\"]),\n },\n }\n openai_tool_calls.append(openai_tool_call)\n openai_format = {\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"created\": int(time.time()),\n \"object\": \"chat.completion\",\n \"system_fingerprint\": None,\n \"choices\": [\n {\n \"finish_reason\": bedrock_response.get(\"stopReason\", \"end_turn\"),\n \"index\": 0,\n \"message\": {\n \"content\": content,\n \"role\": bedrock_response.get(\"output\", {})\n .get(\"message\", {})\n .get(\"role\", \"assistant\"),\n \"tool_calls\": openai_tool_calls\n if openai_tool_calls != []\n else None,\n \"function_call\": None,\n },\n }\n ],\n \"usage\": {\n \"completion_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"outputTokens\", 0\n ),\n \"prompt_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"inputTokens\", 0\n ),\n \"total_tokens\": bedrock_response.get(\"usage\", {}).get(\"totalTokens\", 0),\n },\n }\n return OpenAIResponse(openai_format)\n async def _invoke_bedrock(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n openai_response = self._convert_bedrock_response_to_openai_format(response)\n return openai_response\n async def _invoke_bedrock_stream(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: 
Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse_stream(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n bedrock_response = {\n \"output\": {\"message\": {\"role\": \"\", \"content\": []}},\n \"stopReason\": \"\",\n \"usage\": {},\n \"metrics\": {},\n }\n bedrock_response_text = \"\"\n bedrock_response_tool_input = \"\"\n stream = response.get(\"stream\")\n if stream:\n for event in stream:\n if event.get(\"messageStart\", {}).get(\"role\"):\n bedrock_response[\"output\"][\"message\"][\"role\"] = event[\n \"messageStart\"\n ][\"role\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"text\"):\n bedrock_response_text += event[\"contentBlockDelta\"][\"delta\"][\"text\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"text\"], end=\"\", flush=True\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 0:\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"text\": bedrock_response_text}\n )\n if event.get(\"contentBlockStart\", {}).get(\"start\", {}).get(\"toolUse\"):\n bedrock_tool_use = event[\"contentBlockStart\"][\"start\"][\"toolUse\"]\n tool_use = {\n \"toolUseId\": bedrock_tool_use[\"toolUseId\"],\n \"name\": bedrock_tool_use[\"name\"],\n }\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"toolUse\": tool_use}\n )\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"toolUse\"):\n bedrock_response_tool_input += event[\"contentBlockDelta\"][\"delta\"][\n \"toolUse\"\n ][\"input\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"toolUse\"][\"input\"],\n 
end=\"\",\n flush=True,\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 1:\n bedrock_response[\"output\"][\"message\"][\"content\"][1][\"toolUse\"][\n \"input\"\n ] = json.loads(bedrock_response_tool_input)\n print()\n openai_response = self._convert_bedrock_response_to_openai_format(\n bedrock_response\n )\n return openai_response\n def create(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n stream: Optional[bool] = True,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n bedrock_tools = []\n if tools is not None:\n bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)\n if stream:\n return self._invoke_bedrock_stream(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n else:\n return self._invoke_bedrock(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/OpenManus/app/llm.py", "import math\nfrom typing import Dict, List, Optional, Union\n\nimport tiktoken\nfrom openai import (\n APIError,\n AsyncAzureOpenAI,\n AsyncOpenAI,\n AuthenticationError,\n OpenAIError,\n RateLimitError,\n)\nfrom openai.types.chat import ChatCompletion, ChatCompletionMessage\nfrom tenacity import (\n retry,\n retry_if_exception_type,\n stop_after_attempt,\n wait_random_exponential,\n)\n\nfrom app.bedrock import BedrockClient\nfrom app.config import LLMSettings, config\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger # Assuming a logger is set up in your app\nfrom app.schema import (\n ROLE_VALUES,\n TOOL_CHOICE_TYPE,\n TOOL_CHOICE_VALUES,\n Message,\n ToolChoice,\n)\n\n\nREASONING_MODELS = [\"o1\", \"o3-mini\"]\nMULTIMODAL_MODELS = [\n 
\"gpt-4-vision-preview\",\n \"gpt-4o\",\n \"gpt-4o-mini\",\n \"claude-3-opus-20240229\",\n \"claude-3-sonnet-20240229\",\n \"claude-3-haiku-20240307\",\n]\n\n\nclass TokenCounter:\n # Token constants\n BASE_MESSAGE_TOKENS = 4\n FORMAT_TOKENS = 2\n LOW_DETAIL_IMAGE_TOKENS = 85\n HIGH_DETAIL_TILE_TOKENS = 170\n\n # Image processing constants\n MAX_SIZE = 2048\n HIGH_DETAIL_TARGET_SHORT_SIDE = 768\n TILE_SIZE = 512\n\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n\n def count_text(self, text: str) -> int:\n \"\"\"Calculate tokens for a text string\"\"\"\n return 0 if not text else len(self.tokenizer.encode(text))\n\n def count_image(self, image_item: dict) -> int:\n \"\"\"\n Calculate tokens for an image based on detail level and dimensions\n\n For \"low\" detail: fixed 85 tokens\n For \"high\" detail:\n 1. Scale to fit in 2048x2048 square\n 2. Scale shortest side to 768px\n 3. Count 512px tiles (170 tokens each)\n 4. Add 85 tokens\n \"\"\"\n detail = image_item.get(\"detail\", \"medium\")\n\n # For low detail, always return fixed token count\n if detail == \"low\":\n return self.LOW_DETAIL_IMAGE_TOKENS\n\n # For medium detail (default in OpenAI), use high detail calculation\n # OpenAI doesn't specify a separate calculation for medium\n\n # For high detail, calculate based on dimensions if available\n if detail == \"high\" or detail == \"medium\":\n # If dimensions are provided in the image_item\n if \"dimensions\" in image_item:\n width, height = image_item[\"dimensions\"]\n return self._calculate_high_detail_tokens(width, height)\n\n return (\n self._calculate_high_detail_tokens(1024, 1024) if detail == \"high\" else 1024\n )\n\n def _calculate_high_detail_tokens(self, width: int, height: int) -> int:\n \"\"\"Calculate tokens for high detail images based on dimensions\"\"\"\n # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square\n if width > self.MAX_SIZE or height > self.MAX_SIZE:\n scale = self.MAX_SIZE / max(width, height)\n width = int(width * 
scale)\n height = int(height * scale)\n\n # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE\n scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height)\n scaled_width = int(width * scale)\n scaled_height = int(height * scale)\n\n # Step 3: Count number of 512px tiles\n tiles_x = math.ceil(scaled_width / self.TILE_SIZE)\n tiles_y = math.ceil(scaled_height / self.TILE_SIZE)\n total_tiles = tiles_x * tiles_y\n\n # Step 4: Calculate final token count\n return (\n total_tiles * self.HIGH_DETAIL_TILE_TOKENS\n ) + self.LOW_DETAIL_IMAGE_TOKENS\n\n def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int:\n \"\"\"Calculate tokens for message content\"\"\"\n if not content:\n return 0\n\n if isinstance(content, str):\n return self.count_text(content)\n\n token_count = 0\n for item in content:\n if isinstance(item, str):\n token_count += self.count_text(item)\n elif isinstance(item, dict):\n if \"text\" in item:\n token_count += self.count_text(item[\"text\"])\n elif \"image_url\" in item:\n token_count += self.count_image(item)\n return token_count\n\n def count_tool_calls(self, tool_calls: List[dict]) -> int:\n \"\"\"Calculate tokens for tool calls\"\"\"\n token_count = 0\n for tool_call in tool_calls:\n if \"function\" in tool_call:\n function = tool_call[\"function\"]\n token_count += self.count_text(function.get(\"name\", \"\"))\n token_count += self.count_text(function.get(\"arguments\", \"\"))\n return token_count\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n \"\"\"Calculate the total number of tokens in a message list\"\"\"\n total_tokens = self.FORMAT_TOKENS # Base format tokens\n\n for message in messages:\n tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message\n\n # Add role tokens\n tokens += self.count_text(message.get(\"role\", \"\"))\n\n # Add content tokens\n if \"content\" in message:\n tokens += self.count_content(message[\"content\"])\n\n # Add tool calls tokens\n if \"tool_calls\" in 
message:\n tokens += self.count_tool_calls(message[\"tool_calls\"])\n\n # Add name and tool_call_id tokens\n tokens += self.count_text(message.get(\"name\", \"\"))\n tokens += self.count_text(message.get(\"tool_call_id\", \"\"))\n\n total_tokens += tokens\n\n return total_tokens\n\n\nclass LLM:\n _instances: Dict[str, \"LLM\"] = {}\n\n def __new__(\n cls, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if config_name not in cls._instances:\n instance = super().__new__(cls)\n instance.__init__(config_name, llm_config)\n cls._instances[config_name] = instance\n return cls._instances[config_name]\n\n def __init__(\n self, config_name: str = \"default\", llm_config: Optional[LLMSettings] = None\n ):\n if not hasattr(self, \"client\"): # Only initialize if not already initialized\n llm_config = llm_config or config.llm\n llm_config = llm_config.get(config_name, llm_config[\"default\"])\n self.model = llm_config.model\n self.max_tokens = llm_config.max_tokens\n self.temperature = llm_config.temperature\n self.api_type = llm_config.api_type\n self.api_key = llm_config.api_key\n self.api_version = llm_config.api_version\n self.base_url = llm_config.base_url\n\n # Add token counting related attributes\n self.total_input_tokens = 0\n self.total_completion_tokens = 0\n self.max_input_tokens = (\n llm_config.max_input_tokens\n if hasattr(llm_config, \"max_input_tokens\")\n else None\n )\n\n # Initialize tokenizer\n try:\n self.tokenizer = tiktoken.encoding_for_model(self.model)\n except KeyError:\n # If the model is not in tiktoken's presets, use cl100k_base as default\n self.tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n\n if self.api_type == \"azure\":\n self.client = AsyncAzureOpenAI(\n base_url=self.base_url,\n api_key=self.api_key,\n api_version=self.api_version,\n )\n elif self.api_type == \"aws\":\n self.client = BedrockClient()\n else:\n self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)\n\n self.token_counter 
= TokenCounter(self.tokenizer)\n\n def count_tokens(self, text: str) -> int:\n \"\"\"Calculate the number of tokens in a text\"\"\"\n if not text:\n return 0\n return len(self.tokenizer.encode(text))\n\n def count_message_tokens(self, messages: List[dict]) -> int:\n return self.token_counter.count_message_tokens(messages)\n\n def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None:\n \"\"\"Update token counts\"\"\"\n # Only track tokens if max_input_tokens is set\n self.total_input_tokens += input_tokens\n self.total_completion_tokens += completion_tokens\n logger.info(\n f\"Token usage: Input={input_tokens}, Completion={completion_tokens}, \"\n f\"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, \"\n f\"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}\"\n )\n\n def check_token_limit(self, input_tokens: int) -> bool:\n \"\"\"Check if token limits are exceeded\"\"\"\n if self.max_input_tokens is not None:\n return (self.total_input_tokens + input_tokens) <= self.max_input_tokens\n # If max_input_tokens is not set, always return True\n return True\n\n def get_limit_error_message(self, input_tokens: int) -> str:\n \"\"\"Generate error message for token limit exceeded\"\"\"\n if (\n self.max_input_tokens is not None\n and (self.total_input_tokens + input_tokens) > self.max_input_tokens\n ):\n return f\"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})\"\n\n return \"Token limit exceeded\"\n\n @staticmethod\n def format_messages(\n messages: List[Union[dict, Message]], supports_images: bool = False\n ) -> List[dict]:\n \"\"\"\n Format messages for LLM by converting them to OpenAI message format.\n\n Args:\n messages: List of messages that can be either dict or Message objects\n supports_images: Flag indicating if the target model supports image 
inputs\n\n Returns:\n List[dict]: List of formatted messages in OpenAI format\n\n Raises:\n ValueError: If messages are invalid or missing required fields\n TypeError: If unsupported message types are provided\n\n Examples:\n >>> msgs = [\n ... Message.system_message(\"You are a helpful assistant\"),\n ... {\"role\": \"user\", \"content\": \"Hello\"},\n ... Message.user_message(\"How are you?\")\n ... ]\n >>> formatted = LLM.format_messages(msgs)\n \"\"\"\n formatted_messages = []\n\n for message in messages:\n # Convert Message objects to dictionaries\n if isinstance(message, Message):\n message = message.to_dict()\n\n if isinstance(message, dict):\n # If message is a dict, ensure it has required fields\n if \"role\" not in message:\n raise ValueError(\"Message dict must contain 'role' field\")\n\n # Process base64 images if present and model supports images\n if supports_images and message.get(\"base64_image\"):\n # Initialize or convert content to appropriate format\n if not message.get(\"content\"):\n message[\"content\"] = []\n elif isinstance(message[\"content\"], str):\n message[\"content\"] = [\n {\"type\": \"text\", \"text\": message[\"content\"]}\n ]\n elif isinstance(message[\"content\"], list):\n # Convert string items to proper text objects\n message[\"content\"] = [\n (\n {\"type\": \"text\", \"text\": item}\n if isinstance(item, str)\n else item\n )\n for item in message[\"content\"]\n ]\n\n # Add the image to content\n message[\"content\"].append(\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": f\"data:image/jpeg;base64,{message['base64_image']}\"\n },\n }\n )\n\n # Remove the base64_image field\n del message[\"base64_image\"]\n # If model doesn't support images but message has base64_image, handle gracefully\n elif not supports_images and message.get(\"base64_image\"):\n # Just remove the base64_image field and keep the text content\n del message[\"base64_image\"]\n\n if \"content\" in message or \"tool_calls\" in message:\n 
formatted_messages.append(message)\n # else: do not include the message\n else:\n raise TypeError(f\"Unsupported message type: {type(message)}\")\n\n # Validate all messages have required fields\n for msg in formatted_messages:\n if msg[\"role\"] not in ROLE_VALUES:\n raise ValueError(f\"Invalid role: {msg['role']}\")\n\n return formatted_messages\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = True,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format system and user messages with image support check\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise 
TokenLimitExceeded(error_message)\n\n params = {\n \"model\": self.model,\n \"messages\": messages,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n if not stream:\n # Non-streaming request\n response = await self.client.chat.completions.create(\n **params, stream=False\n )\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message.content\n\n # Streaming request, For streaming, update estimated token count before making the request\n self.update_token_count(input_tokens)\n\n response = await self.client.chat.completions.create(**params, stream=True)\n\n collected_messages = []\n completion_text = \"\"\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n completion_text += chunk_message\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n # estimate completion tokens for streaming response\n completion_tokens = self.count_tokens(completion_text)\n logger.info(\n f\"Estimated completion tokens for streaming response: {completion_tokens}\"\n )\n self.total_completion_tokens += completion_tokens\n\n return full_response\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError:\n logger.exception(f\"Validation error\")\n raise\n except OpenAIError as oe:\n logger.exception(f\"OpenAI API error\")\n if isinstance(oe, AuthenticationError):\n 
logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception:\n logger.exception(f\"Unexpected error in ask\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_with_images(\n self,\n messages: List[Union[dict, Message]],\n images: List[Union[str, dict]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n stream: bool = False,\n temperature: Optional[float] = None,\n ) -> str:\n \"\"\"\n Send a prompt with images to the LLM and get the response.\n\n Args:\n messages: List of conversation messages\n images: List of image URLs or image data dictionaries\n system_msgs: Optional system messages to prepend\n stream (bool): Whether to stream the response\n temperature (float): Sampling temperature for the response\n\n Returns:\n str: The generated response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If messages are invalid or response is empty\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # For ask_with_images, we always set supports_images to True because\n # this method should only be called with models that support images\n if self.model not in MULTIMODAL_MODELS:\n raise ValueError(\n f\"Model {self.model} does not support images. 
Use a model from {MULTIMODAL_MODELS}\"\n )\n\n # Format messages with image support\n formatted_messages = self.format_messages(messages, supports_images=True)\n\n # Ensure the last message is from the user to attach images\n if not formatted_messages or formatted_messages[-1][\"role\"] != \"user\":\n raise ValueError(\n \"The last message must be from the user to attach images\"\n )\n\n # Process the last user message to include images\n last_message = formatted_messages[-1]\n\n # Convert content to multimodal format if needed\n content = last_message[\"content\"]\n multimodal_content = (\n [{\"type\": \"text\", \"text\": content}]\n if isinstance(content, str)\n else content\n if isinstance(content, list)\n else []\n )\n\n # Add images to content\n for image in images:\n if isinstance(image, str):\n multimodal_content.append(\n {\"type\": \"image_url\", \"image_url\": {\"url\": image}}\n )\n elif isinstance(image, dict) and \"url\" in image:\n multimodal_content.append({\"type\": \"image_url\", \"image_url\": image})\n elif isinstance(image, dict) and \"image_url\" in image:\n multimodal_content.append(image)\n else:\n raise ValueError(f\"Unsupported image format: {image}\")\n\n # Update the message with multimodal content\n last_message[\"content\"] = multimodal_content\n\n # Add system messages if provided\n if system_msgs:\n all_messages = (\n self.format_messages(system_msgs, supports_images=True)\n + formatted_messages\n )\n else:\n all_messages = formatted_messages\n\n # Calculate tokens and check limits\n input_tokens = self.count_message_tokens(all_messages)\n if not self.check_token_limit(input_tokens):\n raise TokenLimitExceeded(self.get_limit_error_message(input_tokens))\n\n # Set up API parameters\n params = {\n \"model\": self.model,\n \"messages\": all_messages,\n \"stream\": stream,\n }\n\n # Add model-specific parameters\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = 
self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n # Handle non-streaming request\n if not stream:\n response = await self.client.chat.completions.create(**params)\n\n if not response.choices or not response.choices[0].message.content:\n raise ValueError(\"Empty or invalid response from LLM\")\n\n self.update_token_count(response.usage.prompt_tokens)\n return response.choices[0].message.content\n\n # Handle streaming request\n self.update_token_count(input_tokens)\n response = await self.client.chat.completions.create(**params)\n\n collected_messages = []\n async for chunk in response:\n chunk_message = chunk.choices[0].delta.content or \"\"\n collected_messages.append(chunk_message)\n print(chunk_message, end=\"\", flush=True)\n\n print() # Newline after streaming\n full_response = \"\".join(collected_messages).strip()\n\n if not full_response:\n raise ValueError(\"Empty response from streaming LLM\")\n\n return full_response\n\n except TokenLimitExceeded:\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_with_images: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_with_images: {e}\")\n raise\n\n @retry(\n wait=wait_random_exponential(min=1, max=60),\n stop=stop_after_attempt(6),\n retry=retry_if_exception_type(\n (OpenAIError, Exception, ValueError)\n ), # Don't retry TokenLimitExceeded\n )\n async def ask_tool(\n self,\n messages: List[Union[dict, Message]],\n system_msgs: Optional[List[Union[dict, Message]]] = None,\n timeout: int = 300,\n tools: Optional[List[dict]] = None,\n tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore\n temperature: Optional[float] = None,\n **kwargs,\n ) -> ChatCompletionMessage | None:\n \"\"\"\n Ask LLM using functions/tools and return the response.\n\n Args:\n messages: List of conversation messages\n system_msgs: Optional system messages to prepend\n timeout: Request timeout in seconds\n tools: List of tools to use\n tool_choice: Tool choice strategy\n temperature: Sampling temperature for the response\n **kwargs: Additional completion arguments\n\n Returns:\n ChatCompletionMessage: The model's response\n\n Raises:\n TokenLimitExceeded: If token limits are exceeded\n ValueError: If tools, tool_choice, or messages are invalid\n OpenAIError: If API call fails after retries\n Exception: For unexpected errors\n \"\"\"\n try:\n # Validate tool_choice\n if tool_choice not in TOOL_CHOICE_VALUES:\n raise ValueError(f\"Invalid tool_choice: {tool_choice}\")\n\n # Check if the model supports images\n supports_images = self.model in MULTIMODAL_MODELS\n\n # Format messages\n if system_msgs:\n system_msgs = self.format_messages(system_msgs, supports_images)\n messages = system_msgs + self.format_messages(messages, supports_images)\n else:\n messages = self.format_messages(messages, supports_images)\n\n # Calculate input token count\n input_tokens = self.count_message_tokens(messages)\n\n # If there are tools, 
calculate token count for tool descriptions\n tools_tokens = 0\n if tools:\n for tool in tools:\n tools_tokens += self.count_tokens(str(tool))\n\n input_tokens += tools_tokens\n\n # Check if token limits are exceeded\n if not self.check_token_limit(input_tokens):\n error_message = self.get_limit_error_message(input_tokens)\n # Raise a special exception that won't be retried\n raise TokenLimitExceeded(error_message)\n\n # Validate tools if provided\n if tools:\n for tool in tools:\n if not isinstance(tool, dict) or \"type\" not in tool:\n raise ValueError(\"Each tool must be a dict with 'type' field\")\n\n # Set up the completion request\n params = {\n \"model\": self.model,\n \"messages\": messages,\n \"tools\": tools,\n \"tool_choice\": tool_choice,\n \"timeout\": timeout,\n **kwargs,\n }\n\n if self.model in REASONING_MODELS:\n params[\"max_completion_tokens\"] = self.max_tokens\n else:\n params[\"max_tokens\"] = self.max_tokens\n params[\"temperature\"] = (\n temperature if temperature is not None else self.temperature\n )\n\n params[\"stream\"] = False # Always use non-streaming for tool requests\n response: ChatCompletion = await self.client.chat.completions.create(\n **params\n )\n\n # Check if response is valid\n if not response.choices or not response.choices[0].message:\n print(response)\n # raise ValueError(\"Invalid or empty response from LLM\")\n return None\n\n # Update token counts\n self.update_token_count(\n response.usage.prompt_tokens, response.usage.completion_tokens\n )\n\n return response.choices[0].message\n\n except TokenLimitExceeded:\n # Re-raise token limit errors without logging\n raise\n except ValueError as ve:\n logger.error(f\"Validation error in ask_tool: {ve}\")\n raise\n except OpenAIError as oe:\n logger.error(f\"OpenAI API error: {oe}\")\n if isinstance(oe, AuthenticationError):\n logger.error(\"Authentication failed. Check API key.\")\n elif isinstance(oe, RateLimitError):\n logger.error(\"Rate limit exceeded. 
Consider increasing retry attempts.\")\n elif isinstance(oe, APIError):\n logger.error(f\"API error: {oe}\")\n raise\n except Exception as e:\n logger.error(f\"Unexpected error in ask_tool: {e}\")\n raise\n"], ["/OpenManus/app/tool/browser_use_tool.py", "import asyncio\nimport base64\nimport json\nfrom typing import Generic, Optional, TypeVar\n\nfrom browser_use import Browser as BrowserUseBrowser\nfrom browser_use import BrowserConfig\nfrom browser_use.browser.context import BrowserContext, BrowserContextConfig\nfrom browser_use.dom.service import DomService\nfrom pydantic import Field, field_validator\nfrom pydantic_core.core_schema import ValidationInfo\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.web_search import WebSearch\n\n\n_BROWSER_DESCRIPTION = \"\"\"\\\nA powerful browser automation tool that allows interaction with web pages through various actions.\n* This tool provides commands for controlling a browser session, navigating web pages, and extracting information\n* It maintains state across calls, keeping the browser session alive until explicitly closed\n* Use this when you need to browse websites, fill forms, click buttons, extract content, or perform web searches\n* Each action requires specific parameters as defined in the tool's dependencies\n\nKey capabilities include:\n* Navigation: Go to specific URLs, go back, search the web, or refresh pages\n* Interaction: Click elements, input text, select from dropdowns, send keyboard commands\n* Scrolling: Scroll up/down by pixel amount or scroll to specific text\n* Content extraction: Extract and analyze content from web pages based on specific goals\n* Tab management: Switch between tabs, open new tabs, or close tabs\n\nNote: When using element indices, refer to the numbered elements shown in the current browser state.\n\"\"\"\n\nContext = TypeVar(\"Context\")\n\n\nclass BrowserUseTool(BaseTool, Generic[Context]):\n name: str = 
\"browser_use\"\n description: str = _BROWSER_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"action\": {\n \"type\": \"string\",\n \"enum\": [\n \"go_to_url\",\n \"click_element\",\n \"input_text\",\n \"scroll_down\",\n \"scroll_up\",\n \"scroll_to_text\",\n \"send_keys\",\n \"get_dropdown_options\",\n \"select_dropdown_option\",\n \"go_back\",\n \"web_search\",\n \"wait\",\n \"extract_content\",\n \"switch_tab\",\n \"open_tab\",\n \"close_tab\",\n ],\n \"description\": \"The browser action to perform\",\n },\n \"url\": {\n \"type\": \"string\",\n \"description\": \"URL for 'go_to_url' or 'open_tab' actions\",\n },\n \"index\": {\n \"type\": \"integer\",\n \"description\": \"Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions\",\n },\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions\",\n },\n \"scroll_amount\": {\n \"type\": \"integer\",\n \"description\": \"Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions\",\n },\n \"tab_id\": {\n \"type\": \"integer\",\n \"description\": \"Tab ID for 'switch_tab' action\",\n },\n \"query\": {\n \"type\": \"string\",\n \"description\": \"Search query for 'web_search' action\",\n },\n \"goal\": {\n \"type\": \"string\",\n \"description\": \"Extraction goal for 'extract_content' action\",\n },\n \"keys\": {\n \"type\": \"string\",\n \"description\": \"Keys to send for 'send_keys' action\",\n },\n \"seconds\": {\n \"type\": \"integer\",\n \"description\": \"Seconds to wait for 'wait' action\",\n },\n },\n \"required\": [\"action\"],\n \"dependencies\": {\n \"go_to_url\": [\"url\"],\n \"click_element\": [\"index\"],\n \"input_text\": [\"index\", \"text\"],\n \"switch_tab\": [\"tab_id\"],\n \"open_tab\": [\"url\"],\n \"scroll_down\": [\"scroll_amount\"],\n \"scroll_up\": [\"scroll_amount\"],\n \"scroll_to_text\": 
[\"text\"],\n \"send_keys\": [\"keys\"],\n \"get_dropdown_options\": [\"index\"],\n \"select_dropdown_option\": [\"index\", \"text\"],\n \"go_back\": [],\n \"web_search\": [\"query\"],\n \"wait\": [\"seconds\"],\n \"extract_content\": [\"goal\"],\n },\n }\n\n lock: asyncio.Lock = Field(default_factory=asyncio.Lock)\n browser: Optional[BrowserUseBrowser] = Field(default=None, exclude=True)\n context: Optional[BrowserContext] = Field(default=None, exclude=True)\n dom_service: Optional[DomService] = Field(default=None, exclude=True)\n web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True)\n\n # Context for generic functionality\n tool_context: Optional[Context] = Field(default=None, exclude=True)\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n\n @field_validator(\"parameters\", mode=\"before\")\n def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict:\n if not v:\n raise ValueError(\"Parameters cannot be empty\")\n return v\n\n async def _ensure_browser_initialized(self) -> BrowserContext:\n \"\"\"Ensure browser and context are initialized.\"\"\"\n if self.browser is None:\n browser_config_kwargs = {\"headless\": False, \"disable_security\": True}\n\n if config.browser_config:\n from browser_use.browser.browser import ProxySettings\n\n # handle proxy settings.\n if config.browser_config.proxy and config.browser_config.proxy.server:\n browser_config_kwargs[\"proxy\"] = ProxySettings(\n server=config.browser_config.proxy.server,\n username=config.browser_config.proxy.username,\n password=config.browser_config.proxy.password,\n )\n\n browser_attrs = [\n \"headless\",\n \"disable_security\",\n \"extra_chromium_args\",\n \"chrome_instance_path\",\n \"wss_url\",\n \"cdp_url\",\n ]\n\n for attr in browser_attrs:\n value = getattr(config.browser_config, attr, None)\n if value is not None:\n if not isinstance(value, list) or value:\n browser_config_kwargs[attr] = value\n\n self.browser = 
BrowserUseBrowser(BrowserConfig(**browser_config_kwargs))\n\n if self.context is None:\n context_config = BrowserContextConfig()\n\n # if there is context config in the config, use it.\n if (\n config.browser_config\n and hasattr(config.browser_config, \"new_context_config\")\n and config.browser_config.new_context_config\n ):\n context_config = config.browser_config.new_context_config\n\n self.context = await self.browser.new_context(context_config)\n self.dom_service = DomService(await self.context.get_current_page())\n\n return self.context\n\n async def execute(\n self,\n action: str,\n url: Optional[str] = None,\n index: Optional[int] = None,\n text: Optional[str] = None,\n scroll_amount: Optional[int] = None,\n tab_id: Optional[int] = None,\n query: Optional[str] = None,\n goal: Optional[str] = None,\n keys: Optional[str] = None,\n seconds: Optional[int] = None,\n **kwargs,\n ) -> ToolResult:\n \"\"\"\n Execute a specified browser action.\n\n Args:\n action: The browser action to perform\n url: URL for navigation or new tab\n index: Element index for click or input actions\n text: Text for input action or search query\n scroll_amount: Pixels to scroll for scroll action\n tab_id: Tab ID for switch_tab action\n query: Search query for Google search\n goal: Extraction goal for content extraction\n keys: Keys to send for keyboard actions\n seconds: Seconds to wait\n **kwargs: Additional arguments\n\n Returns:\n ToolResult with the action's output or error\n \"\"\"\n async with self.lock:\n try:\n context = await self._ensure_browser_initialized()\n\n # Get max content length from config\n max_content_length = getattr(\n config.browser_config, \"max_content_length\", 2000\n )\n\n # Navigation actions\n if action == \"go_to_url\":\n if not url:\n return ToolResult(\n error=\"URL is required for 'go_to_url' action\"\n )\n page = await context.get_current_page()\n await page.goto(url)\n await page.wait_for_load_state()\n return ToolResult(output=f\"Navigated to 
{url}\")\n\n elif action == \"go_back\":\n await context.go_back()\n return ToolResult(output=\"Navigated back\")\n\n elif action == \"refresh\":\n await context.refresh_page()\n return ToolResult(output=\"Refreshed current page\")\n\n elif action == \"web_search\":\n if not query:\n return ToolResult(\n error=\"Query is required for 'web_search' action\"\n )\n # Execute the web search and return results directly without browser navigation\n search_response = await self.web_search_tool.execute(\n query=query, fetch_content=True, num_results=1\n )\n # Navigate to the first search result\n first_search_result = search_response.results[0]\n url_to_navigate = first_search_result.url\n\n page = await context.get_current_page()\n await page.goto(url_to_navigate)\n await page.wait_for_load_state()\n\n return search_response\n\n # Element interaction actions\n elif action == \"click_element\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'click_element' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n download_path = await context._click_element_node(element)\n output = f\"Clicked element at index {index}\"\n if download_path:\n output += f\" - Downloaded file to {download_path}\"\n return ToolResult(output=output)\n\n elif action == \"input_text\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'input_text' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n await context._input_text_element_node(element, text)\n return ToolResult(\n output=f\"Input '{text}' into element at index {index}\"\n )\n\n elif action == \"scroll_down\" or action == \"scroll_up\":\n direction = 1 if action == \"scroll_down\" else -1\n amount = (\n scroll_amount\n if scroll_amount is not None\n else 
context.config.browser_window_size[\"height\"]\n )\n await context.execute_javascript(\n f\"window.scrollBy(0, {direction * amount});\"\n )\n return ToolResult(\n output=f\"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels\"\n )\n\n elif action == \"scroll_to_text\":\n if not text:\n return ToolResult(\n error=\"Text is required for 'scroll_to_text' action\"\n )\n page = await context.get_current_page()\n try:\n locator = page.get_by_text(text, exact=False)\n await locator.scroll_into_view_if_needed()\n return ToolResult(output=f\"Scrolled to text: '{text}'\")\n except Exception as e:\n return ToolResult(error=f\"Failed to scroll to text: {str(e)}\")\n\n elif action == \"send_keys\":\n if not keys:\n return ToolResult(\n error=\"Keys are required for 'send_keys' action\"\n )\n page = await context.get_current_page()\n await page.keyboard.press(keys)\n return ToolResult(output=f\"Sent keys: {keys}\")\n\n elif action == \"get_dropdown_options\":\n if index is None:\n return ToolResult(\n error=\"Index is required for 'get_dropdown_options' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not found\")\n page = await context.get_current_page()\n options = await page.evaluate(\n \"\"\"\n (xpath) => {\n const select = document.evaluate(xpath, document, null,\n XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;\n if (!select) return null;\n return Array.from(select.options).map(opt => ({\n text: opt.text,\n value: opt.value,\n index: opt.index\n }));\n }\n \"\"\",\n element.xpath,\n )\n return ToolResult(output=f\"Dropdown options: {options}\")\n\n elif action == \"select_dropdown_option\":\n if index is None or not text:\n return ToolResult(\n error=\"Index and text are required for 'select_dropdown_option' action\"\n )\n element = await context.get_dom_element_by_index(index)\n if not element:\n return ToolResult(error=f\"Element with index {index} not 
found\")\n page = await context.get_current_page()\n await page.select_option(element.xpath, label=text)\n return ToolResult(\n output=f\"Selected option '{text}' from dropdown at index {index}\"\n )\n\n # Content extraction actions\n elif action == \"extract_content\":\n if not goal:\n return ToolResult(\n error=\"Goal is required for 'extract_content' action\"\n )\n\n page = await context.get_current_page()\n import markdownify\n\n content = markdownify.markdownify(await page.content())\n\n prompt = f\"\"\"\\\nYour task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. If the goal is vague, summarize the page. Respond in json format.\nExtraction goal: {goal}\n\nPage content:\n{content[:max_content_length]}\n\"\"\"\n messages = [{\"role\": \"system\", \"content\": prompt}]\n\n # Define extraction function schema\n extraction_function = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"extract_content\",\n \"description\": \"Extract specific information from a webpage based on a goal\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"extracted_content\": {\n \"type\": \"object\",\n \"description\": \"The content extracted from the page according to the goal\",\n \"properties\": {\n \"text\": {\n \"type\": \"string\",\n \"description\": \"Text content extracted from the page\",\n },\n \"metadata\": {\n \"type\": \"object\",\n \"description\": \"Additional metadata about the extracted content\",\n \"properties\": {\n \"source\": {\n \"type\": \"string\",\n \"description\": \"Source of the extracted content\",\n }\n },\n },\n },\n }\n },\n \"required\": [\"extracted_content\"],\n },\n },\n }\n\n # Use LLM to extract content with required function calling\n response = await self.llm.ask_tool(\n messages,\n tools=[extraction_function],\n tool_choice=\"required\",\n )\n\n if response and response.tool_calls:\n args = 
json.loads(response.tool_calls[0].function.arguments)\n extracted_content = args.get(\"extracted_content\", {})\n return ToolResult(\n output=f\"Extracted from page:\\n{extracted_content}\\n\"\n )\n\n return ToolResult(output=\"No content was extracted from the page.\")\n\n # Tab management actions\n elif action == \"switch_tab\":\n if tab_id is None:\n return ToolResult(\n error=\"Tab ID is required for 'switch_tab' action\"\n )\n await context.switch_to_tab(tab_id)\n page = await context.get_current_page()\n await page.wait_for_load_state()\n return ToolResult(output=f\"Switched to tab {tab_id}\")\n\n elif action == \"open_tab\":\n if not url:\n return ToolResult(error=\"URL is required for 'open_tab' action\")\n await context.create_new_tab(url)\n return ToolResult(output=f\"Opened new tab with {url}\")\n\n elif action == \"close_tab\":\n await context.close_current_tab()\n return ToolResult(output=\"Closed current tab\")\n\n # Utility actions\n elif action == \"wait\":\n seconds_to_wait = seconds if seconds is not None else 3\n await asyncio.sleep(seconds_to_wait)\n return ToolResult(output=f\"Waited for {seconds_to_wait} seconds\")\n\n else:\n return ToolResult(error=f\"Unknown action: {action}\")\n\n except Exception as e:\n return ToolResult(error=f\"Browser action '{action}' failed: {str(e)}\")\n\n async def get_current_state(\n self, context: Optional[BrowserContext] = None\n ) -> ToolResult:\n \"\"\"\n Get the current browser state as a ToolResult.\n If context is not provided, uses self.context.\n \"\"\"\n try:\n # Use provided context or fall back to self.context\n ctx = context or self.context\n if not ctx:\n return ToolResult(error=\"Browser context not initialized\")\n\n state = await ctx.get_state()\n\n # Create a viewport_info dictionary if it doesn't exist\n viewport_height = 0\n if hasattr(state, \"viewport_info\") and state.viewport_info:\n viewport_height = state.viewport_info.height\n elif hasattr(ctx, \"config\") and hasattr(ctx.config, 
\"browser_window_size\"):\n viewport_height = ctx.config.browser_window_size.get(\"height\", 0)\n\n # Take a screenshot for the state\n page = await ctx.get_current_page()\n\n await page.bring_to_front()\n await page.wait_for_load_state()\n\n screenshot = await page.screenshot(\n full_page=True, animations=\"disabled\", type=\"jpeg\", quality=100\n )\n\n screenshot = base64.b64encode(screenshot).decode(\"utf-8\")\n\n # Build the state info with all required fields\n state_info = {\n \"url\": state.url,\n \"title\": state.title,\n \"tabs\": [tab.model_dump() for tab in state.tabs],\n \"help\": \"[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.\",\n \"interactive_elements\": (\n state.element_tree.clickable_elements_to_string()\n if state.element_tree\n else \"\"\n ),\n \"scroll_info\": {\n \"pixels_above\": getattr(state, \"pixels_above\", 0),\n \"pixels_below\": getattr(state, \"pixels_below\", 0),\n \"total_height\": getattr(state, \"pixels_above\", 0)\n + getattr(state, \"pixels_below\", 0)\n + viewport_height,\n },\n \"viewport_height\": viewport_height,\n }\n\n return ToolResult(\n output=json.dumps(state_info, indent=4, ensure_ascii=False),\n base64_image=screenshot,\n )\n except Exception as e:\n return ToolResult(error=f\"Failed to get browser state: {str(e)}\")\n\n async def cleanup(self):\n \"\"\"Clean up browser resources.\"\"\"\n async with self.lock:\n if self.context is not None:\n await self.context.close()\n self.context = None\n self.dom_service = None\n if self.browser is not None:\n await self.browser.close()\n self.browser = None\n\n def __del__(self):\n \"\"\"Ensure cleanup when object is destroyed.\"\"\"\n if self.browser is not None or self.context is not None:\n try:\n asyncio.run(self.cleanup())\n except RuntimeError:\n loop = asyncio.new_event_loop()\n loop.run_until_complete(self.cleanup())\n 
loop.close()\n\n @classmethod\n def create_with_context(cls, context: Context) -> \"BrowserUseTool[Context]\":\n \"\"\"Factory method to create a BrowserUseTool with a specific context.\"\"\"\n tool = cls()\n tool.tool_context = context\n return tool\n"], ["/OpenManus/app/mcp/server.py", "import logging\nimport sys\n\n\nlogging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])\n\nimport argparse\nimport asyncio\nimport atexit\nimport json\nfrom inspect import Parameter, Signature\nfrom typing import Any, Dict, Optional\n\nfrom mcp.server.fastmcp import FastMCP\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\n\n\nclass MCPServer:\n \"\"\"MCP Server implementation with tool registration and management.\"\"\"\n\n def __init__(self, name: str = \"openmanus\"):\n self.server = FastMCP(name)\n self.tools: Dict[str, BaseTool] = {}\n\n # Initialize standard tools\n self.tools[\"bash\"] = Bash()\n self.tools[\"browser\"] = BrowserUseTool()\n self.tools[\"editor\"] = StrReplaceEditor()\n self.tools[\"terminate\"] = Terminate()\n\n def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:\n \"\"\"Register a tool with parameter validation and documentation.\"\"\"\n tool_name = method_name or tool.name\n tool_param = tool.to_param()\n tool_function = tool_param[\"function\"]\n\n # Define the async function to be registered\n async def tool_method(**kwargs):\n logger.info(f\"Executing {tool_name}: {kwargs}\")\n result = await tool.execute(**kwargs)\n\n logger.info(f\"Result of {tool_name}: {result}\")\n\n # Handle different types of results (match original logic)\n if hasattr(result, \"model_dump\"):\n return json.dumps(result.model_dump())\n elif isinstance(result, dict):\n return json.dumps(result)\n return result\n\n # 
Set method metadata\n tool_method.__name__ = tool_name\n tool_method.__doc__ = self._build_docstring(tool_function)\n tool_method.__signature__ = self._build_signature(tool_function)\n\n # Store parameter schema (important for tools that access it programmatically)\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n tool_method._parameter_schema = {\n param_name: {\n \"description\": param_details.get(\"description\", \"\"),\n \"type\": param_details.get(\"type\", \"any\"),\n \"required\": param_name in required_params,\n }\n for param_name, param_details in param_props.items()\n }\n\n # Register with server\n self.server.tool()(tool_method)\n logger.info(f\"Registered tool: {tool_name}\")\n\n def _build_docstring(self, tool_function: dict) -> str:\n \"\"\"Build a formatted docstring from tool function metadata.\"\"\"\n description = tool_function.get(\"description\", \"\")\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n # Build docstring (match original format)\n docstring = description\n if param_props:\n docstring += \"\\n\\nParameters:\\n\"\n for param_name, param_details in param_props.items():\n required_str = (\n \"(required)\" if param_name in required_params else \"(optional)\"\n )\n param_type = param_details.get(\"type\", \"any\")\n param_desc = param_details.get(\"description\", \"\")\n docstring += (\n f\" {param_name} ({param_type}) {required_str}: {param_desc}\\n\"\n )\n\n return docstring\n\n def _build_signature(self, tool_function: dict) -> Signature:\n \"\"\"Build a function signature from tool function metadata.\"\"\"\n param_props = tool_function.get(\"parameters\", {}).get(\"properties\", {})\n required_params = tool_function.get(\"parameters\", {}).get(\"required\", [])\n\n parameters = []\n\n # Follow original type 
mapping\n for param_name, param_details in param_props.items():\n param_type = param_details.get(\"type\", \"\")\n default = Parameter.empty if param_name in required_params else None\n\n # Map JSON Schema types to Python types (same as original)\n annotation = Any\n if param_type == \"string\":\n annotation = str\n elif param_type == \"integer\":\n annotation = int\n elif param_type == \"number\":\n annotation = float\n elif param_type == \"boolean\":\n annotation = bool\n elif param_type == \"object\":\n annotation = dict\n elif param_type == \"array\":\n annotation = list\n\n # Create parameter with same structure as original\n param = Parameter(\n name=param_name,\n kind=Parameter.KEYWORD_ONLY,\n default=default,\n annotation=annotation,\n )\n parameters.append(param)\n\n return Signature(parameters=parameters)\n\n async def cleanup(self) -> None:\n \"\"\"Clean up server resources.\"\"\"\n logger.info(\"Cleaning up resources\")\n # Follow original cleanup logic - only clean browser tool\n if \"browser\" in self.tools and hasattr(self.tools[\"browser\"], \"cleanup\"):\n await self.tools[\"browser\"].cleanup()\n\n def register_all_tools(self) -> None:\n \"\"\"Register all tools with the server.\"\"\"\n for tool in self.tools.values():\n self.register_tool(tool)\n\n def run(self, transport: str = \"stdio\") -> None:\n \"\"\"Run the MCP server.\"\"\"\n # Register all tools\n self.register_all_tools()\n\n # Register cleanup function (match original behavior)\n atexit.register(lambda: asyncio.run(self.cleanup()))\n\n # Start server (with same logging as original)\n logger.info(f\"Starting OpenManus server ({transport} mode)\")\n self.server.run(transport=transport)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"OpenManus MCP Server\")\n parser.add_argument(\n \"--transport\",\n choices=[\"stdio\"],\n default=\"stdio\",\n help=\"Communication method: stdio or http (default: 
stdio)\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/tool/crawl4ai.py", "\"\"\"\nCrawl4AI Web Crawler Tool for OpenManus\n\nThis tool integrates Crawl4AI, a high-performance web crawler designed for LLMs and AI agents,\nproviding fast, precise, and AI-ready data extraction with clean Markdown generation.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Union\nfrom urllib.parse import urlparse\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\n\n\nclass Crawl4aiTool(BaseTool):\n \"\"\"\n Web crawler tool powered by Crawl4AI.\n\n Provides clean markdown extraction optimized for AI processing.\n \"\"\"\n\n name: str = \"crawl4ai\"\n description: str = \"\"\"Web crawler that extracts clean, AI-ready content from web pages.\n\n Features:\n - Extracts clean markdown content optimized for LLMs\n - Handles JavaScript-heavy sites and dynamic content\n - Supports multiple URLs in a single request\n - Fast and reliable with built-in error handling\n\n Perfect for content analysis, research, and feeding web content to AI models.\"\"\"\n\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"urls\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"(required) List of URLs to crawl. Can be a single URL or multiple URLs.\",\n \"minItems\": 1,\n },\n \"timeout\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Timeout in seconds for each URL. Default is 30.\",\n \"default\": 30,\n \"minimum\": 5,\n \"maximum\": 120,\n },\n \"bypass_cache\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to bypass cache and fetch fresh content. 
Default is false.\",\n \"default\": False,\n },\n \"word_count_threshold\": {\n \"type\": \"integer\",\n \"description\": \"(optional) Minimum word count for content blocks. Default is 10.\",\n \"default\": 10,\n \"minimum\": 1,\n },\n },\n \"required\": [\"urls\"],\n }\n\n async def execute(\n self,\n urls: Union[str, List[str]],\n timeout: int = 30,\n bypass_cache: bool = False,\n word_count_threshold: int = 10,\n ) -> ToolResult:\n \"\"\"\n Execute web crawling for the specified URLs.\n\n Args:\n urls: Single URL string or list of URLs to crawl\n timeout: Timeout in seconds for each URL\n bypass_cache: Whether to bypass cache\n word_count_threshold: Minimum word count for content blocks\n\n Returns:\n ToolResult with crawl results\n \"\"\"\n # Normalize URLs to list\n if isinstance(urls, str):\n url_list = [urls]\n else:\n url_list = urls\n\n # Validate URLs\n valid_urls = []\n for url in url_list:\n if self._is_valid_url(url):\n valid_urls.append(url)\n else:\n logger.warning(f\"Invalid URL skipped: {url}\")\n\n if not valid_urls:\n return ToolResult(error=\"No valid URLs provided\")\n\n try:\n # Import crawl4ai components\n from crawl4ai import (\n AsyncWebCrawler,\n BrowserConfig,\n CacheMode,\n CrawlerRunConfig,\n )\n\n # Configure browser settings\n browser_config = BrowserConfig(\n headless=True,\n verbose=False,\n browser_type=\"chromium\",\n ignore_https_errors=True,\n java_script_enabled=True,\n )\n\n # Configure crawler settings\n run_config = CrawlerRunConfig(\n cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,\n word_count_threshold=word_count_threshold,\n process_iframes=True,\n remove_overlay_elements=True,\n excluded_tags=[\"script\", \"style\"],\n page_timeout=timeout * 1000, # Convert to milliseconds\n verbose=False,\n wait_until=\"domcontentloaded\",\n )\n\n results = []\n successful_count = 0\n failed_count = 0\n\n # Process each URL\n async with AsyncWebCrawler(config=browser_config) as crawler:\n for url in valid_urls:\n 
try:\n logger.info(f\"🕷️ Crawling URL: {url}\")\n start_time = asyncio.get_event_loop().time()\n\n result = await crawler.arun(url=url, config=run_config)\n\n end_time = asyncio.get_event_loop().time()\n execution_time = end_time - start_time\n\n if result.success:\n # Count words in markdown\n word_count = 0\n if hasattr(result, \"markdown\") and result.markdown:\n word_count = len(result.markdown.split())\n\n # Count links\n links_count = 0\n if hasattr(result, \"links\") and result.links:\n internal_links = result.links.get(\"internal\", [])\n external_links = result.links.get(\"external\", [])\n links_count = len(internal_links) + len(external_links)\n\n # Count images\n images_count = 0\n if hasattr(result, \"media\") and result.media:\n images = result.media.get(\"images\", [])\n images_count = len(images)\n\n results.append(\n {\n \"url\": url,\n \"success\": True,\n \"status_code\": getattr(result, \"status_code\", 200),\n \"title\": result.metadata.get(\"title\")\n if result.metadata\n else None,\n \"markdown\": result.markdown\n if hasattr(result, \"markdown\")\n else None,\n \"word_count\": word_count,\n \"links_count\": links_count,\n \"images_count\": images_count,\n \"execution_time\": execution_time,\n }\n )\n successful_count += 1\n logger.info(\n f\"✅ Successfully crawled {url} in {execution_time:.2f}s\"\n )\n\n else:\n results.append(\n {\n \"url\": url,\n \"success\": False,\n \"error_message\": getattr(\n result, \"error_message\", \"Unknown error\"\n ),\n \"execution_time\": execution_time,\n }\n )\n failed_count += 1\n logger.warning(f\"❌ Failed to crawl {url}\")\n\n except Exception as e:\n error_msg = f\"Error crawling {url}: {str(e)}\"\n logger.error(error_msg)\n results.append(\n {\"url\": url, \"success\": False, \"error_message\": error_msg}\n )\n failed_count += 1\n\n # Format output\n output_lines = [f\"🕷️ Crawl4AI Results Summary:\"]\n output_lines.append(f\"📊 Total URLs: {len(valid_urls)}\")\n output_lines.append(f\"✅ Successful: 
{successful_count}\")\n output_lines.append(f\"❌ Failed: {failed_count}\")\n output_lines.append(\"\")\n\n for i, result in enumerate(results, 1):\n output_lines.append(f\"{i}. {result['url']}\")\n\n if result[\"success\"]:\n output_lines.append(\n f\" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})\"\n )\n if result.get(\"title\"):\n output_lines.append(f\" 📄 Title: {result['title']}\")\n\n if result.get(\"markdown\"):\n # Show first 300 characters of markdown content\n content_preview = result[\"markdown\"]\n if len(result[\"markdown\"]) > 300:\n content_preview += \"...\"\n output_lines.append(f\" 📝 Content: {content_preview}\")\n\n output_lines.append(\n f\" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images\"\n )\n\n if result.get(\"execution_time\"):\n output_lines.append(\n f\" ⏱️ Time: {result['execution_time']:.2f}s\"\n )\n else:\n output_lines.append(f\" ❌ Status: Failed\")\n if result.get(\"error_message\"):\n output_lines.append(f\" 🚫 Error: {result['error_message']}\")\n\n output_lines.append(\"\")\n\n return ToolResult(output=\"\\n\".join(output_lines))\n\n except ImportError as e:\n error_msg = \"Crawl4AI is not installed. 
Please install it with: pip install crawl4ai\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n except Exception as e:\n error_msg = f\"Crawl4AI execution failed: {str(e)}\"\n logger.error(error_msg)\n return ToolResult(error=error_msg)\n\n def _is_valid_url(self, url: str) -> bool:\n \"\"\"Validate if a URL is properly formatted.\"\"\"\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc]) and result.scheme in [\n \"http\",\n \"https\",\n ]\n except Exception:\n return False\n"], ["/OpenManus/app/flow/planning.py", "import json\nimport time\nfrom enum import Enum\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.schema import AgentState, Message, ToolChoice\nfrom app.tool import PlanningTool\n\n\nclass PlanStepStatus(str, Enum):\n \"\"\"Enum class defining possible statuses of a plan step\"\"\"\n\n NOT_STARTED = \"not_started\"\n IN_PROGRESS = \"in_progress\"\n COMPLETED = \"completed\"\n BLOCKED = \"blocked\"\n\n @classmethod\n def get_all_statuses(cls) -> list[str]:\n \"\"\"Return a list of all possible step status values\"\"\"\n return [status.value for status in cls]\n\n @classmethod\n def get_active_statuses(cls) -> list[str]:\n \"\"\"Return a list of values representing active statuses (not started or in progress)\"\"\"\n return [cls.NOT_STARTED.value, cls.IN_PROGRESS.value]\n\n @classmethod\n def get_status_marks(cls) -> Dict[str, str]:\n \"\"\"Return a mapping of statuses to their marker symbols\"\"\"\n return {\n cls.COMPLETED.value: \"[✓]\",\n cls.IN_PROGRESS.value: \"[→]\",\n cls.BLOCKED.value: \"[!]\",\n cls.NOT_STARTED.value: \"[ ]\",\n }\n\n\nclass PlanningFlow(BaseFlow):\n \"\"\"A flow that manages planning and execution of tasks using agents.\"\"\"\n\n llm: LLM = Field(default_factory=lambda: LLM())\n planning_tool: PlanningTool = 
Field(default_factory=PlanningTool)\n executor_keys: List[str] = Field(default_factory=list)\n active_plan_id: str = Field(default_factory=lambda: f\"plan_{int(time.time())}\")\n current_step_index: Optional[int] = None\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Set executor keys before super().__init__\n if \"executors\" in data:\n data[\"executor_keys\"] = data.pop(\"executors\")\n\n # Set plan ID if provided\n if \"plan_id\" in data:\n data[\"active_plan_id\"] = data.pop(\"plan_id\")\n\n # Initialize the planning tool if not provided\n if \"planning_tool\" not in data:\n planning_tool = PlanningTool()\n data[\"planning_tool\"] = planning_tool\n\n # Call parent's init with the processed data\n super().__init__(agents, **data)\n\n # Set executor_keys to all agent keys if not specified\n if not self.executor_keys:\n self.executor_keys = list(self.agents.keys())\n\n def get_executor(self, step_type: Optional[str] = None) -> BaseAgent:\n \"\"\"\n Get an appropriate executor agent for the current step.\n Can be extended to select agents based on step type/requirements.\n \"\"\"\n # If step type is provided and matches an agent key, use that agent\n if step_type and step_type in self.agents:\n return self.agents[step_type]\n\n # Otherwise use the first available executor or fall back to primary agent\n for key in self.executor_keys:\n if key in self.agents:\n return self.agents[key]\n\n # Fallback to primary agent\n return self.primary_agent\n\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the planning flow with agents.\"\"\"\n try:\n if not self.primary_agent:\n raise ValueError(\"No primary agent available\")\n\n # Create initial plan if input provided\n if input_text:\n await self._create_initial_plan(input_text)\n\n # Verify plan was created successfully\n if self.active_plan_id not in self.planning_tool.plans:\n logger.error(\n f\"Plan creation failed. 
Plan ID {self.active_plan_id} not found in planning tool.\"\n )\n return f\"Failed to create plan for: {input_text}\"\n\n result = \"\"\n while True:\n # Get current step to execute\n self.current_step_index, step_info = await self._get_current_step_info()\n\n # Exit if no more steps or plan completed\n if self.current_step_index is None:\n result += await self._finalize_plan()\n break\n\n # Execute current step with appropriate agent\n step_type = step_info.get(\"type\") if step_info else None\n executor = self.get_executor(step_type)\n step_result = await self._execute_step(executor, step_info)\n result += step_result + \"\\n\"\n\n # Check if agent wants to terminate\n if hasattr(executor, \"state\") and executor.state == AgentState.FINISHED:\n break\n\n return result\n except Exception as e:\n logger.error(f\"Error in PlanningFlow: {str(e)}\")\n return f\"Execution failed: {str(e)}\"\n\n async def _create_initial_plan(self, request: str) -> None:\n \"\"\"Create an initial plan based on the request using the flow's LLM and PlanningTool.\"\"\"\n logger.info(f\"Creating initial plan with ID: {self.active_plan_id}\")\n\n system_message_content = (\n \"You are a planning assistant. Create a concise, actionable plan with clear steps. \"\n \"Focus on key milestones rather than detailed sub-steps. \"\n \"Optimize for clarity and efficiency.\"\n )\n agents_description = []\n for key in self.executor_keys:\n if key in self.agents:\n agents_description.append(\n {\n \"name\": key.upper(),\n \"description\": self.agents[key].description,\n }\n )\n if len(agents_description) > 1:\n # Add description of agents to select\n system_message_content += (\n f\"\\nNow we have {agents_description} agents. 
\"\n f\"The infomation of them are below: {json.dumps(agents_description)}\\n\"\n \"When creating steps in the planning tool, please specify the agent names using the format '[agent_name]'.\"\n )\n\n # Create a system message for plan creation\n system_message = Message.system_message(system_message_content)\n\n # Create a user message with the request\n user_message = Message.user_message(\n f\"Create a reasonable plan with clear steps to accomplish the task: {request}\"\n )\n\n # Call LLM with PlanningTool\n response = await self.llm.ask_tool(\n messages=[user_message],\n system_msgs=[system_message],\n tools=[self.planning_tool.to_param()],\n tool_choice=ToolChoice.AUTO,\n )\n\n # Process tool calls if present\n if response.tool_calls:\n for tool_call in response.tool_calls:\n if tool_call.function.name == \"planning\":\n # Parse the arguments\n args = tool_call.function.arguments\n if isinstance(args, str):\n try:\n args = json.loads(args)\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse tool arguments: {args}\")\n continue\n\n # Ensure plan_id is set correctly and execute the tool\n args[\"plan_id\"] = self.active_plan_id\n\n # Execute the tool via ToolCollection instead of directly\n result = await self.planning_tool.execute(**args)\n\n logger.info(f\"Plan creation result: {str(result)}\")\n return\n\n # If execution reached here, create a default plan\n logger.warning(\"Creating default plan\")\n\n # Create default plan using the ToolCollection\n await self.planning_tool.execute(\n **{\n \"command\": \"create\",\n \"plan_id\": self.active_plan_id,\n \"title\": f\"Plan for: {request[:50]}{'...' 
if len(request) > 50 else ''}\",\n \"steps\": [\"Analyze request\", \"Execute task\", \"Verify results\"],\n }\n )\n\n async def _get_current_step_info(self) -> tuple[Optional[int], Optional[dict]]:\n \"\"\"\n Parse the current plan to identify the first non-completed step's index and info.\n Returns (None, None) if no active step is found.\n \"\"\"\n if (\n not self.active_plan_id\n or self.active_plan_id not in self.planning_tool.plans\n ):\n logger.error(f\"Plan with ID {self.active_plan_id} not found\")\n return None, None\n\n try:\n # Direct access to plan data from planning tool storage\n plan_data = self.planning_tool.plans[self.active_plan_id]\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Find first non-completed step\n for i, step in enumerate(steps):\n if i >= len(step_statuses):\n status = PlanStepStatus.NOT_STARTED.value\n else:\n status = step_statuses[i]\n\n if status in PlanStepStatus.get_active_statuses():\n # Extract step type/category if available\n step_info = {\"text\": step}\n\n # Try to extract step type from the text (e.g., [SEARCH] or [CODE])\n import re\n\n type_match = re.search(r\"\\[([A-Z_]+)\\]\", step)\n if type_match:\n step_info[\"type\"] = type_match.group(1).lower()\n\n # Mark current step as in_progress\n try:\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=i,\n step_status=PlanStepStatus.IN_PROGRESS.value,\n )\n except Exception as e:\n logger.warning(f\"Error marking step as in_progress: {e}\")\n # Update step status directly if needed\n if i < len(step_statuses):\n step_statuses[i] = PlanStepStatus.IN_PROGRESS.value\n else:\n while len(step_statuses) < i:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n step_statuses.append(PlanStepStatus.IN_PROGRESS.value)\n\n plan_data[\"step_statuses\"] = step_statuses\n\n return i, step_info\n\n return None, None # No active step found\n\n except Exception as e:\n 
logger.warning(f\"Error finding current step index: {e}\")\n return None, None\n\n async def _execute_step(self, executor: BaseAgent, step_info: dict) -> str:\n \"\"\"Execute the current step with the specified agent using agent.run().\"\"\"\n # Prepare context for the agent with current plan status\n plan_status = await self._get_plan_text()\n step_text = step_info.get(\"text\", f\"Step {self.current_step_index}\")\n\n # Create a prompt for the agent to execute the current step\n step_prompt = f\"\"\"\n CURRENT PLAN STATUS:\n {plan_status}\n\n YOUR CURRENT TASK:\n You are now working on step {self.current_step_index}: \"{step_text}\"\n\n Please only execute this current step using the appropriate tools. When you're done, provide a summary of what you accomplished.\n \"\"\"\n\n # Use agent.run() to execute the step\n try:\n step_result = await executor.run(step_prompt)\n\n # Mark the step as completed after successful execution\n await self._mark_step_completed()\n\n return step_result\n except Exception as e:\n logger.error(f\"Error executing step {self.current_step_index}: {e}\")\n return f\"Error executing step {self.current_step_index}: {str(e)}\"\n\n async def _mark_step_completed(self) -> None:\n \"\"\"Mark the current step as completed.\"\"\"\n if self.current_step_index is None:\n return\n\n try:\n # Mark the step as completed\n await self.planning_tool.execute(\n command=\"mark_step\",\n plan_id=self.active_plan_id,\n step_index=self.current_step_index,\n step_status=PlanStepStatus.COMPLETED.value,\n )\n logger.info(\n f\"Marked step {self.current_step_index} as completed in plan {self.active_plan_id}\"\n )\n except Exception as e:\n logger.warning(f\"Failed to update plan status: {e}\")\n # Update step status directly in planning tool storage\n if self.active_plan_id in self.planning_tool.plans:\n plan_data = self.planning_tool.plans[self.active_plan_id]\n step_statuses = plan_data.get(\"step_statuses\", [])\n\n # Ensure the step_statuses list is long 
enough\n while len(step_statuses) <= self.current_step_index:\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n\n # Update the status\n step_statuses[self.current_step_index] = PlanStepStatus.COMPLETED.value\n plan_data[\"step_statuses\"] = step_statuses\n\n async def _get_plan_text(self) -> str:\n \"\"\"Get the current plan as formatted text.\"\"\"\n try:\n result = await self.planning_tool.execute(\n command=\"get\", plan_id=self.active_plan_id\n )\n return result.output if hasattr(result, \"output\") else str(result)\n except Exception as e:\n logger.error(f\"Error getting plan: {e}\")\n return self._generate_plan_text_from_storage()\n\n def _generate_plan_text_from_storage(self) -> str:\n \"\"\"Generate plan text directly from storage if the planning tool fails.\"\"\"\n try:\n if self.active_plan_id not in self.planning_tool.plans:\n return f\"Error: Plan with ID {self.active_plan_id} not found\"\n\n plan_data = self.planning_tool.plans[self.active_plan_id]\n title = plan_data.get(\"title\", \"Untitled Plan\")\n steps = plan_data.get(\"steps\", [])\n step_statuses = plan_data.get(\"step_statuses\", [])\n step_notes = plan_data.get(\"step_notes\", [])\n\n # Ensure step_statuses and step_notes match the number of steps\n while len(step_statuses) < len(steps):\n step_statuses.append(PlanStepStatus.NOT_STARTED.value)\n while len(step_notes) < len(steps):\n step_notes.append(\"\")\n\n # Count steps by status\n status_counts = {status: 0 for status in PlanStepStatus.get_all_statuses()}\n\n for status in step_statuses:\n if status in status_counts:\n status_counts[status] += 1\n\n completed = status_counts[PlanStepStatus.COMPLETED.value]\n total = len(steps)\n progress = (completed / total) * 100 if total > 0 else 0\n\n plan_text = f\"Plan: {title} (ID: {self.active_plan_id})\\n\"\n plan_text += \"=\" * len(plan_text) + \"\\n\\n\"\n\n plan_text += (\n f\"Progress: {completed}/{total} steps completed ({progress:.1f}%)\\n\"\n )\n plan_text += f\"Status: 
{status_counts[PlanStepStatus.COMPLETED.value]} completed, {status_counts[PlanStepStatus.IN_PROGRESS.value]} in progress, \"\n plan_text += f\"{status_counts[PlanStepStatus.BLOCKED.value]} blocked, {status_counts[PlanStepStatus.NOT_STARTED.value]} not started\\n\\n\"\n plan_text += \"Steps:\\n\"\n\n status_marks = PlanStepStatus.get_status_marks()\n\n for i, (step, status, notes) in enumerate(\n zip(steps, step_statuses, step_notes)\n ):\n # Use status marks to indicate step status\n status_mark = status_marks.get(\n status, status_marks[PlanStepStatus.NOT_STARTED.value]\n )\n\n plan_text += f\"{i}. {status_mark} {step}\\n\"\n if notes:\n plan_text += f\" Notes: {notes}\\n\"\n\n return plan_text\n except Exception as e:\n logger.error(f\"Error generating plan text from storage: {e}\")\n return f\"Error: Unable to retrieve plan with ID {self.active_plan_id}\"\n\n async def _finalize_plan(self) -> str:\n \"\"\"Finalize the plan and provide a summary using the flow's LLM directly.\"\"\"\n plan_text = await self._get_plan_text()\n\n # Create a summary using the flow's LLM directly\n try:\n system_message = Message.system_message(\n \"You are a planning assistant. Your task is to summarize the completed plan.\"\n )\n\n user_message = Message.user_message(\n f\"The plan has been completed. Here is the final plan status:\\n\\n{plan_text}\\n\\nPlease provide a summary of what was accomplished and any final thoughts.\"\n )\n\n response = await self.llm.ask(\n messages=[user_message], system_msgs=[system_message]\n )\n\n return f\"Plan completed:\\n\\n{response}\"\n except Exception as e:\n logger.error(f\"Error finalizing plan with LLM: {e}\")\n\n # Fallback to using an agent for the summary\n try:\n agent = self.primary_agent\n summary_prompt = f\"\"\"\n The plan has been completed. 
Here is the final plan status:\n\n {plan_text}\n\n Please provide a summary of what was accomplished and any final thoughts.\n \"\"\"\n summary = await agent.run(summary_prompt)\n return f\"Plan completed:\\n\\n{summary}\"\n except Exception as e2:\n logger.error(f\"Error finalizing plan with agent: {e2}\")\n return \"Plan completed. Error generating summary.\"\n"], ["/OpenManus/app/tool/chart_visualization/data_visualization.py", "import asyncio\nimport json\nimport os\nfrom typing import Any, Hashable\n\nimport pandas as pd\nfrom pydantic import Field, model_validator\n\nfrom app.config import config\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom app.tool.base import BaseTool\n\n\nclass DataVisualization(BaseTool):\n name: str = \"data_visualization\"\n description: str = \"\"\"Visualize statistical chart or Add insights in chart with JSON info from visualization_preparation tool. You can do steps as follows:\n1. Visualize statistical chart\n2. Choose insights into chart based on step 1 (Optional)\nOutputs:\n1. Charts (png/html)\n2. 
Charts Insights (.md)(Optional)\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"json_path\": {\n \"type\": \"string\",\n \"description\": \"\"\"file path of json info with \".json\" in the end\"\"\",\n },\n \"output_type\": {\n \"description\": \"Rendering format (html=interactive)\",\n \"type\": \"string\",\n \"default\": \"html\",\n \"enum\": [\"png\", \"html\"],\n },\n \"tool_type\": {\n \"description\": \"visualize chart or add insights\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"language\": {\n \"description\": \"english(en) / chinese(zh)\",\n \"type\": \"string\",\n \"default\": \"en\",\n \"enum\": [\"zh\", \"en\"],\n },\n },\n \"required\": [\"code\"],\n }\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n\n @model_validator(mode=\"after\")\n def initialize_llm(self):\n \"\"\"Initialize llm with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n return self\n\n def get_file_path(\n self,\n json_info: list[dict[str, str]],\n path_str: str,\n directory: str = None,\n ) -> list[str]:\n res = []\n for item in json_info:\n if os.path.exists(item[path_str]):\n res.append(item[path_str])\n elif os.path.exists(\n os.path.join(f\"{directory or config.workspace_root}\", item[path_str])\n ):\n res.append(\n os.path.join(\n f\"{directory or config.workspace_root}\", item[path_str]\n )\n )\n else:\n raise Exception(f\"No such file or directory: {item[path_str]}\")\n return res\n\n def success_output_template(self, result: list[dict[str, str]]) -> str:\n content = \"\"\n if len(result) == 0:\n return \"Is EMPTY!\"\n for item in result:\n content += f\"\"\"## {item['title']}\\nChart saved in: {item['chart_path']}\"\"\"\n if \"insight_path\" in item and item[\"insight_path\"] and \"insight_md\" in item:\n content += \"\\n\" + item[\"insight_md\"]\n 
else:\n content += \"\\n\"\n return f\"Chart Generated Successful!\\n{content}\"\n\n async def data_visualization(\n self, json_info: list[dict[str, str]], output_type: str, language: str\n ) -> str:\n data_list = []\n csv_file_path = self.get_file_path(json_info, \"csvFilePath\")\n for index, item in enumerate(json_info):\n df = pd.read_csv(csv_file_path[index], encoding=\"utf-8\")\n df = df.astype(object)\n df = df.where(pd.notnull(df), None)\n data_dict_list = df.to_json(orient=\"records\", force_ascii=False)\n\n data_list.append(\n {\n \"file_name\": os.path.basename(csv_file_path[index]).replace(\n \".csv\", \"\"\n ),\n \"dict_data\": data_dict_list,\n \"chartTitle\": item[\"chartTitle\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n dict_data=item[\"dict_data\"],\n chart_description=item[\"chartTitle\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"visualization\",\n language=language,\n )\n for item in data_list\n ]\n\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n csv_path = csv_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {csv_path}: {result['error']}\")\n else:\n success_list.append(\n {\n **result,\n \"title\": json_info[index][\"chartTitle\"],\n }\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error chart generated{'\\n'.join(error_list)}\\n{self.success_output_template(success_list)}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{self.success_output_template(success_list)}\"}\n\n async def add_insighs(\n self, json_info: list[dict[str, str]], output_type: str\n ) -> str:\n data_list = []\n chart_file_path = self.get_file_path(\n json_info, \"chartPath\", os.path.join(config.workspace_root, \"visualization\")\n )\n for index, item in enumerate(json_info):\n if \"insights_id\" in item:\n data_list.append(\n {\n \"file_name\": 
os.path.basename(chart_file_path[index]).replace(\n f\".{output_type}\", \"\"\n ),\n \"insights_id\": item[\"insights_id\"],\n }\n )\n tasks = [\n self.invoke_vmind(\n insights_id=item[\"insights_id\"],\n file_name=item[\"file_name\"],\n output_type=output_type,\n task_type=\"insight\",\n )\n for item in data_list\n ]\n results = await asyncio.gather(*tasks)\n error_list = []\n success_list = []\n for index, result in enumerate(results):\n chart_path = chart_file_path[index]\n if \"error\" in result and \"chart_path\" not in result:\n error_list.append(f\"Error in {chart_path}: {result['error']}\")\n else:\n success_list.append(chart_path)\n success_template = (\n f\"# Charts Update with Insights\\n{','.join(success_list)}\"\n if len(success_list) > 0\n else \"\"\n )\n if len(error_list) > 0:\n return {\n \"observation\": f\"# Error in chart insights:{'\\n'.join(error_list)}\\n{success_template}\",\n \"success\": False,\n }\n else:\n return {\"observation\": f\"{success_template}\"}\n\n async def execute(\n self,\n json_path: str,\n output_type: str | None = \"html\",\n tool_type: str | None = \"visualization\",\n language: str | None = \"en\",\n ) -> str:\n try:\n logger.info(f\"📈 data_visualization with {json_path} in: {tool_type} \")\n with open(json_path, \"r\", encoding=\"utf-8\") as file:\n json_info = json.load(file)\n if tool_type == \"visualization\":\n return await self.data_visualization(json_info, output_type, language)\n else:\n return await self.add_insighs(json_info, output_type)\n except Exception as e:\n return {\n \"observation\": f\"Error: {e}\",\n \"success\": False,\n }\n\n async def invoke_vmind(\n self,\n file_name: str,\n output_type: str,\n task_type: str,\n insights_id: list[str] = None,\n dict_data: list[dict[Hashable, Any]] = None,\n chart_description: str = None,\n language: str = \"en\",\n ):\n llm_config = {\n \"base_url\": self.llm.base_url,\n \"model\": self.llm.model,\n \"api_key\": self.llm.api_key,\n }\n vmind_params = {\n 
\"llm_config\": llm_config,\n \"user_prompt\": chart_description,\n \"dataset\": dict_data,\n \"file_name\": file_name,\n \"output_type\": output_type,\n \"insights_id\": insights_id,\n \"task_type\": task_type,\n \"directory\": str(config.workspace_root),\n \"language\": language,\n }\n # build async sub process\n process = await asyncio.create_subprocess_exec(\n \"npx\",\n \"ts-node\",\n \"src/chartVisualize.ts\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n cwd=os.path.dirname(__file__),\n )\n input_json = json.dumps(vmind_params, ensure_ascii=False).encode(\"utf-8\")\n try:\n stdout, stderr = await process.communicate(input_json)\n stdout_str = stdout.decode(\"utf-8\")\n stderr_str = stderr.decode(\"utf-8\")\n if process.returncode == 0:\n return json.loads(stdout_str)\n else:\n return {\"error\": f\"Node.js Error: {stderr_str}\"}\n except Exception as e:\n return {\"error\": f\"Subprocess Error: {str(e)}\"}\n"], ["/OpenManus/app/config.py", "import json\nimport threading\nimport tomllib\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\ndef get_project_root() -> Path:\n \"\"\"Get the project root directory\"\"\"\n return Path(__file__).resolve().parent.parent\n\n\nPROJECT_ROOT = get_project_root()\nWORKSPACE_ROOT = PROJECT_ROOT / \"workspace\"\n\n\nclass LLMSettings(BaseModel):\n model: str = Field(..., description=\"Model name\")\n base_url: str = Field(..., description=\"API base URL\")\n api_key: str = Field(..., description=\"API key\")\n max_tokens: int = Field(4096, description=\"Maximum number of tokens per request\")\n max_input_tokens: Optional[int] = Field(\n None,\n description=\"Maximum input tokens to use across all requests (None for unlimited)\",\n )\n temperature: float = Field(1.0, description=\"Sampling temperature\")\n api_type: str = Field(..., description=\"Azure, Openai, or Ollama\")\n api_version: str = Field(..., 
description=\"Azure Openai version if AzureOpenai\")\n\n\nclass ProxySettings(BaseModel):\n server: str = Field(None, description=\"Proxy server address\")\n username: Optional[str] = Field(None, description=\"Proxy username\")\n password: Optional[str] = Field(None, description=\"Proxy password\")\n\n\nclass SearchSettings(BaseModel):\n engine: str = Field(default=\"Google\", description=\"Search engine the llm to use\")\n fallback_engines: List[str] = Field(\n default_factory=lambda: [\"DuckDuckGo\", \"Baidu\", \"Bing\"],\n description=\"Fallback search engines to try if the primary engine fails\",\n )\n retry_delay: int = Field(\n default=60,\n description=\"Seconds to wait before retrying all engines again after they all fail\",\n )\n max_retries: int = Field(\n default=3,\n description=\"Maximum number of times to retry all engines when all fail\",\n )\n lang: str = Field(\n default=\"en\",\n description=\"Language code for search results (e.g., en, zh, fr)\",\n )\n country: str = Field(\n default=\"us\",\n description=\"Country code for search results (e.g., us, cn, uk)\",\n )\n\n\nclass RunflowSettings(BaseModel):\n use_data_analysis_agent: bool = Field(\n default=False, description=\"Enable data analysis agent in run flow\"\n )\n\n\nclass BrowserSettings(BaseModel):\n headless: bool = Field(False, description=\"Whether to run browser in headless mode\")\n disable_security: bool = Field(\n True, description=\"Disable browser security features\"\n )\n extra_chromium_args: List[str] = Field(\n default_factory=list, description=\"Extra arguments to pass to the browser\"\n )\n chrome_instance_path: Optional[str] = Field(\n None, description=\"Path to a Chrome instance to use\"\n )\n wss_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via WebSocket\"\n )\n cdp_url: Optional[str] = Field(\n None, description=\"Connect to a browser instance via CDP\"\n )\n proxy: Optional[ProxySettings] = Field(\n None, description=\"Proxy settings 
for the browser\"\n )\n max_content_length: int = Field(\n 2000, description=\"Maximum length for content retrieval operations\"\n )\n\n\nclass SandboxSettings(BaseModel):\n \"\"\"Configuration for the execution sandbox\"\"\"\n\n use_sandbox: bool = Field(False, description=\"Whether to use the sandbox\")\n image: str = Field(\"python:3.12-slim\", description=\"Base image\")\n work_dir: str = Field(\"/workspace\", description=\"Container working directory\")\n memory_limit: str = Field(\"512m\", description=\"Memory limit\")\n cpu_limit: float = Field(1.0, description=\"CPU limit\")\n timeout: int = Field(300, description=\"Default command timeout (seconds)\")\n network_enabled: bool = Field(\n False, description=\"Whether network access is allowed\"\n )\n\n\nclass MCPServerConfig(BaseModel):\n \"\"\"Configuration for a single MCP server\"\"\"\n\n type: str = Field(..., description=\"Server connection type (sse or stdio)\")\n url: Optional[str] = Field(None, description=\"Server URL for SSE connections\")\n command: Optional[str] = Field(None, description=\"Command for stdio connections\")\n args: List[str] = Field(\n default_factory=list, description=\"Arguments for stdio command\"\n )\n\n\nclass MCPSettings(BaseModel):\n \"\"\"Configuration for MCP (Model Context Protocol)\"\"\"\n\n server_reference: str = Field(\n \"app.mcp.server\", description=\"Module reference for the MCP server\"\n )\n servers: Dict[str, MCPServerConfig] = Field(\n default_factory=dict, description=\"MCP server configurations\"\n )\n\n @classmethod\n def load_server_config(cls) -> Dict[str, MCPServerConfig]:\n \"\"\"Load MCP server configuration from JSON file\"\"\"\n config_path = PROJECT_ROOT / \"config\" / \"mcp.json\"\n\n try:\n config_file = config_path if config_path.exists() else None\n if not config_file:\n return {}\n\n with config_file.open() as f:\n data = json.load(f)\n servers = {}\n\n for server_id, server_config in data.get(\"mcpServers\", {}).items():\n servers[server_id] = 
MCPServerConfig(\n type=server_config[\"type\"],\n url=server_config.get(\"url\"),\n command=server_config.get(\"command\"),\n args=server_config.get(\"args\", []),\n )\n return servers\n except Exception as e:\n raise ValueError(f\"Failed to load MCP server config: {e}\")\n\n\nclass AppConfig(BaseModel):\n llm: Dict[str, LLMSettings]\n sandbox: Optional[SandboxSettings] = Field(\n None, description=\"Sandbox configuration\"\n )\n browser_config: Optional[BrowserSettings] = Field(\n None, description=\"Browser configuration\"\n )\n search_config: Optional[SearchSettings] = Field(\n None, description=\"Search configuration\"\n )\n mcp_config: Optional[MCPSettings] = Field(None, description=\"MCP configuration\")\n run_flow_config: Optional[RunflowSettings] = Field(\n None, description=\"Run flow configuration\"\n )\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass Config:\n _instance = None\n _lock = threading.Lock()\n _initialized = False\n\n def __new__(cls):\n if cls._instance is None:\n with cls._lock:\n if cls._instance is None:\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n if not self._initialized:\n with self._lock:\n if not self._initialized:\n self._config = None\n self._load_initial_config()\n self._initialized = True\n\n @staticmethod\n def _get_config_path() -> Path:\n root = PROJECT_ROOT\n config_path = root / \"config\" / \"config.toml\"\n if config_path.exists():\n return config_path\n example_path = root / \"config\" / \"config.example.toml\"\n if example_path.exists():\n return example_path\n raise FileNotFoundError(\"No configuration file found in config directory\")\n\n def _load_config(self) -> dict:\n config_path = self._get_config_path()\n with config_path.open(\"rb\") as f:\n return tomllib.load(f)\n\n def _load_initial_config(self):\n raw_config = self._load_config()\n base_llm = raw_config.get(\"llm\", {})\n llm_overrides = {\n k: v for k, v in raw_config.get(\"llm\", {}).items() if 
isinstance(v, dict)\n }\n\n default_settings = {\n \"model\": base_llm.get(\"model\"),\n \"base_url\": base_llm.get(\"base_url\"),\n \"api_key\": base_llm.get(\"api_key\"),\n \"max_tokens\": base_llm.get(\"max_tokens\", 4096),\n \"max_input_tokens\": base_llm.get(\"max_input_tokens\"),\n \"temperature\": base_llm.get(\"temperature\", 1.0),\n \"api_type\": base_llm.get(\"api_type\", \"\"),\n \"api_version\": base_llm.get(\"api_version\", \"\"),\n }\n\n # handle browser config.\n browser_config = raw_config.get(\"browser\", {})\n browser_settings = None\n\n if browser_config:\n # handle proxy settings.\n proxy_config = browser_config.get(\"proxy\", {})\n proxy_settings = None\n\n if proxy_config and proxy_config.get(\"server\"):\n proxy_settings = ProxySettings(\n **{\n k: v\n for k, v in proxy_config.items()\n if k in [\"server\", \"username\", \"password\"] and v\n }\n )\n\n # filter valid browser config parameters.\n valid_browser_params = {\n k: v\n for k, v in browser_config.items()\n if k in BrowserSettings.__annotations__ and v is not None\n }\n\n # if there is proxy settings, add it to the parameters.\n if proxy_settings:\n valid_browser_params[\"proxy\"] = proxy_settings\n\n # only create BrowserSettings when there are valid parameters.\n if valid_browser_params:\n browser_settings = BrowserSettings(**valid_browser_params)\n\n search_config = raw_config.get(\"search\", {})\n search_settings = None\n if search_config:\n search_settings = SearchSettings(**search_config)\n sandbox_config = raw_config.get(\"sandbox\", {})\n if sandbox_config:\n sandbox_settings = SandboxSettings(**sandbox_config)\n else:\n sandbox_settings = SandboxSettings()\n\n mcp_config = raw_config.get(\"mcp\", {})\n mcp_settings = None\n if mcp_config:\n # Load server configurations from JSON\n mcp_config[\"servers\"] = MCPSettings.load_server_config()\n mcp_settings = MCPSettings(**mcp_config)\n else:\n mcp_settings = MCPSettings(servers=MCPSettings.load_server_config())\n\n 
run_flow_config = raw_config.get(\"runflow\")\n if run_flow_config:\n run_flow_settings = RunflowSettings(**run_flow_config)\n else:\n run_flow_settings = RunflowSettings()\n config_dict = {\n \"llm\": {\n \"default\": default_settings,\n **{\n name: {**default_settings, **override_config}\n for name, override_config in llm_overrides.items()\n },\n },\n \"sandbox\": sandbox_settings,\n \"browser_config\": browser_settings,\n \"search_config\": search_settings,\n \"mcp_config\": mcp_settings,\n \"run_flow_config\": run_flow_settings,\n }\n\n self._config = AppConfig(**config_dict)\n\n @property\n def llm(self) -> Dict[str, LLMSettings]:\n return self._config.llm\n\n @property\n def sandbox(self) -> SandboxSettings:\n return self._config.sandbox\n\n @property\n def browser_config(self) -> Optional[BrowserSettings]:\n return self._config.browser_config\n\n @property\n def search_config(self) -> Optional[SearchSettings]:\n return self._config.search_config\n\n @property\n def mcp_config(self) -> MCPSettings:\n \"\"\"Get the MCP configuration\"\"\"\n return self._config.mcp_config\n\n @property\n def run_flow_config(self) -> RunflowSettings:\n \"\"\"Get the Run Flow configuration\"\"\"\n return self._config.run_flow_config\n\n @property\n def workspace_root(self) -> Path:\n \"\"\"Get the workspace root directory\"\"\"\n return WORKSPACE_ROOT\n\n @property\n def root_path(self) -> Path:\n \"\"\"Get the root path of the application\"\"\"\n return PROJECT_ROOT\n\n\nconfig = Config()\n"], ["/OpenManus/app/schema.py", "from enum import Enum\nfrom typing import Any, List, Literal, Optional, Union\n\nfrom pydantic import BaseModel, Field\n\n\nclass Role(str, Enum):\n \"\"\"Message role options\"\"\"\n\n SYSTEM = \"system\"\n USER = \"user\"\n ASSISTANT = \"assistant\"\n TOOL = \"tool\"\n\n\nROLE_VALUES = tuple(role.value for role in Role)\nROLE_TYPE = Literal[ROLE_VALUES] # type: ignore\n\n\nclass ToolChoice(str, Enum):\n \"\"\"Tool choice options\"\"\"\n\n NONE = \"none\"\n 
AUTO = \"auto\"\n REQUIRED = \"required\"\n\n\nTOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice)\nTOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore\n\n\nclass AgentState(str, Enum):\n \"\"\"Agent execution states\"\"\"\n\n IDLE = \"IDLE\"\n RUNNING = \"RUNNING\"\n FINISHED = \"FINISHED\"\n ERROR = \"ERROR\"\n\n\nclass Function(BaseModel):\n name: str\n arguments: str\n\n\nclass ToolCall(BaseModel):\n \"\"\"Represents a tool/function call in a message\"\"\"\n\n id: str\n type: str = \"function\"\n function: Function\n\n\nclass Message(BaseModel):\n \"\"\"Represents a chat message in the conversation\"\"\"\n\n role: ROLE_TYPE = Field(...) # type: ignore\n content: Optional[str] = Field(default=None)\n tool_calls: Optional[List[ToolCall]] = Field(default=None)\n name: Optional[str] = Field(default=None)\n tool_call_id: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n\n def __add__(self, other) -> List[\"Message\"]:\n \"\"\"支持 Message + list 或 Message + Message 的操作\"\"\"\n if isinstance(other, list):\n return [self] + other\n elif isinstance(other, Message):\n return [self, other]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'\"\n )\n\n def __radd__(self, other) -> List[\"Message\"]:\n \"\"\"支持 list + Message 的操作\"\"\"\n if isinstance(other, list):\n return other + [self]\n else:\n raise TypeError(\n f\"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'\"\n )\n\n def to_dict(self) -> dict:\n \"\"\"Convert message to dictionary format\"\"\"\n message = {\"role\": self.role}\n if self.content is not None:\n message[\"content\"] = self.content\n if self.tool_calls is not None:\n message[\"tool_calls\"] = [tool_call.dict() for tool_call in self.tool_calls]\n if self.name is not None:\n message[\"name\"] = self.name\n if self.tool_call_id is not None:\n message[\"tool_call_id\"] = 
self.tool_call_id\n if self.base64_image is not None:\n message[\"base64_image\"] = self.base64_image\n return message\n\n @classmethod\n def user_message(\n cls, content: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a user message\"\"\"\n return cls(role=Role.USER, content=content, base64_image=base64_image)\n\n @classmethod\n def system_message(cls, content: str) -> \"Message\":\n \"\"\"Create a system message\"\"\"\n return cls(role=Role.SYSTEM, content=content)\n\n @classmethod\n def assistant_message(\n cls, content: Optional[str] = None, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create an assistant message\"\"\"\n return cls(role=Role.ASSISTANT, content=content, base64_image=base64_image)\n\n @classmethod\n def tool_message(\n cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None\n ) -> \"Message\":\n \"\"\"Create a tool message\"\"\"\n return cls(\n role=Role.TOOL,\n content=content,\n name=name,\n tool_call_id=tool_call_id,\n base64_image=base64_image,\n )\n\n @classmethod\n def from_tool_calls(\n cls,\n tool_calls: List[Any],\n content: Union[str, List[str]] = \"\",\n base64_image: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Create ToolCallsMessage from raw tool calls.\n\n Args:\n tool_calls: Raw tool calls from LLM\n content: Optional message content\n base64_image: Optional base64 encoded image\n \"\"\"\n formatted_calls = [\n {\"id\": call.id, \"function\": call.function.model_dump(), \"type\": \"function\"}\n for call in tool_calls\n ]\n return cls(\n role=Role.ASSISTANT,\n content=content,\n tool_calls=formatted_calls,\n base64_image=base64_image,\n **kwargs,\n )\n\n\nclass Memory(BaseModel):\n messages: List[Message] = Field(default_factory=list)\n max_messages: int = Field(default=100)\n\n def add_message(self, message: Message) -> None:\n \"\"\"Add a message to memory\"\"\"\n self.messages.append(message)\n # Optional: Implement message limit\n if len(self.messages) > 
self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def add_messages(self, messages: List[Message]) -> None:\n \"\"\"Add multiple messages to memory\"\"\"\n self.messages.extend(messages)\n # Optional: Implement message limit\n if len(self.messages) > self.max_messages:\n self.messages = self.messages[-self.max_messages :]\n\n def clear(self) -> None:\n \"\"\"Clear all messages\"\"\"\n self.messages.clear()\n\n def get_recent_messages(self, n: int) -> List[Message]:\n \"\"\"Get n most recent messages\"\"\"\n return self.messages[-n:]\n\n def to_dict_list(self) -> List[dict]:\n \"\"\"Convert messages to list of dicts\"\"\"\n return [msg.to_dict() for msg in self.messages]\n"], ["/OpenManus/app/tool/web_search.py", "import asyncio\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pydantic import BaseModel, ConfigDict, Field, model_validator\nfrom tenacity import retry, stop_after_attempt, wait_exponential\n\nfrom app.config import config\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.search import (\n BaiduSearchEngine,\n BingSearchEngine,\n DuckDuckGoSearchEngine,\n GoogleSearchEngine,\n WebSearchEngine,\n)\nfrom app.tool.search.base import SearchItem\n\n\nclass SearchResult(BaseModel):\n \"\"\"Represents a single search result returned by a search engine.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n position: int = Field(description=\"Position in search results\")\n url: str = Field(description=\"URL of the search result\")\n title: str = Field(default=\"\", description=\"Title of the search result\")\n description: str = Field(\n default=\"\", description=\"Description or snippet of the search result\"\n )\n source: str = Field(description=\"The search engine that provided this result\")\n raw_content: Optional[str] = Field(\n default=None, description=\"Raw content from the search result page if available\"\n )\n\n 
def __str__(self) -> str:\n \"\"\"String representation of a search result.\"\"\"\n return f\"{self.title} ({self.url})\"\n\n\nclass SearchMetadata(BaseModel):\n \"\"\"Metadata about the search operation.\"\"\"\n\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n total_results: int = Field(description=\"Total number of results found\")\n language: str = Field(description=\"Language code used for the search\")\n country: str = Field(description=\"Country code used for the search\")\n\n\nclass SearchResponse(ToolResult):\n \"\"\"Structured response from the web search tool, inheriting ToolResult.\"\"\"\n\n query: str = Field(description=\"The search query that was executed\")\n results: List[SearchResult] = Field(\n default_factory=list, description=\"List of search results\"\n )\n metadata: Optional[SearchMetadata] = Field(\n default=None, description=\"Metadata about the search\"\n )\n\n @model_validator(mode=\"after\")\n def populate_output(self) -> \"SearchResponse\":\n \"\"\"Populate output or error fields based on search results.\"\"\"\n if self.error:\n return self\n\n result_text = [f\"Search results for '{self.query}':\"]\n\n for i, result in enumerate(self.results, 1):\n # Add title with position number\n title = result.title.strip() or \"No title\"\n result_text.append(f\"\\n{i}. 
{title}\")\n\n # Add URL with proper indentation\n result_text.append(f\" URL: {result.url}\")\n\n # Add description if available\n if result.description.strip():\n result_text.append(f\" Description: {result.description}\")\n\n # Add content preview if available\n if result.raw_content:\n content_preview = result.raw_content[:1000].replace(\"\\n\", \" \").strip()\n if len(result.raw_content) > 1000:\n content_preview += \"...\"\n result_text.append(f\" Content: {content_preview}\")\n\n # Add metadata at the bottom if available\n if self.metadata:\n result_text.extend(\n [\n f\"\\nMetadata:\",\n f\"- Total results: {self.metadata.total_results}\",\n f\"- Language: {self.metadata.language}\",\n f\"- Country: {self.metadata.country}\",\n ]\n )\n\n self.output = \"\\n\".join(result_text)\n return self\n\n\nclass WebContentFetcher:\n \"\"\"Utility class for fetching web content.\"\"\"\n\n @staticmethod\n async def fetch_content(url: str, timeout: int = 10) -> Optional[str]:\n \"\"\"\n Fetch and extract the main content from a webpage.\n\n Args:\n url: The URL to fetch content from\n timeout: Request timeout in seconds\n\n Returns:\n Extracted text content or None if fetching fails\n \"\"\"\n headers = {\n \"WebSearch\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\"\n }\n\n try:\n # Use asyncio to run requests in a thread pool\n response = await asyncio.get_event_loop().run_in_executor(\n None, lambda: requests.get(url, headers=headers, timeout=timeout)\n )\n\n if response.status_code != 200:\n logger.warning(\n f\"Failed to fetch content from {url}: HTTP {response.status_code}\"\n )\n return None\n\n # Parse HTML with BeautifulSoup\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Remove script and style elements\n for script in soup([\"script\", \"style\", \"header\", \"footer\", \"nav\"]):\n script.extract()\n\n # Get text content\n text = soup.get_text(separator=\"\\n\", 
strip=True)\n\n # Clean up whitespace and limit size (100KB max)\n text = \" \".join(text.split())\n return text[:10000] if text else None\n\n except Exception as e:\n logger.warning(f\"Error fetching content from {url}: {e}\")\n return None\n\n\nclass WebSearch(BaseTool):\n \"\"\"Search the web for information using various search engines.\"\"\"\n\n name: str = \"web_search\"\n description: str = \"\"\"Search the web for real-time information about any topic.\n This tool returns comprehensive search results with relevant information, URLs, titles, and descriptions.\n If the primary search engine fails, it automatically falls back to alternative engines.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"query\": {\n \"type\": \"string\",\n \"description\": \"(required) The search query to submit to the search engine.\",\n },\n \"num_results\": {\n \"type\": \"integer\",\n \"description\": \"(optional) The number of search results to return. Default is 5.\",\n \"default\": 5,\n },\n \"lang\": {\n \"type\": \"string\",\n \"description\": \"(optional) Language code for search results (default: en).\",\n \"default\": \"en\",\n },\n \"country\": {\n \"type\": \"string\",\n \"description\": \"(optional) Country code for search results (default: us).\",\n \"default\": \"us\",\n },\n \"fetch_content\": {\n \"type\": \"boolean\",\n \"description\": \"(optional) Whether to fetch full content from result pages. 
Default is false.\",\n \"default\": False,\n },\n },\n \"required\": [\"query\"],\n }\n _search_engine: dict[str, WebSearchEngine] = {\n \"google\": GoogleSearchEngine(),\n \"baidu\": BaiduSearchEngine(),\n \"duckduckgo\": DuckDuckGoSearchEngine(),\n \"bing\": BingSearchEngine(),\n }\n content_fetcher: WebContentFetcher = WebContentFetcher()\n\n async def execute(\n self,\n query: str,\n num_results: int = 5,\n lang: Optional[str] = None,\n country: Optional[str] = None,\n fetch_content: bool = False,\n ) -> SearchResponse:\n \"\"\"\n Execute a Web search and return detailed search results.\n\n Args:\n query: The search query to submit to the search engine\n num_results: The number of search results to return (default: 5)\n lang: Language code for search results (default from config)\n country: Country code for search results (default from config)\n fetch_content: Whether to fetch content from result pages (default: False)\n\n Returns:\n A structured response containing search results and metadata\n \"\"\"\n # Get settings from config\n retry_delay = (\n getattr(config.search_config, \"retry_delay\", 60)\n if config.search_config\n else 60\n )\n max_retries = (\n getattr(config.search_config, \"max_retries\", 3)\n if config.search_config\n else 3\n )\n\n # Use config values for lang and country if not specified\n if lang is None:\n lang = (\n getattr(config.search_config, \"lang\", \"en\")\n if config.search_config\n else \"en\"\n )\n\n if country is None:\n country = (\n getattr(config.search_config, \"country\", \"us\")\n if config.search_config\n else \"us\"\n )\n\n search_params = {\"lang\": lang, \"country\": country}\n\n # Try searching with retries when all engines fail\n for retry_count in range(max_retries + 1):\n results = await self._try_all_engines(query, num_results, search_params)\n\n if results:\n # Fetch content if requested\n if fetch_content:\n results = await self._fetch_content_for_results(results)\n\n # Return a successful structured response\n 
return SearchResponse(\n status=\"success\",\n query=query,\n results=results,\n metadata=SearchMetadata(\n total_results=len(results),\n language=lang,\n country=country,\n ),\n )\n\n if retry_count < max_retries:\n # All engines failed, wait and retry\n logger.warning(\n f\"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}...\"\n )\n await asyncio.sleep(retry_delay)\n else:\n logger.error(\n f\"All search engines failed after {max_retries} retries. Giving up.\"\n )\n\n # Return an error response\n return SearchResponse(\n query=query,\n error=\"All search engines failed to return results after multiple retries.\",\n results=[],\n )\n\n async def _try_all_engines(\n self, query: str, num_results: int, search_params: Dict[str, Any]\n ) -> List[SearchResult]:\n \"\"\"Try all search engines in the configured order.\"\"\"\n engine_order = self._get_engine_order()\n failed_engines = []\n\n for engine_name in engine_order:\n engine = self._search_engine[engine_name]\n logger.info(f\"🔎 Attempting search with {engine_name.capitalize()}...\")\n search_items = await self._perform_search_with_engine(\n engine, query, num_results, search_params\n )\n\n if not search_items:\n continue\n\n if failed_engines:\n logger.info(\n f\"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}\"\n )\n\n # Transform search items into structured results\n return [\n SearchResult(\n position=i + 1,\n url=item.url,\n title=item.title\n or f\"Result {i+1}\", # Ensure we always have a title\n description=item.description or \"\",\n source=engine_name,\n )\n for i, item in enumerate(search_items)\n ]\n\n if failed_engines:\n logger.error(f\"All search engines failed: {', '.join(failed_engines)}\")\n return []\n\n async def _fetch_content_for_results(\n self, results: List[SearchResult]\n ) -> List[SearchResult]:\n \"\"\"Fetch and add web content to search results.\"\"\"\n if not results:\n return []\n\n # 
Create tasks for each result\n tasks = [self._fetch_single_result_content(result) for result in results]\n\n # Type annotation to help type checker\n fetched_results = await asyncio.gather(*tasks)\n\n # Explicit validation of return type\n return [\n (\n result\n if isinstance(result, SearchResult)\n else SearchResult(**result.dict())\n )\n for result in fetched_results\n ]\n\n async def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:\n \"\"\"Fetch content for a single search result.\"\"\"\n if result.url:\n content = await self.content_fetcher.fetch_content(result.url)\n if content:\n result.raw_content = content\n return result\n\n def _get_engine_order(self) -> List[str]:\n \"\"\"Determines the order in which to try search engines.\"\"\"\n preferred = (\n getattr(config.search_config, \"engine\", \"google\").lower()\n if config.search_config\n else \"google\"\n )\n fallbacks = (\n [engine.lower() for engine in config.search_config.fallback_engines]\n if config.search_config\n and hasattr(config.search_config, \"fallback_engines\")\n else []\n )\n\n # Start with preferred engine, then fallbacks, then remaining engines\n engine_order = [preferred] if preferred in self._search_engine else []\n engine_order.extend(\n [\n fb\n for fb in fallbacks\n if fb in self._search_engine and fb not in engine_order\n ]\n )\n engine_order.extend([e for e in self._search_engine if e not in engine_order])\n\n return engine_order\n\n @retry(\n stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10)\n )\n async def _perform_search_with_engine(\n self,\n engine: WebSearchEngine,\n query: str,\n num_results: int,\n search_params: Dict[str, Any],\n ) -> List[SearchItem]:\n \"\"\"Execute search with the given engine and parameters.\"\"\"\n return await asyncio.get_event_loop().run_in_executor(\n None,\n lambda: list(\n engine.perform_search(\n query,\n num_results=num_results,\n lang=search_params.get(\"lang\"),\n 
country=search_params.get(\"country\"),\n )\n ),\n )\n\n\nif __name__ == \"__main__\":\n web_search = WebSearch()\n search_response = asyncio.run(\n web_search.execute(\n query=\"Python programming\", fetch_content=True, num_results=1\n )\n )\n print(search_response.to_tool_result())\n"], ["/OpenManus/app/agent/toolcall.py", "import asyncio\nimport json\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom app.agent.react import ReActAgent\nfrom app.exceptions import TokenLimitExceeded\nfrom app.logger import logger\nfrom app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice\nfrom app.tool import CreateChatCompletion, Terminate, ToolCollection\n\n\nTOOL_CALL_REQUIRED = \"Tool calls required but none provided\"\n\n\nclass ToolCallAgent(ReActAgent):\n \"\"\"Base agent class for handling tool/function calls with enhanced abstraction\"\"\"\n\n name: str = \"toolcall\"\n description: str = \"an agent that can execute tool calls.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n available_tools: ToolCollection = ToolCollection(\n CreateChatCompletion(), Terminate()\n )\n tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n tool_calls: List[ToolCall] = Field(default_factory=list)\n _current_base64_image: Optional[str] = None\n\n max_steps: int = 30\n max_observe: Optional[Union[int, bool]] = None\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools\"\"\"\n if self.next_step_prompt:\n user_msg = Message.user_message(self.next_step_prompt)\n self.messages += [user_msg]\n\n try:\n # Get response with tool options\n response = await self.llm.ask_tool(\n messages=self.messages,\n system_msgs=(\n [Message.system_message(self.system_prompt)]\n if self.system_prompt\n else None\n 
),\n tools=self.available_tools.to_params(),\n tool_choice=self.tool_choices,\n )\n except ValueError:\n raise\n except Exception as e:\n # Check if this is a RetryError containing TokenLimitExceeded\n if hasattr(e, \"__cause__\") and isinstance(e.__cause__, TokenLimitExceeded):\n token_limit_error = e.__cause__\n logger.error(\n f\"🚨 Token limit error (from RetryError): {token_limit_error}\"\n )\n self.memory.add_message(\n Message.assistant_message(\n f\"Maximum token limit reached, cannot continue execution: {str(token_limit_error)}\"\n )\n )\n self.state = AgentState.FINISHED\n return False\n raise\n\n self.tool_calls = tool_calls = (\n response.tool_calls if response and response.tool_calls else []\n )\n content = response.content if response and response.content else \"\"\n\n # Log response info\n logger.info(f\"✨ {self.name}'s thoughts: {content}\")\n logger.info(\n f\"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use\"\n )\n if tool_calls:\n logger.info(\n f\"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}\"\n )\n logger.info(f\"🔧 Tool arguments: {tool_calls[0].function.arguments}\")\n\n try:\n if response is None:\n raise RuntimeError(\"No response received from the LLM\")\n\n # Handle different tool_choices modes\n if self.tool_choices == ToolChoice.NONE:\n if tool_calls:\n logger.warning(\n f\"🤔 Hmm, {self.name} tried to use tools when they weren't available!\"\n )\n if content:\n self.memory.add_message(Message.assistant_message(content))\n return True\n return False\n\n # Create and add assistant message\n assistant_msg = (\n Message.from_tool_calls(content=content, tool_calls=self.tool_calls)\n if self.tool_calls\n else Message.assistant_message(content)\n )\n self.memory.add_message(assistant_msg)\n\n if self.tool_choices == ToolChoice.REQUIRED and not self.tool_calls:\n return True # Will be handled in act()\n\n # For 'auto' mode, continue with content if no commands but content exists\n if 
self.tool_choices == ToolChoice.AUTO and not self.tool_calls:\n return bool(content)\n\n return bool(self.tool_calls)\n except Exception as e:\n logger.error(f\"🚨 Oops! The {self.name}'s thinking process hit a snag: {e}\")\n self.memory.add_message(\n Message.assistant_message(\n f\"Error encountered while processing: {str(e)}\"\n )\n )\n return False\n\n async def act(self) -> str:\n \"\"\"Execute tool calls and handle their results\"\"\"\n if not self.tool_calls:\n if self.tool_choices == ToolChoice.REQUIRED:\n raise ValueError(TOOL_CALL_REQUIRED)\n\n # Return last message content if no tool calls\n return self.messages[-1].content or \"No content or commands to execute\"\n\n results = []\n for command in self.tool_calls:\n # Reset base64_image for each tool call\n self._current_base64_image = None\n\n result = await self.execute_tool(command)\n\n if self.max_observe:\n result = result[: self.max_observe]\n\n logger.info(\n f\"🎯 Tool '{command.function.name}' completed its mission! Result: {result}\"\n )\n\n # Add tool response to memory\n tool_msg = Message.tool_message(\n content=result,\n tool_call_id=command.id,\n name=command.function.name,\n base64_image=self._current_base64_image,\n )\n self.memory.add_message(tool_msg)\n results.append(result)\n\n return \"\\n\\n\".join(results)\n\n async def execute_tool(self, command: ToolCall) -> str:\n \"\"\"Execute a single tool call with robust error handling\"\"\"\n if not command or not command.function or not command.function.name:\n return \"Error: Invalid command format\"\n\n name = command.function.name\n if name not in self.available_tools.tool_map:\n return f\"Error: Unknown tool '{name}'\"\n\n try:\n # Parse arguments\n args = json.loads(command.function.arguments or \"{}\")\n\n # Execute the tool\n logger.info(f\"🔧 Activating tool: '{name}'...\")\n result = await self.available_tools.execute(name=name, tool_input=args)\n\n # Handle special tools\n await self._handle_special_tool(name=name, 
result=result)\n\n # Check if result is a ToolResult with base64_image\n if hasattr(result, \"base64_image\") and result.base64_image:\n # Store the base64_image for later use in tool_message\n self._current_base64_image = result.base64_image\n\n # Format result for display (standard case)\n observation = (\n f\"Observed output of cmd `{name}` executed:\\n{str(result)}\"\n if result\n else f\"Cmd `{name}` completed with no output\"\n )\n\n return observation\n except json.JSONDecodeError:\n error_msg = f\"Error parsing arguments for {name}: Invalid JSON format\"\n logger.error(\n f\"📝 Oops! The arguments for '{name}' don't make sense - invalid JSON, arguments:{command.function.arguments}\"\n )\n return f\"Error: {error_msg}\"\n except Exception as e:\n error_msg = f\"⚠️ Tool '{name}' encountered a problem: {str(e)}\"\n logger.exception(error_msg)\n return f\"Error: {error_msg}\"\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs):\n \"\"\"Handle special tool execution and state changes\"\"\"\n if not self._is_special_tool(name):\n return\n\n if self._should_finish_execution(name=name, result=result, **kwargs):\n # Set agent state to finished\n logger.info(f\"🏁 Special tool '{name}' has completed the task!\")\n self.state = AgentState.FINISHED\n\n @staticmethod\n def _should_finish_execution(**kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n return True\n\n def _is_special_tool(self, name: str) -> bool:\n \"\"\"Check if tool name is in special tools list\"\"\"\n return name.lower() in [n.lower() for n in self.special_tool_names]\n\n async def cleanup(self):\n \"\"\"Clean up resources used by the agent's tools.\"\"\"\n logger.info(f\"🧹 Cleaning up resources for agent '{self.name}'...\")\n for tool_name, tool_instance in self.available_tools.tool_map.items():\n if hasattr(tool_instance, \"cleanup\") and asyncio.iscoroutinefunction(\n tool_instance.cleanup\n ):\n try:\n logger.debug(f\"🧼 Cleaning up tool: 
{tool_name}\")\n await tool_instance.cleanup()\n except Exception as e:\n logger.error(\n f\"🚨 Error cleaning up tool '{tool_name}': {e}\", exc_info=True\n )\n logger.info(f\"✨ Cleanup complete for agent '{self.name}'.\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n return await super().run(request)\n finally:\n await self.cleanup()\n"], ["/OpenManus/app/tool/create_chat_completion.py", "from typing import Any, List, Optional, Type, Union, get_args, get_origin\n\nfrom pydantic import BaseModel, Field\n\nfrom app.tool import BaseTool\n\n\nclass CreateChatCompletion(BaseTool):\n name: str = \"create_chat_completion\"\n description: str = (\n \"Creates a structured completion with specified output formatting.\"\n )\n\n # Type mapping for JSON schema\n type_mapping: dict = {\n str: \"string\",\n int: \"integer\",\n float: \"number\",\n bool: \"boolean\",\n dict: \"object\",\n list: \"array\",\n }\n response_type: Optional[Type] = None\n required: List[str] = Field(default_factory=lambda: [\"response\"])\n\n def __init__(self, response_type: Optional[Type] = str):\n \"\"\"Initialize with a specific response type.\"\"\"\n super().__init__()\n self.response_type = response_type\n self.parameters = self._build_parameters()\n\n def _build_parameters(self) -> dict:\n \"\"\"Build parameters schema based on response type.\"\"\"\n if self.response_type == str:\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"string\",\n \"description\": \"The response text that should be delivered to the user.\",\n },\n },\n \"required\": self.required,\n }\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n schema = self.response_type.model_json_schema()\n return {\n \"type\": \"object\",\n \"properties\": schema[\"properties\"],\n \"required\": schema.get(\"required\", self.required),\n }\n\n return 
self._create_type_schema(self.response_type)\n\n def _create_type_schema(self, type_hint: Type) -> dict:\n \"\"\"Create a JSON schema for the given type.\"\"\"\n origin = get_origin(type_hint)\n args = get_args(type_hint)\n\n # Handle primitive types\n if origin is None:\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Response of type {type_hint.__name__}\",\n }\n },\n \"required\": self.required,\n }\n\n # Handle List type\n if origin is list:\n item_type = args[0] if args else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"array\",\n \"items\": self._get_type_info(item_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Dict type\n if origin is dict:\n value_type = args[1] if len(args) > 1 else Any\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\n \"type\": \"object\",\n \"additionalProperties\": self._get_type_info(value_type),\n }\n },\n \"required\": self.required,\n }\n\n # Handle Union type\n if origin is Union:\n return self._create_union_schema(args)\n\n return self._build_parameters()\n\n def _get_type_info(self, type_hint: Type) -> dict:\n \"\"\"Get type information for a single type.\"\"\"\n if isinstance(type_hint, type) and issubclass(type_hint, BaseModel):\n return type_hint.model_json_schema()\n\n return {\n \"type\": self.type_mapping.get(type_hint, \"string\"),\n \"description\": f\"Value of type {getattr(type_hint, '__name__', 'any')}\",\n }\n\n def _create_union_schema(self, types: tuple) -> dict:\n \"\"\"Create schema for Union types.\"\"\"\n return {\n \"type\": \"object\",\n \"properties\": {\n \"response\": {\"anyOf\": [self._get_type_info(t) for t in types]}\n },\n \"required\": self.required,\n }\n\n async def execute(self, required: list | None = None, **kwargs) -> Any:\n \"\"\"Execute the chat completion with type conversion.\n\n Args:\n required: List of 
required field names or None\n **kwargs: Response data\n\n Returns:\n Converted response based on response_type\n \"\"\"\n required = required or self.required\n\n # Handle case when required is a list\n if isinstance(required, list) and len(required) > 0:\n if len(required) == 1:\n required_field = required[0]\n result = kwargs.get(required_field, \"\")\n else:\n # Return multiple fields as a dictionary\n return {field: kwargs.get(field, \"\") for field in required}\n else:\n required_field = \"response\"\n result = kwargs.get(required_field, \"\")\n\n # Type conversion logic\n if self.response_type == str:\n return result\n\n if isinstance(self.response_type, type) and issubclass(\n self.response_type, BaseModel\n ):\n return self.response_type(**kwargs)\n\n if get_origin(self.response_type) in (list, dict):\n return result # Assuming result is already in correct format\n\n try:\n return self.response_type(result)\n except (ValueError, TypeError):\n return result\n"], ["/OpenManus/app/agent/browser.py", "import json\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import Message, ToolChoice\nfrom app.tool import BrowserUseTool, Terminate, ToolCollection\n\n\n# Avoid circular import if BrowserAgent needs BrowserContextHelper\nif TYPE_CHECKING:\n from app.agent.base import BaseAgent # Or wherever memory is defined\n\n\nclass BrowserContextHelper:\n def __init__(self, agent: \"BaseAgent\"):\n self.agent = agent\n self._current_base64_image: Optional[str] = None\n\n async def get_browser_state(self) -> Optional[dict]:\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if not browser_tool or not hasattr(browser_tool, \"get_current_state\"):\n logger.warning(\"BrowserUseTool not found or doesn't have get_current_state\")\n return None\n try:\n 
result = await browser_tool.get_current_state()\n if result.error:\n logger.debug(f\"Browser state error: {result.error}\")\n return None\n if hasattr(result, \"base64_image\") and result.base64_image:\n self._current_base64_image = result.base64_image\n else:\n self._current_base64_image = None\n return json.loads(result.output)\n except Exception as e:\n logger.debug(f\"Failed to get browser state: {str(e)}\")\n return None\n\n async def format_next_step_prompt(self) -> str:\n \"\"\"Gets browser state and formats the browser prompt.\"\"\"\n browser_state = await self.get_browser_state()\n url_info, tabs_info, content_above_info, content_below_info = \"\", \"\", \"\", \"\"\n results_info = \"\" # Or get from agent if needed elsewhere\n\n if browser_state and not browser_state.get(\"error\"):\n url_info = f\"\\n URL: {browser_state.get('url', 'N/A')}\\n Title: {browser_state.get('title', 'N/A')}\"\n tabs = browser_state.get(\"tabs\", [])\n if tabs:\n tabs_info = f\"\\n {len(tabs)} tab(s) available\"\n pixels_above = browser_state.get(\"pixels_above\", 0)\n pixels_below = browser_state.get(\"pixels_below\", 0)\n if pixels_above > 0:\n content_above_info = f\" ({pixels_above} pixels)\"\n if pixels_below > 0:\n content_below_info = f\" ({pixels_below} pixels)\"\n\n if self._current_base64_image:\n image_message = Message.user_message(\n content=\"Current browser screenshot:\",\n base64_image=self._current_base64_image,\n )\n self.agent.memory.add_message(image_message)\n self._current_base64_image = None # Consume the image after adding\n\n return NEXT_STEP_PROMPT.format(\n url_placeholder=url_info,\n tabs_placeholder=tabs_info,\n content_above_placeholder=content_above_info,\n content_below_placeholder=content_below_info,\n results_placeholder=results_info,\n )\n\n async def cleanup_browser(self):\n browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)\n if browser_tool and hasattr(browser_tool, \"cleanup\"):\n await 
browser_tool.cleanup()\n\n\nclass BrowserAgent(ToolCallAgent):\n \"\"\"\n A browser agent that uses the browser_use library to control a browser.\n\n This agent can navigate web pages, interact with elements, fill forms,\n extract content, and perform other browser-based actions to accomplish tasks.\n \"\"\"\n\n name: str = \"browser\"\n description: str = \"A browser agent that can control a browser to accomplish tasks\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # Configure the available tools\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())\n )\n\n # Use Auto for tool choice to allow both tool usage and free-form responses\n tool_choices: ToolChoice = ToolChoice.AUTO\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"BrowserAgent\":\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions using tools, with browser state info added\"\"\"\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n return await super().think()\n\n async def cleanup(self):\n \"\"\"Clean up browser agent resources by calling parent cleanup.\"\"\"\n await self.browser_context_helper.cleanup_browser()\n"], ["/OpenManus/app/tool/planning.py", "# tool/planning.py\nfrom typing import Dict, List, Literal, Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, ToolResult\n\n\n_PLANNING_TOOL_DESCRIPTION = \"\"\"\nA planning tool that allows the agent to create and manage plans for solving complex tasks.\nThe tool provides functionality for creating plans, updating plan steps, and tracking 
progress.\n\"\"\"\n\n\nclass PlanningTool(BaseTool):\n \"\"\"\n A planning tool that allows the agent to create and manage plans for solving complex tasks.\n The tool provides functionality for creating plans, updating plan steps, and tracking progress.\n \"\"\"\n\n name: str = \"planning\"\n description: str = _PLANNING_TOOL_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The command to execute. Available commands: create, update, list, get, set_active, mark_step, delete.\",\n \"enum\": [\n \"create\",\n \"update\",\n \"list\",\n \"get\",\n \"set_active\",\n \"mark_step\",\n \"delete\",\n ],\n \"type\": \"string\",\n },\n \"plan_id\": {\n \"description\": \"Unique identifier for the plan. Required for create, update, set_active, and delete commands. Optional for get and mark_step (uses active plan if not specified).\",\n \"type\": \"string\",\n },\n \"title\": {\n \"description\": \"Title for the plan. Required for create command, optional for update command.\",\n \"type\": \"string\",\n },\n \"steps\": {\n \"description\": \"List of plan steps. Required for create command, optional for update command.\",\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n \"step_index\": {\n \"description\": \"Index of the step to update (0-based). Required for mark_step command.\",\n \"type\": \"integer\",\n },\n \"step_status\": {\n \"description\": \"Status to set for a step. Used with mark_step command.\",\n \"enum\": [\"not_started\", \"in_progress\", \"completed\", \"blocked\"],\n \"type\": \"string\",\n },\n \"step_notes\": {\n \"description\": \"Additional notes for a step. 
Optional for mark_step command.\",\n \"type\": \"string\",\n },\n },\n \"required\": [\"command\"],\n \"additionalProperties\": False,\n }\n\n plans: dict = {} # Dictionary to store plans by plan_id\n _current_plan_id: Optional[str] = None # Track the current active plan\n\n async def execute(\n self,\n *,\n command: Literal[\n \"create\", \"update\", \"list\", \"get\", \"set_active\", \"mark_step\", \"delete\"\n ],\n plan_id: Optional[str] = None,\n title: Optional[str] = None,\n steps: Optional[List[str]] = None,\n step_index: Optional[int] = None,\n step_status: Optional[\n Literal[\"not_started\", \"in_progress\", \"completed\", \"blocked\"]\n ] = None,\n step_notes: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"\n Execute the planning tool with the given command and parameters.\n\n Parameters:\n - command: The operation to perform\n - plan_id: Unique identifier for the plan\n - title: Title for the plan (used with create command)\n - steps: List of steps for the plan (used with create command)\n - step_index: Index of the step to update (used with mark_step command)\n - step_status: Status to set for a step (used with mark_step command)\n - step_notes: Additional notes for a step (used with mark_step command)\n \"\"\"\n\n if command == \"create\":\n return self._create_plan(plan_id, title, steps)\n elif command == \"update\":\n return self._update_plan(plan_id, title, steps)\n elif command == \"list\":\n return self._list_plans()\n elif command == \"get\":\n return self._get_plan(plan_id)\n elif command == \"set_active\":\n return self._set_active_plan(plan_id)\n elif command == \"mark_step\":\n return self._mark_step(plan_id, step_index, step_status, step_notes)\n elif command == \"delete\":\n return self._delete_plan(plan_id)\n else:\n raise ToolError(\n f\"Unrecognized command: {command}. 
Allowed commands are: create, update, list, get, set_active, mark_step, delete\"\n )\n\n def _create_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Create a new plan with the given ID, title, and steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: create\")\n\n if plan_id in self.plans:\n raise ToolError(\n f\"A plan with ID '{plan_id}' already exists. Use 'update' to modify existing plans.\"\n )\n\n if not title:\n raise ToolError(\"Parameter `title` is required for command: create\")\n\n if (\n not steps\n or not isinstance(steps, list)\n or not all(isinstance(step, str) for step in steps)\n ):\n raise ToolError(\n \"Parameter `steps` must be a non-empty list of strings for command: create\"\n )\n\n # Create a new plan with initialized step statuses\n plan = {\n \"plan_id\": plan_id,\n \"title\": title,\n \"steps\": steps,\n \"step_statuses\": [\"not_started\"] * len(steps),\n \"step_notes\": [\"\"] * len(steps),\n }\n\n self.plans[plan_id] = plan\n self._current_plan_id = plan_id # Set as active plan\n\n return ToolResult(\n output=f\"Plan created successfully with ID: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _update_plan(\n self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]]\n ) -> ToolResult:\n \"\"\"Update an existing plan with new title or steps.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: update\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n\n if title:\n plan[\"title\"] = title\n\n if steps:\n if not isinstance(steps, list) or not all(\n isinstance(step, str) for step in steps\n ):\n raise ToolError(\n \"Parameter `steps` must be a list of strings for command: update\"\n )\n\n # Preserve existing step statuses for unchanged steps\n old_steps = plan[\"steps\"]\n old_statuses = 
plan[\"step_statuses\"]\n old_notes = plan[\"step_notes\"]\n\n # Create new step statuses and notes\n new_statuses = []\n new_notes = []\n\n for i, step in enumerate(steps):\n # If the step exists at the same position in old steps, preserve status and notes\n if i < len(old_steps) and step == old_steps[i]:\n new_statuses.append(old_statuses[i])\n new_notes.append(old_notes[i])\n else:\n new_statuses.append(\"not_started\")\n new_notes.append(\"\")\n\n plan[\"steps\"] = steps\n plan[\"step_statuses\"] = new_statuses\n plan[\"step_notes\"] = new_notes\n\n return ToolResult(\n output=f\"Plan updated successfully: {plan_id}\\n\\n{self._format_plan(plan)}\"\n )\n\n def _list_plans(self) -> ToolResult:\n \"\"\"List all available plans.\"\"\"\n if not self.plans:\n return ToolResult(\n output=\"No plans available. Create a plan with the 'create' command.\"\n )\n\n output = \"Available plans:\\n\"\n for plan_id, plan in self.plans.items():\n current_marker = \" (active)\" if plan_id == self._current_plan_id else \"\"\n completed = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"completed\"\n )\n total = len(plan[\"steps\"])\n progress = f\"{completed}/{total} steps completed\"\n output += f\"• {plan_id}{current_marker}: {plan['title']} - {progress}\\n\"\n\n return ToolResult(output=output)\n\n def _get_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Get details of a specific plan.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. 
Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n plan = self.plans[plan_id]\n return ToolResult(output=self._format_plan(plan))\n\n def _set_active_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Set a plan as the active plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: set_active\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n self._current_plan_id = plan_id\n return ToolResult(\n output=f\"Plan '{plan_id}' is now the active plan.\\n\\n{self._format_plan(self.plans[plan_id])}\"\n )\n\n def _mark_step(\n self,\n plan_id: Optional[str],\n step_index: Optional[int],\n step_status: Optional[str],\n step_notes: Optional[str],\n ) -> ToolResult:\n \"\"\"Mark a step with a specific status and optional notes.\"\"\"\n if not plan_id:\n # If no plan_id is provided, use the current active plan\n if not self._current_plan_id:\n raise ToolError(\n \"No active plan. Please specify a plan_id or set an active plan.\"\n )\n plan_id = self._current_plan_id\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n if step_index is None:\n raise ToolError(\"Parameter `step_index` is required for command: mark_step\")\n\n plan = self.plans[plan_id]\n\n if step_index < 0 or step_index >= len(plan[\"steps\"]):\n raise ToolError(\n f\"Invalid step_index: {step_index}. Valid indices range from 0 to {len(plan['steps'])-1}.\"\n )\n\n if step_status and step_status not in [\n \"not_started\",\n \"in_progress\",\n \"completed\",\n \"blocked\",\n ]:\n raise ToolError(\n f\"Invalid step_status: {step_status}. 
Valid statuses are: not_started, in_progress, completed, blocked\"\n )\n\n if step_status:\n plan[\"step_statuses\"][step_index] = step_status\n\n if step_notes:\n plan[\"step_notes\"][step_index] = step_notes\n\n return ToolResult(\n output=f\"Step {step_index} updated in plan '{plan_id}'.\\n\\n{self._format_plan(plan)}\"\n )\n\n def _delete_plan(self, plan_id: Optional[str]) -> ToolResult:\n \"\"\"Delete a plan.\"\"\"\n if not plan_id:\n raise ToolError(\"Parameter `plan_id` is required for command: delete\")\n\n if plan_id not in self.plans:\n raise ToolError(f\"No plan found with ID: {plan_id}\")\n\n del self.plans[plan_id]\n\n # If the deleted plan was the active plan, clear the active plan\n if self._current_plan_id == plan_id:\n self._current_plan_id = None\n\n return ToolResult(output=f\"Plan '{plan_id}' has been deleted.\")\n\n def _format_plan(self, plan: Dict) -> str:\n \"\"\"Format a plan for display.\"\"\"\n output = f\"Plan: {plan['title']} (ID: {plan['plan_id']})\\n\"\n output += \"=\" * len(output) + \"\\n\\n\"\n\n # Calculate progress statistics\n total_steps = len(plan[\"steps\"])\n completed = sum(1 for status in plan[\"step_statuses\"] if status == \"completed\")\n in_progress = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"in_progress\"\n )\n blocked = sum(1 for status in plan[\"step_statuses\"] if status == \"blocked\")\n not_started = sum(\n 1 for status in plan[\"step_statuses\"] if status == \"not_started\"\n )\n\n output += f\"Progress: {completed}/{total_steps} steps completed \"\n if total_steps > 0:\n percentage = (completed / total_steps) * 100\n output += f\"({percentage:.1f}%)\\n\"\n else:\n output += \"(0%)\\n\"\n\n output += f\"Status: {completed} completed, {in_progress} in progress, {blocked} blocked, {not_started} not started\\n\\n\"\n output += \"Steps:\\n\"\n\n # Add each step with its status and notes\n for i, (step, status, notes) in enumerate(\n zip(plan[\"steps\"], plan[\"step_statuses\"], 
plan[\"step_notes\"])\n ):\n status_symbol = {\n \"not_started\": \"[ ]\",\n \"in_progress\": \"[→]\",\n \"completed\": \"[✓]\",\n \"blocked\": \"[!]\",\n }.get(status, \"[ ]\")\n\n output += f\"{i}. {status_symbol} {step}\\n\"\n if notes:\n output += f\" Notes: {notes}\\n\"\n\n return output\n"], ["/OpenManus/app/sandbox/core/sandbox.py", "import asyncio\nimport io\nimport os\nimport tarfile\nimport tempfile\nimport uuid\nfrom typing import Dict, Optional\n\nimport docker\nfrom docker.errors import NotFound\nfrom docker.models.containers import Container\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.exceptions import SandboxTimeoutError\nfrom app.sandbox.core.terminal import AsyncDockerizedTerminal\n\n\nclass DockerSandbox:\n \"\"\"Docker sandbox environment.\n\n Provides a containerized execution environment with resource limits,\n file operations, and command execution capabilities.\n\n Attributes:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n client: Docker client.\n container: Docker container instance.\n terminal: Container terminal interface.\n \"\"\"\n\n def __init__(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Initializes a sandbox instance.\n\n Args:\n config: Sandbox configuration. 
Default configuration used if None.\n volume_bindings: Volume mappings in {host_path: container_path} format.\n \"\"\"\n self.config = config or SandboxSettings()\n self.volume_bindings = volume_bindings or {}\n self.client = docker.from_env()\n self.container: Optional[Container] = None\n self.terminal: Optional[AsyncDockerizedTerminal] = None\n\n async def create(self) -> \"DockerSandbox\":\n \"\"\"Creates and starts the sandbox container.\n\n Returns:\n Current sandbox instance.\n\n Raises:\n docker.errors.APIError: If Docker API call fails.\n RuntimeError: If container creation or startup fails.\n \"\"\"\n try:\n # Prepare container config\n host_config = self.client.api.create_host_config(\n mem_limit=self.config.memory_limit,\n cpu_period=100000,\n cpu_quota=int(100000 * self.config.cpu_limit),\n network_mode=\"none\" if not self.config.network_enabled else \"bridge\",\n binds=self._prepare_volume_bindings(),\n )\n\n # Generate unique container name with sandbox_ prefix\n container_name = f\"sandbox_{uuid.uuid4().hex[:8]}\"\n\n # Create container\n container = await asyncio.to_thread(\n self.client.api.create_container,\n image=self.config.image,\n command=\"tail -f /dev/null\",\n hostname=\"sandbox\",\n working_dir=self.config.work_dir,\n host_config=host_config,\n name=container_name,\n tty=True,\n detach=True,\n )\n\n self.container = self.client.containers.get(container[\"Id\"])\n\n # Start container\n await asyncio.to_thread(self.container.start)\n\n # Initialize terminal\n self.terminal = AsyncDockerizedTerminal(\n container[\"Id\"],\n self.config.work_dir,\n env_vars={\"PYTHONUNBUFFERED\": \"1\"}\n # Ensure Python output is not buffered\n )\n await self.terminal.init()\n\n return self\n\n except Exception as e:\n await self.cleanup() # Ensure resources are cleaned up\n raise RuntimeError(f\"Failed to create sandbox: {e}\") from e\n\n def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]:\n \"\"\"Prepares volume binding configuration.\n\n 
Returns:\n Volume binding configuration dictionary.\n \"\"\"\n bindings = {}\n\n # Create and add working directory mapping\n work_dir = self._ensure_host_dir(self.config.work_dir)\n bindings[work_dir] = {\"bind\": self.config.work_dir, \"mode\": \"rw\"}\n\n # Add custom volume bindings\n for host_path, container_path in self.volume_bindings.items():\n bindings[host_path] = {\"bind\": container_path, \"mode\": \"rw\"}\n\n return bindings\n\n @staticmethod\n def _ensure_host_dir(path: str) -> str:\n \"\"\"Ensures directory exists on the host.\n\n Args:\n path: Directory path.\n\n Returns:\n Actual path on the host.\n \"\"\"\n host_path = os.path.join(\n tempfile.gettempdir(),\n f\"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}\",\n )\n os.makedirs(host_path, exist_ok=True)\n return host_path\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the sandbox.\n\n Args:\n cmd: Command to execute.\n timeout: Timeout in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If sandbox not initialized or command execution fails.\n TimeoutError: If command execution times out.\n \"\"\"\n if not self.terminal:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n return await self.terminal.run_command(\n cmd, timeout=timeout or self.config.timeout\n )\n except TimeoutError:\n raise SandboxTimeoutError(\n f\"Command execution timed out after {timeout or self.config.timeout} seconds\"\n )\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads a file from the container.\n\n Args:\n path: File path.\n\n Returns:\n File contents as string.\n\n Raises:\n FileNotFoundError: If file does not exist.\n RuntimeError: If read operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n # Get file archive\n resolved_path = self._safe_resolve_path(path)\n tar_stream, _ = await asyncio.to_thread(\n self.container.get_archive, resolved_path\n 
)\n\n # Read file content from tar stream\n content = await self._read_from_tar(tar_stream)\n return content.decode(\"utf-8\")\n\n except NotFound:\n raise FileNotFoundError(f\"File not found: {path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to read file: {e}\")\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to a file in the container.\n\n Args:\n path: Target path.\n content: File content.\n\n Raises:\n RuntimeError: If write operation fails.\n \"\"\"\n if not self.container:\n raise RuntimeError(\"Sandbox not initialized\")\n\n try:\n resolved_path = self._safe_resolve_path(path)\n parent_dir = os.path.dirname(resolved_path)\n\n # Create parent directory\n if parent_dir:\n await self.run_command(f\"mkdir -p {parent_dir}\")\n\n # Prepare file data\n tar_stream = await self._create_tar_stream(\n os.path.basename(path), content.encode(\"utf-8\")\n )\n\n # Write file\n await asyncio.to_thread(\n self.container.put_archive, parent_dir or \"/\", tar_stream\n )\n\n except Exception as e:\n raise RuntimeError(f\"Failed to write file: {e}\")\n\n def _safe_resolve_path(self, path: str) -> str:\n \"\"\"Safely resolves container path, preventing path traversal.\n\n Args:\n path: Original path.\n\n Returns:\n Resolved absolute path.\n\n Raises:\n ValueError: If path contains potentially unsafe patterns.\n \"\"\"\n # Check for path traversal attempts\n if \"..\" in path.split(\"/\"):\n raise ValueError(\"Path contains potentially unsafe patterns\")\n\n resolved = (\n os.path.join(self.config.work_dir, path)\n if not os.path.isabs(path)\n else path\n )\n return resolved\n\n async def copy_from(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file from the container.\n\n Args:\n src_path: Source file path (container).\n dst_path: Destination path (host).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n # Ensure destination file's parent 
directory exists\n parent_dir = os.path.dirname(dst_path)\n if parent_dir:\n os.makedirs(parent_dir, exist_ok=True)\n\n # Get file stream\n resolved_src = self._safe_resolve_path(src_path)\n stream, stat = await asyncio.to_thread(\n self.container.get_archive, resolved_src\n )\n\n # Create temporary directory to extract file\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Write stream to temporary file\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with open(tar_path, \"wb\") as f:\n for chunk in stream:\n f.write(chunk)\n\n # Extract file\n with tarfile.open(tar_path) as tar:\n members = tar.getmembers()\n if not members:\n raise FileNotFoundError(f\"Source file is empty: {src_path}\")\n\n # If destination is a directory, we should preserve relative path structure\n if os.path.isdir(dst_path):\n tar.extractall(dst_path)\n else:\n # If destination is a file, we only extract the source file's content\n if len(members) > 1:\n raise RuntimeError(\n f\"Source path is a directory but destination is a file: {src_path}\"\n )\n\n with open(dst_path, \"wb\") as dst:\n src_file = tar.extractfile(members[0])\n if src_file is None:\n raise RuntimeError(\n f\"Failed to extract file: {src_path}\"\n )\n dst.write(src_file.read())\n\n except docker.errors.NotFound:\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n async def copy_to(self, src_path: str, dst_path: str) -> None:\n \"\"\"Copies a file to the container.\n\n Args:\n src_path: Source file path (host).\n dst_path: Destination path (container).\n\n Raises:\n FileNotFoundError: If source file does not exist.\n RuntimeError: If copy operation fails.\n \"\"\"\n try:\n if not os.path.exists(src_path):\n raise FileNotFoundError(f\"Source file not found: {src_path}\")\n\n # Create destination directory in container\n resolved_dst = self._safe_resolve_path(dst_path)\n container_dir = os.path.dirname(resolved_dst)\n if 
container_dir:\n await self.run_command(f\"mkdir -p {container_dir}\")\n\n # Create tar file to upload\n with tempfile.TemporaryDirectory() as tmp_dir:\n tar_path = os.path.join(tmp_dir, \"temp.tar\")\n with tarfile.open(tar_path, \"w\") as tar:\n # Handle directory source path\n if os.path.isdir(src_path):\n os.path.basename(src_path.rstrip(\"/\"))\n for root, _, files in os.walk(src_path):\n for file in files:\n file_path = os.path.join(root, file)\n arcname = os.path.join(\n os.path.basename(dst_path),\n os.path.relpath(file_path, src_path),\n )\n tar.add(file_path, arcname=arcname)\n else:\n # Add single file to tar\n tar.add(src_path, arcname=os.path.basename(dst_path))\n\n # Read tar file content\n with open(tar_path, \"rb\") as f:\n data = f.read()\n\n # Upload to container\n await asyncio.to_thread(\n self.container.put_archive,\n os.path.dirname(resolved_dst) or \"/\",\n data,\n )\n\n # Verify file was created successfully\n try:\n await self.run_command(f\"test -e {resolved_dst}\")\n except Exception:\n raise RuntimeError(f\"Failed to verify file creation: {dst_path}\")\n\n except FileNotFoundError:\n raise\n except Exception as e:\n raise RuntimeError(f\"Failed to copy file: {e}\")\n\n @staticmethod\n async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO:\n \"\"\"Creates a tar file stream.\n\n Args:\n name: Filename.\n content: File content.\n\n Returns:\n Tar file stream.\n \"\"\"\n tar_stream = io.BytesIO()\n with tarfile.open(fileobj=tar_stream, mode=\"w\") as tar:\n tarinfo = tarfile.TarInfo(name=name)\n tarinfo.size = len(content)\n tar.addfile(tarinfo, io.BytesIO(content))\n tar_stream.seek(0)\n return tar_stream\n\n @staticmethod\n async def _read_from_tar(tar_stream) -> bytes:\n \"\"\"Reads file content from a tar stream.\n\n Args:\n tar_stream: Tar file stream.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If read operation fails.\n \"\"\"\n with tempfile.NamedTemporaryFile() as tmp:\n for chunk in tar_stream:\n 
tmp.write(chunk)\n tmp.seek(0)\n\n with tarfile.open(fileobj=tmp) as tar:\n member = tar.next()\n if not member:\n raise RuntimeError(\"Empty tar archive\")\n\n file_content = tar.extractfile(member)\n if not file_content:\n raise RuntimeError(\"Failed to extract file content\")\n\n return file_content.read()\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up sandbox resources.\"\"\"\n errors = []\n try:\n if self.terminal:\n try:\n await self.terminal.close()\n except Exception as e:\n errors.append(f\"Terminal cleanup error: {e}\")\n finally:\n self.terminal = None\n\n if self.container:\n try:\n await asyncio.to_thread(self.container.stop, timeout=5)\n except Exception as e:\n errors.append(f\"Container stop error: {e}\")\n\n try:\n await asyncio.to_thread(self.container.remove, force=True)\n except Exception as e:\n errors.append(f\"Container remove error: {e}\")\n finally:\n self.container = None\n\n except Exception as e:\n errors.append(f\"General cleanup error: {e}\")\n\n if errors:\n print(f\"Warning: Errors during cleanup: {', '.join(errors)}\")\n\n async def __aenter__(self) -> \"DockerSandbox\":\n \"\"\"Async context manager entry.\"\"\"\n return await self.create()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n"], ["/OpenManus/app/tool/mcp.py", "from contextlib import AsyncExitStack\nfrom typing import Dict, List, Optional\n\nfrom mcp import ClientSession, StdioServerParameters\nfrom mcp.client.sse import sse_client\nfrom mcp.client.stdio import stdio_client\nfrom mcp.types import ListToolsResult, TextContent\n\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolResult\nfrom app.tool.tool_collection import ToolCollection\n\n\nclass MCPClientTool(BaseTool):\n \"\"\"Represents a tool proxy that can be called on the MCP server from the client side.\"\"\"\n\n session: Optional[ClientSession] = None\n server_id: str = \"\" # Add server identifier\n 
original_name: str = \"\"\n\n async def execute(self, **kwargs) -> ToolResult:\n \"\"\"Execute the tool by making a remote call to the MCP server.\"\"\"\n if not self.session:\n return ToolResult(error=\"Not connected to MCP server\")\n\n try:\n logger.info(f\"Executing tool: {self.original_name}\")\n result = await self.session.call_tool(self.original_name, kwargs)\n content_str = \", \".join(\n item.text for item in result.content if isinstance(item, TextContent)\n )\n return ToolResult(output=content_str or \"No output returned.\")\n except Exception as e:\n return ToolResult(error=f\"Error executing tool: {str(e)}\")\n\n\nclass MCPClients(ToolCollection):\n \"\"\"\n A collection of tools that connects to multiple MCP servers and manages available tools through the Model Context Protocol.\n \"\"\"\n\n sessions: Dict[str, ClientSession] = {}\n exit_stacks: Dict[str, AsyncExitStack] = {}\n description: str = \"MCP client tools for server interaction\"\n\n def __init__(self):\n super().__init__() # Initialize with empty tools list\n self.name = \"mcp\" # Keep name for backward compatibility\n\n async def connect_sse(self, server_url: str, server_id: str = \"\") -> None:\n \"\"\"Connect to an MCP server using SSE transport.\"\"\"\n if not server_url:\n raise ValueError(\"Server URL is required.\")\n\n server_id = server_id or server_url\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n streams_context = sse_client(url=server_url)\n streams = await exit_stack.enter_async_context(streams_context)\n session = await exit_stack.enter_async_context(ClientSession(*streams))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def connect_stdio(\n self, command: str, args: List[str], server_id: str = \"\"\n ) -> None:\n \"\"\"Connect to an MCP server using stdio 
transport.\"\"\"\n if not command:\n raise ValueError(\"Server command is required.\")\n\n server_id = server_id or command\n\n # Always ensure clean disconnection before new connection\n if server_id in self.sessions:\n await self.disconnect(server_id)\n\n exit_stack = AsyncExitStack()\n self.exit_stacks[server_id] = exit_stack\n\n server_params = StdioServerParameters(command=command, args=args)\n stdio_transport = await exit_stack.enter_async_context(\n stdio_client(server_params)\n )\n read, write = stdio_transport\n session = await exit_stack.enter_async_context(ClientSession(read, write))\n self.sessions[server_id] = session\n\n await self._initialize_and_list_tools(server_id)\n\n async def _initialize_and_list_tools(self, server_id: str) -> None:\n \"\"\"Initialize session and populate tool map.\"\"\"\n session = self.sessions.get(server_id)\n if not session:\n raise RuntimeError(f\"Session not initialized for server {server_id}\")\n\n await session.initialize()\n response = await session.list_tools()\n\n # Create proper tool objects for each server tool\n for tool in response.tools:\n original_name = tool.name\n tool_name = f\"mcp_{server_id}_{original_name}\"\n tool_name = self._sanitize_tool_name(tool_name)\n\n server_tool = MCPClientTool(\n name=tool_name,\n description=tool.description,\n parameters=tool.inputSchema,\n session=session,\n server_id=server_id,\n original_name=original_name,\n )\n self.tool_map[tool_name] = server_tool\n\n # Update tools tuple\n self.tools = tuple(self.tool_map.values())\n logger.info(\n f\"Connected to server {server_id} with tools: {[tool.name for tool in response.tools]}\"\n )\n\n def _sanitize_tool_name(self, name: str) -> str:\n \"\"\"Sanitize tool name to match MCPClientTool requirements.\"\"\"\n import re\n\n # Replace invalid characters with underscores\n sanitized = re.sub(r\"[^a-zA-Z0-9_-]\", \"_\", name)\n\n # Remove consecutive underscores\n sanitized = re.sub(r\"_+\", \"_\", sanitized)\n\n # Remove 
leading/trailing underscores\n sanitized = sanitized.strip(\"_\")\n\n # Truncate to 64 characters if needed\n if len(sanitized) > 64:\n sanitized = sanitized[:64]\n\n return sanitized\n\n async def list_tools(self) -> ListToolsResult:\n \"\"\"List all available tools.\"\"\"\n tools_result = ListToolsResult(tools=[])\n for session in self.sessions.values():\n response = await session.list_tools()\n tools_result.tools += response.tools\n return tools_result\n\n async def disconnect(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from a specific MCP server or all servers if no server_id provided.\"\"\"\n if server_id:\n if server_id in self.sessions:\n try:\n exit_stack = self.exit_stacks.get(server_id)\n\n # Close the exit stack which will handle session cleanup\n if exit_stack:\n try:\n await exit_stack.aclose()\n except RuntimeError as e:\n if \"cancel scope\" in str(e).lower():\n logger.warning(\n f\"Cancel scope error during disconnect from {server_id}, continuing with cleanup: {e}\"\n )\n else:\n raise\n\n # Clean up references\n self.sessions.pop(server_id, None)\n self.exit_stacks.pop(server_id, None)\n\n # Remove tools associated with this server\n self.tool_map = {\n k: v\n for k, v in self.tool_map.items()\n if v.server_id != server_id\n }\n self.tools = tuple(self.tool_map.values())\n logger.info(f\"Disconnected from MCP server {server_id}\")\n except Exception as e:\n logger.error(f\"Error disconnecting from server {server_id}: {e}\")\n else:\n # Disconnect from all servers in a deterministic order\n for sid in sorted(list(self.sessions.keys())):\n await self.disconnect(sid)\n self.tool_map = {}\n self.tools = tuple()\n logger.info(\"Disconnected from all MCP servers\")\n"], ["/OpenManus/app/agent/base.py", "from abc import ABC, abstractmethod\nfrom contextlib import asynccontextmanager\nfrom typing import List, Optional\n\nfrom pydantic import BaseModel, Field, model_validator\n\nfrom app.llm import LLM\nfrom app.logger import logger\nfrom 
app.sandbox.client import SANDBOX_CLIENT\nfrom app.schema import ROLE_TYPE, AgentState, Memory, Message\n\n\nclass BaseAgent(BaseModel, ABC):\n \"\"\"Abstract base class for managing agent state and execution.\n\n Provides foundational functionality for state transitions, memory management,\n and a step-based execution loop. Subclasses must implement the `step` method.\n \"\"\"\n\n # Core attributes\n name: str = Field(..., description=\"Unique name of the agent\")\n description: Optional[str] = Field(None, description=\"Optional agent description\")\n\n # Prompts\n system_prompt: Optional[str] = Field(\n None, description=\"System-level instruction prompt\"\n )\n next_step_prompt: Optional[str] = Field(\n None, description=\"Prompt for determining next action\"\n )\n\n # Dependencies\n llm: LLM = Field(default_factory=LLM, description=\"Language model instance\")\n memory: Memory = Field(default_factory=Memory, description=\"Agent's memory store\")\n state: AgentState = Field(\n default=AgentState.IDLE, description=\"Current agent state\"\n )\n\n # Execution control\n max_steps: int = Field(default=10, description=\"Maximum steps before termination\")\n current_step: int = Field(default=0, description=\"Current step in execution\")\n\n duplicate_threshold: int = 2\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\" # Allow extra fields for flexibility in subclasses\n\n @model_validator(mode=\"after\")\n def initialize_agent(self) -> \"BaseAgent\":\n \"\"\"Initialize agent with default settings if not provided.\"\"\"\n if self.llm is None or not isinstance(self.llm, LLM):\n self.llm = LLM(config_name=self.name.lower())\n if not isinstance(self.memory, Memory):\n self.memory = Memory()\n return self\n\n @asynccontextmanager\n async def state_context(self, new_state: AgentState):\n \"\"\"Context manager for safe agent state transitions.\n\n Args:\n new_state: The state to transition to during the context.\n\n Yields:\n None: Allows execution within 
the new state.\n\n Raises:\n ValueError: If the new_state is invalid.\n \"\"\"\n if not isinstance(new_state, AgentState):\n raise ValueError(f\"Invalid state: {new_state}\")\n\n previous_state = self.state\n self.state = new_state\n try:\n yield\n except Exception as e:\n self.state = AgentState.ERROR # Transition to ERROR on failure\n raise e\n finally:\n self.state = previous_state # Revert to previous state\n\n def update_memory(\n self,\n role: ROLE_TYPE, # type: ignore\n content: str,\n base64_image: Optional[str] = None,\n **kwargs,\n ) -> None:\n \"\"\"Add a message to the agent's memory.\n\n Args:\n role: The role of the message sender (user, system, assistant, tool).\n content: The message content.\n base64_image: Optional base64 encoded image.\n **kwargs: Additional arguments (e.g., tool_call_id for tool messages).\n\n Raises:\n ValueError: If the role is unsupported.\n \"\"\"\n message_map = {\n \"user\": Message.user_message,\n \"system\": Message.system_message,\n \"assistant\": Message.assistant_message,\n \"tool\": lambda content, **kw: Message.tool_message(content, **kw),\n }\n\n if role not in message_map:\n raise ValueError(f\"Unsupported message role: {role}\")\n\n # Create message with appropriate parameters based on role\n kwargs = {\"base64_image\": base64_image, **(kwargs if role == \"tool\" else {})}\n self.memory.add_message(message_map[role](content, **kwargs))\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Execute the agent's main loop asynchronously.\n\n Args:\n request: Optional initial user request to process.\n\n Returns:\n A string summarizing the execution results.\n\n Raises:\n RuntimeError: If the agent is not in IDLE state at start.\n \"\"\"\n if self.state != AgentState.IDLE:\n raise RuntimeError(f\"Cannot run agent from state: {self.state}\")\n\n if request:\n self.update_memory(\"user\", request)\n\n results: List[str] = []\n async with self.state_context(AgentState.RUNNING):\n while (\n 
self.current_step < self.max_steps and self.state != AgentState.FINISHED\n ):\n self.current_step += 1\n logger.info(f\"Executing step {self.current_step}/{self.max_steps}\")\n step_result = await self.step()\n\n # Check for stuck state\n if self.is_stuck():\n self.handle_stuck_state()\n\n results.append(f\"Step {self.current_step}: {step_result}\")\n\n if self.current_step >= self.max_steps:\n self.current_step = 0\n self.state = AgentState.IDLE\n results.append(f\"Terminated: Reached max steps ({self.max_steps})\")\n await SANDBOX_CLIENT.cleanup()\n return \"\\n\".join(results) if results else \"No steps executed\"\n\n @abstractmethod\n async def step(self) -> str:\n \"\"\"Execute a single step in the agent's workflow.\n\n Must be implemented by subclasses to define specific behavior.\n \"\"\"\n\n def handle_stuck_state(self):\n \"\"\"Handle stuck state by adding a prompt to change strategy\"\"\"\n stuck_prompt = \"\\\n Observed duplicate responses. Consider new strategies and avoid repeating ineffective paths already attempted.\"\n self.next_step_prompt = f\"{stuck_prompt}\\n{self.next_step_prompt}\"\n logger.warning(f\"Agent detected stuck state. 
Added prompt: {stuck_prompt}\")\n\n def is_stuck(self) -> bool:\n \"\"\"Check if the agent is stuck in a loop by detecting duplicate content\"\"\"\n if len(self.memory.messages) < 2:\n return False\n\n last_message = self.memory.messages[-1]\n if not last_message.content:\n return False\n\n # Count identical content occurrences\n duplicate_count = sum(\n 1\n for msg in reversed(self.memory.messages[:-1])\n if msg.role == \"assistant\" and msg.content == last_message.content\n )\n\n return duplicate_count >= self.duplicate_threshold\n\n @property\n def messages(self) -> List[Message]:\n \"\"\"Retrieve a list of messages from the agent's memory.\"\"\"\n return self.memory.messages\n\n @messages.setter\n def messages(self, value: List[Message]):\n \"\"\"Set the list of messages in the agent's memory.\"\"\"\n self.memory.messages = value\n"], ["/OpenManus/app/sandbox/core/terminal.py", "\"\"\"\nAsynchronous Docker Terminal\n\nThis module provides asynchronous terminal functionality for Docker containers,\nallowing interactive command execution with timeout control.\n\"\"\"\n\nimport asyncio\nimport re\nimport socket\nfrom typing import Dict, Optional, Tuple, Union\n\nimport docker\nfrom docker import APIClient\nfrom docker.errors import APIError\nfrom docker.models.containers import Container\n\n\nclass DockerSession:\n def __init__(self, container_id: str) -> None:\n \"\"\"Initializes a Docker session.\n\n Args:\n container_id: ID of the Docker container.\n \"\"\"\n self.api = APIClient()\n self.container_id = container_id\n self.exec_id = None\n self.socket = None\n\n async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None:\n \"\"\"Creates an interactive session with the container.\n\n Args:\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n\n Raises:\n RuntimeError: If socket connection fails.\n \"\"\"\n startup_command = [\n \"bash\",\n \"-c\",\n f\"cd {working_dir} && \"\n \"PROMPT_COMMAND='' \"\n 
\"PS1='$ ' \"\n \"exec bash --norc --noprofile\",\n ]\n\n exec_data = self.api.exec_create(\n self.container_id,\n startup_command,\n stdin=True,\n tty=True,\n stdout=True,\n stderr=True,\n privileged=True,\n user=\"root\",\n environment={**env_vars, \"TERM\": \"dumb\", \"PS1\": \"$ \", \"PROMPT_COMMAND\": \"\"},\n )\n self.exec_id = exec_data[\"Id\"]\n\n socket_data = self.api.exec_start(\n self.exec_id, socket=True, tty=True, stream=True, demux=True\n )\n\n if hasattr(socket_data, \"_sock\"):\n self.socket = socket_data._sock\n self.socket.setblocking(False)\n else:\n raise RuntimeError(\"Failed to get socket connection\")\n\n await self._read_until_prompt()\n\n async def close(self) -> None:\n \"\"\"Cleans up session resources.\n\n 1. Sends exit command\n 2. Closes socket connection\n 3. Checks and cleans up exec instance\n \"\"\"\n try:\n if self.socket:\n # Send exit command to close bash session\n try:\n self.socket.sendall(b\"exit\\n\")\n # Allow time for command execution\n await asyncio.sleep(0.1)\n except:\n pass # Ignore sending errors, continue cleanup\n\n # Close socket connection\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except:\n pass # Some platforms may not support shutdown\n\n self.socket.close()\n self.socket = None\n\n if self.exec_id:\n try:\n # Check exec instance status\n exec_inspect = self.api.exec_inspect(self.exec_id)\n if exec_inspect.get(\"Running\", False):\n # If still running, wait for it to complete\n await asyncio.sleep(0.5)\n except:\n pass # Ignore inspection errors, continue cleanup\n\n self.exec_id = None\n\n except Exception as e:\n # Log error but don't raise, ensure cleanup continues\n print(f\"Warning: Error during session cleanup: {e}\")\n\n async def _read_until_prompt(self) -> str:\n \"\"\"Reads output until prompt is found.\n\n Returns:\n String containing output up to the prompt.\n\n Raises:\n socket.error: If socket communication fails.\n \"\"\"\n buffer = b\"\"\n while b\"$ \" not in buffer:\n try:\n chunk = 
self.socket.recv(4096)\n if chunk:\n buffer += chunk\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n return buffer.decode(\"utf-8\")\n\n async def execute(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes a command and returns cleaned output.\n\n Args:\n command: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string with prompt markers removed.\n\n Raises:\n RuntimeError: If session not initialized or execution fails.\n TimeoutError: If command execution exceeds timeout.\n \"\"\"\n if not self.socket:\n raise RuntimeError(\"Session not initialized\")\n\n try:\n # Sanitize command to prevent shell injection\n sanitized_command = self._sanitize_command(command)\n full_command = f\"{sanitized_command}\\necho $?\\n\"\n self.socket.sendall(full_command.encode())\n\n async def read_output() -> str:\n buffer = b\"\"\n result_lines = []\n command_sent = False\n\n while True:\n try:\n chunk = self.socket.recv(4096)\n if not chunk:\n break\n\n buffer += chunk\n lines = buffer.split(b\"\\n\")\n\n buffer = lines[-1]\n lines = lines[:-1]\n\n for line in lines:\n line = line.rstrip(b\"\\r\")\n\n if not command_sent:\n command_sent = True\n continue\n\n if line.strip() == b\"echo $?\" or line.strip().isdigit():\n continue\n\n if line.strip():\n result_lines.append(line)\n\n if buffer.endswith(b\"$ \"):\n break\n\n except socket.error as e:\n if e.errno == socket.EWOULDBLOCK:\n await asyncio.sleep(0.1)\n continue\n raise\n\n output = b\"\\n\".join(result_lines).decode(\"utf-8\")\n output = re.sub(r\"\\n\\$ echo \\$\\$?.*$\", \"\", output)\n\n return output\n\n if timeout:\n result = await asyncio.wait_for(read_output(), timeout)\n else:\n result = await read_output()\n\n return result.strip()\n\n except asyncio.TimeoutError:\n raise TimeoutError(f\"Command execution timed out after {timeout} seconds\")\n except Exception as e:\n raise 
RuntimeError(f\"Failed to execute command: {e}\")\n\n def _sanitize_command(self, command: str) -> str:\n \"\"\"Sanitizes the command string to prevent shell injection.\n\n Args:\n command: Raw command string.\n\n Returns:\n Sanitized command string.\n\n Raises:\n ValueError: If command contains potentially dangerous patterns.\n \"\"\"\n\n # Additional checks for specific risky commands\n risky_commands = [\n \"rm -rf /\",\n \"rm -rf /*\",\n \"mkfs\",\n \"dd if=/dev/zero\",\n \":(){:|:&};:\",\n \"chmod -R 777 /\",\n \"chown -R\",\n ]\n\n for risky in risky_commands:\n if risky in command.lower():\n raise ValueError(\n f\"Command contains potentially dangerous operation: {risky}\"\n )\n\n return command\n\n\nclass AsyncDockerizedTerminal:\n def __init__(\n self,\n container: Union[str, Container],\n working_dir: str = \"/workspace\",\n env_vars: Optional[Dict[str, str]] = None,\n default_timeout: int = 60,\n ) -> None:\n \"\"\"Initializes an asynchronous terminal for Docker containers.\n\n Args:\n container: Docker container ID or Container object.\n working_dir: Working directory inside the container.\n env_vars: Environment variables to set.\n default_timeout: Default command execution timeout in seconds.\n \"\"\"\n self.client = docker.from_env()\n self.container = (\n container\n if isinstance(container, Container)\n else self.client.containers.get(container)\n )\n self.working_dir = working_dir\n self.env_vars = env_vars or {}\n self.default_timeout = default_timeout\n self.session = None\n\n async def init(self) -> None:\n \"\"\"Initializes the terminal environment.\n\n Ensures working directory exists and creates an interactive session.\n\n Raises:\n RuntimeError: If initialization fails.\n \"\"\"\n await self._ensure_workdir()\n\n self.session = DockerSession(self.container.id)\n await self.session.create(self.working_dir, self.env_vars)\n\n async def _ensure_workdir(self) -> None:\n \"\"\"Ensures working directory exists in container.\n\n Raises:\n 
RuntimeError: If directory creation fails.\n \"\"\"\n try:\n await self._exec_simple(f\"mkdir -p {self.working_dir}\")\n except APIError as e:\n raise RuntimeError(f\"Failed to create working directory: {e}\")\n\n async def _exec_simple(self, cmd: str) -> Tuple[int, str]:\n \"\"\"Executes a simple command using Docker's exec_run.\n\n Args:\n cmd: Command to execute.\n\n Returns:\n Tuple of (exit_code, output).\n \"\"\"\n result = await asyncio.to_thread(\n self.container.exec_run, cmd, environment=self.env_vars\n )\n return result.exit_code, result.output.decode(\"utf-8\")\n\n async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs a command in the container with timeout.\n\n Args:\n cmd: Shell command to execute.\n timeout: Maximum execution time in seconds.\n\n Returns:\n Command output as string.\n\n Raises:\n RuntimeError: If terminal not initialized.\n \"\"\"\n if not self.session:\n raise RuntimeError(\"Terminal not initialized\")\n\n return await self.session.execute(cmd, timeout=timeout or self.default_timeout)\n\n async def close(self) -> None:\n \"\"\"Closes the terminal session.\"\"\"\n if self.session:\n await self.session.close()\n\n async def __aenter__(self) -> \"AsyncDockerizedTerminal\":\n \"\"\"Async context manager entry.\"\"\"\n await self.init()\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.close()\n"], ["/OpenManus/app/tool/bash.py", "import asyncio\nimport os\nfrom typing import Optional\n\nfrom app.exceptions import ToolError\nfrom app.tool.base import BaseTool, CLIResult\n\n\n_BASH_DESCRIPTION = \"\"\"Execute a bash command in the terminal.\n* Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. 
command = `python3 app.py > server.log 2>&1 &`.\n* Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process.\n* Timeout: If a command execution result says \"Command timed out. Sending SIGINT to the process\", the assistant should retry running the command in the background.\n\"\"\"\n\n\nclass _BashSession:\n \"\"\"A session of a bash shell.\"\"\"\n\n _started: bool\n _process: asyncio.subprocess.Process\n\n command: str = \"/bin/bash\"\n _output_delay: float = 0.2 # seconds\n _timeout: float = 120.0 # seconds\n _sentinel: str = \"<>\"\n\n def __init__(self):\n self._started = False\n self._timed_out = False\n\n async def start(self):\n if self._started:\n return\n\n self._process = await asyncio.create_subprocess_shell(\n self.command,\n preexec_fn=os.setsid,\n shell=True,\n bufsize=0,\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n self._started = True\n\n def stop(self):\n \"\"\"Terminate the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return\n self._process.terminate()\n\n async def run(self, command: str):\n \"\"\"Execute a command in the bash shell.\"\"\"\n if not self._started:\n raise ToolError(\"Session has not started.\")\n if self._process.returncode is not None:\n return CLIResult(\n system=\"tool must be restarted\",\n error=f\"bash has exited with returncode {self._process.returncode}\",\n )\n if self._timed_out:\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n )\n\n # we know these are not None because we created the process with 
PIPEs\n assert self._process.stdin\n assert self._process.stdout\n assert self._process.stderr\n\n # send command to the process\n self._process.stdin.write(\n command.encode() + f\"; echo '{self._sentinel}'\\n\".encode()\n )\n await self._process.stdin.drain()\n\n # read output from the process, until the sentinel is found\n try:\n async with asyncio.timeout(self._timeout):\n while True:\n await asyncio.sleep(self._output_delay)\n # if we read directly from stdout/stderr, it will wait forever for\n # EOF. use the StreamReader buffer directly instead.\n output = (\n self._process.stdout._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if self._sentinel in output:\n # strip the sentinel and break\n output = output[: output.index(self._sentinel)]\n break\n except asyncio.TimeoutError:\n self._timed_out = True\n raise ToolError(\n f\"timed out: bash has not returned in {self._timeout} seconds and must be restarted\",\n ) from None\n\n if output.endswith(\"\\n\"):\n output = output[:-1]\n\n error = (\n self._process.stderr._buffer.decode()\n ) # pyright: ignore[reportAttributeAccessIssue]\n if error.endswith(\"\\n\"):\n error = error[:-1]\n\n # clear the buffers so that the next output can be read correctly\n self._process.stdout._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n self._process.stderr._buffer.clear() # pyright: ignore[reportAttributeAccessIssue]\n\n return CLIResult(output=output, error=error)\n\n\nclass Bash(BaseTool):\n \"\"\"A tool for executing bash commands\"\"\"\n\n name: str = \"bash\"\n description: str = _BASH_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"type\": \"string\",\n \"description\": \"The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. 
Can be `ctrl+c` to interrupt the currently running process.\",\n },\n },\n \"required\": [\"command\"],\n }\n\n _session: Optional[_BashSession] = None\n\n async def execute(\n self, command: str | None = None, restart: bool = False, **kwargs\n ) -> CLIResult:\n if restart:\n if self._session:\n self._session.stop()\n self._session = _BashSession()\n await self._session.start()\n\n return CLIResult(system=\"tool has been restarted.\")\n\n if self._session is None:\n self._session = _BashSession()\n await self._session.start()\n\n if command is not None:\n return await self._session.run(command)\n\n raise ToolError(\"no command provided.\")\n\n\nif __name__ == \"__main__\":\n bash = Bash()\n rst = asyncio.run(bash.execute(\"ls -l\"))\n print(rst)\n"], ["/OpenManus/protocol/a2a/app/agent.py", "import httpx\nfrom typing import Any, Dict, AsyncIterable, Literal, List, ClassVar\nfrom pydantic import BaseModel\nfrom app.agent.manus import Manus\n\n\nclass ResponseFormat(BaseModel):\n \"\"\"Respond to the user in this format.\"\"\"\n\n status: Literal[\"input_required\", \"completed\", \"error\"] = \"input_required\"\n message: str\n\n\nclass A2AManus(Manus):\n\n async def invoke(self, query, sessionId) -> str:\n config = {\"configurable\": {\"thread_id\": sessionId}}\n response = await self.run(query)\n return self.get_agent_response(config, response)\n\n async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:\n \"\"\"Streaming is not supported by Manus.\"\"\"\n raise NotImplementedError(\"Streaming is not supported by Manus yet.\")\n\n def get_agent_response(self, config, agent_response):\n return {\n \"is_task_complete\": True,\n \"require_user_input\": False,\n \"content\": agent_response,\n }\n\n SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = [\"text\", \"text/plain\"]\n"], ["/OpenManus/app/agent/manus.py", "from typing import Dict, List, Optional\n\nfrom pydantic import Field, model_validator\n\nfrom app.agent.browser import BrowserContextHelper\nfrom 
app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.logger import logger\nfrom app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.ask_human import AskHuman\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.mcp import MCPClients, MCPClientTool\nfrom app.tool.python_execute import PythonExecute\nfrom app.tool.str_replace_editor import StrReplaceEditor\n\n\nclass Manus(ToolCallAgent):\n \"\"\"A versatile general-purpose agent with support for both local and MCP tools.\"\"\"\n\n name: str = \"Manus\"\n description: str = \"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 10000\n max_steps: int = 20\n\n # MCP clients for remote tool access\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n PythonExecute(),\n BrowserUseTool(),\n StrReplaceEditor(),\n AskHuman(),\n Terminate(),\n )\n )\n\n special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])\n browser_context_helper: Optional[BrowserContextHelper] = None\n\n # Track connected MCP servers\n connected_servers: Dict[str, str] = Field(\n default_factory=dict\n ) # server_id -> url/command\n _initialized: bool = False\n\n @model_validator(mode=\"after\")\n def initialize_helper(self) -> \"Manus\":\n \"\"\"Initialize basic components synchronously.\"\"\"\n self.browser_context_helper = BrowserContextHelper(self)\n return self\n\n @classmethod\n async def create(cls, **kwargs) -> \"Manus\":\n \"\"\"Factory method to create and properly initialize a Manus instance.\"\"\"\n instance = cls(**kwargs)\n await instance.initialize_mcp_servers()\n 
instance._initialized = True\n return instance\n\n async def initialize_mcp_servers(self) -> None:\n \"\"\"Initialize connections to configured MCP servers.\"\"\"\n for server_id, server_config in config.mcp_config.servers.items():\n try:\n if server_config.type == \"sse\":\n if server_config.url:\n await self.connect_mcp_server(server_config.url, server_id)\n logger.info(\n f\"Connected to MCP server {server_id} at {server_config.url}\"\n )\n elif server_config.type == \"stdio\":\n if server_config.command:\n await self.connect_mcp_server(\n server_config.command,\n server_id,\n use_stdio=True,\n stdio_args=server_config.args,\n )\n logger.info(\n f\"Connected to MCP server {server_id} using command {server_config.command}\"\n )\n except Exception as e:\n logger.error(f\"Failed to connect to MCP server {server_id}: {e}\")\n\n async def connect_mcp_server(\n self,\n server_url: str,\n server_id: str = \"\",\n use_stdio: bool = False,\n stdio_args: List[str] = None,\n ) -> None:\n \"\"\"Connect to an MCP server and add its tools.\"\"\"\n if use_stdio:\n await self.mcp_clients.connect_stdio(\n server_url, stdio_args or [], server_id\n )\n self.connected_servers[server_id or server_url] = server_url\n else:\n await self.mcp_clients.connect_sse(server_url, server_id)\n self.connected_servers[server_id or server_url] = server_url\n\n # Update available tools with only the new tools from this server\n new_tools = [\n tool for tool in self.mcp_clients.tools if tool.server_id == server_id\n ]\n self.available_tools.add_tools(*new_tools)\n\n async def disconnect_mcp_server(self, server_id: str = \"\") -> None:\n \"\"\"Disconnect from an MCP server and remove its tools.\"\"\"\n await self.mcp_clients.disconnect(server_id)\n if server_id:\n self.connected_servers.pop(server_id, None)\n else:\n self.connected_servers.clear()\n\n # Rebuild available tools without the disconnected server's tools\n base_tools = [\n tool\n for tool in self.available_tools.tools\n if not 
isinstance(tool, MCPClientTool)\n ]\n self.available_tools = ToolCollection(*base_tools)\n self.available_tools.add_tools(*self.mcp_clients.tools)\n\n async def cleanup(self):\n \"\"\"Clean up Manus agent resources.\"\"\"\n if self.browser_context_helper:\n await self.browser_context_helper.cleanup_browser()\n # Disconnect from all MCP servers only if we were initialized\n if self._initialized:\n await self.disconnect_mcp_server()\n self._initialized = False\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next actions with appropriate context.\"\"\"\n if not self._initialized:\n await self.initialize_mcp_servers()\n self._initialized = True\n\n original_prompt = self.next_step_prompt\n recent_messages = self.memory.messages[-3:] if self.memory.messages else []\n browser_in_use = any(\n tc.function.name == BrowserUseTool().name\n for msg in recent_messages\n if msg.tool_calls\n for tc in msg.tool_calls\n )\n\n if browser_in_use:\n self.next_step_prompt = (\n await self.browser_context_helper.format_next_step_prompt()\n )\n\n result = await super().think()\n\n # Restore original prompt\n self.next_step_prompt = original_prompt\n\n return result\n"], ["/OpenManus/app/tool/search/bing_search.py", "from typing import List, Optional, Tuple\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom app.logger import logger\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nABSTRACT_MAX_LENGTH = 300\n\nUSER_AGENTS = [\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/49.0.2623.108 Chrome/49.0.2623.108 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; pt-BR) AppleWebKit/533.3 (KHTML, like Gecko) QtWeb Internet Browser/3.7 http://www.QtWeb.net\",\n \"Mozilla/5.0 (Windows NT 6.1) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.2 (KHTML, like Gecko) ChromePlus/4.0.222.3 Chrome/4.0.222.3 Safari/532.2\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.4pre) Gecko/20070404 K-Ninja/2.1.3\",\n \"Mozilla/5.0 (Future Star Technologies Corp.; Star-Blade OS; x86_64; U; en-US) iNet Browser 4.7\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080414 Firefox/2.0.0.13 Pogo/2.0.0.13.6866\",\n]\n\nHEADERS = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"User-Agent\": USER_AGENTS[0],\n \"Referer\": \"https://www.bing.com/\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n}\n\nBING_HOST_URL = \"https://www.bing.com\"\nBING_SEARCH_URL = \"https://www.bing.com/search?q=\"\n\n\nclass BingSearchEngine(WebSearchEngine):\n session: Optional[requests.Session] = None\n\n def __init__(self, **data):\n \"\"\"Initialize the BingSearch tool with a requests session.\"\"\"\n super().__init__(**data)\n self.session = requests.Session()\n self.session.headers.update(HEADERS)\n\n def _search_sync(self, query: str, num_results: int = 10) -> List[SearchItem]:\n \"\"\"\n Synchronous Bing search implementation to retrieve search results.\n\n Args:\n query (str): The search query to submit to Bing.\n num_results (int, optional): Maximum number of results to return. 
Defaults to 10.\n\n Returns:\n List[SearchItem]: A list of search items with title, URL, and description.\n \"\"\"\n if not query:\n return []\n\n list_result = []\n first = 1\n next_url = BING_SEARCH_URL + query\n\n while len(list_result) < num_results:\n data, next_url = self._parse_html(\n next_url, rank_start=len(list_result), first=first\n )\n if data:\n list_result.extend(data)\n if not next_url:\n break\n first += 10\n\n return list_result[:num_results]\n\n def _parse_html(\n self, url: str, rank_start: int = 0, first: int = 1\n ) -> Tuple[List[SearchItem], str]:\n \"\"\"\n Parse Bing search result HTML to extract search results and the next page URL.\n\n Returns:\n tuple: (List of SearchItem objects, next page URL or None)\n \"\"\"\n try:\n res = self.session.get(url=url)\n res.encoding = \"utf-8\"\n root = BeautifulSoup(res.text, \"lxml\")\n\n list_data = []\n ol_results = root.find(\"ol\", id=\"b_results\")\n if not ol_results:\n return [], None\n\n for li in ol_results.find_all(\"li\", class_=\"b_algo\"):\n title = \"\"\n url = \"\"\n abstract = \"\"\n try:\n h2 = li.find(\"h2\")\n if h2:\n title = h2.text.strip()\n url = h2.a[\"href\"].strip()\n\n p = li.find(\"p\")\n if p:\n abstract = p.text.strip()\n\n if ABSTRACT_MAX_LENGTH and len(abstract) > ABSTRACT_MAX_LENGTH:\n abstract = abstract[:ABSTRACT_MAX_LENGTH]\n\n rank_start += 1\n\n # Create a SearchItem object\n list_data.append(\n SearchItem(\n title=title or f\"Bing Result {rank_start}\",\n url=url,\n description=abstract,\n )\n )\n except Exception:\n continue\n\n next_btn = root.find(\"a\", title=\"Next page\")\n if not next_btn:\n return list_data, None\n\n next_url = BING_HOST_URL + next_btn[\"href\"]\n return list_data, next_url\n except Exception as e:\n logger.warning(f\"Error parsing HTML: {e}\")\n return [], None\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Bing search engine.\n\n Returns results formatted according 
to SearchItem model.\n \"\"\"\n return self._search_sync(query, num_results=num_results)\n"], ["/OpenManus/app/tool/str_replace_editor.py", "\"\"\"File and directory manipulation tool with sandbox support.\"\"\"\n\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Any, DefaultDict, List, Literal, Optional, get_args\n\nfrom app.config import config\nfrom app.exceptions import ToolError\nfrom app.tool import BaseTool\nfrom app.tool.base import CLIResult, ToolResult\nfrom app.tool.file_operators import (\n FileOperator,\n LocalFileOperator,\n PathLike,\n SandboxFileOperator,\n)\n\n\nCommand = Literal[\n \"view\",\n \"create\",\n \"str_replace\",\n \"insert\",\n \"undo_edit\",\n]\n\n# Constants\nSNIPPET_LINES: int = 4\nMAX_RESPONSE_LEN: int = 16000\nTRUNCATED_MESSAGE: str = (\n \"To save on context only part of this file has been shown to you. \"\n \"You should retry this tool after you have searched inside the file with `grep -n` \"\n \"in order to find the line numbers of what you are looking for.\"\n)\n\n# Tool description\n_STR_REPLACE_EDITOR_DESCRIPTION = \"\"\"Custom editing tool for viewing, creating and editing files\n* State is persistent across command calls and discussions with the user\n* If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep\n* The `create` command cannot be used if the specified `path` already exists as a file\n* If a `command` generates a long output, it will be truncated and marked with ``\n* The `undo_edit` command will revert the last edit made to the file at `path`\n\nNotes for using the `str_replace` command:\n* The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces!\n* If the `old_str` parameter is not unique in the file, the replacement will not be performed. 
Make sure to include enough context in `old_str` to make it unique\n* The `new_str` parameter should contain the edited lines that should replace the `old_str`\n\"\"\"\n\n\ndef maybe_truncate(\n content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN\n) -> str:\n \"\"\"Truncate content and append a notice if content exceeds the specified length.\"\"\"\n if not truncate_after or len(content) <= truncate_after:\n return content\n return content[:truncate_after] + TRUNCATED_MESSAGE\n\n\nclass StrReplaceEditor(BaseTool):\n \"\"\"A tool for viewing, creating, and editing files with sandbox support.\"\"\"\n\n name: str = \"str_replace_editor\"\n description: str = _STR_REPLACE_EDITOR_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"command\": {\n \"description\": \"The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.\",\n \"enum\": [\"view\", \"create\", \"str_replace\", \"insert\", \"undo_edit\"],\n \"type\": \"string\",\n },\n \"path\": {\n \"description\": \"Absolute path to file or directory.\",\n \"type\": \"string\",\n },\n \"file_text\": {\n \"description\": \"Required parameter of `create` command, with the content of the file to be created.\",\n \"type\": \"string\",\n },\n \"old_str\": {\n \"description\": \"Required parameter of `str_replace` command containing the string in `path` to replace.\",\n \"type\": \"string\",\n },\n \"new_str\": {\n \"description\": \"Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.\",\n \"type\": \"string\",\n },\n \"insert_line\": {\n \"description\": \"Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.\",\n \"type\": \"integer\",\n },\n \"view_range\": {\n \"description\": \"Optional parameter of `view` command when `path` points to a file. 
If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.\",\n \"items\": {\"type\": \"integer\"},\n \"type\": \"array\",\n },\n },\n \"required\": [\"command\", \"path\"],\n }\n _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list)\n _local_operator: LocalFileOperator = LocalFileOperator()\n _sandbox_operator: SandboxFileOperator = SandboxFileOperator()\n\n # def _get_operator(self, use_sandbox: bool) -> FileOperator:\n def _get_operator(self) -> FileOperator:\n \"\"\"Get the appropriate file operator based on execution mode.\"\"\"\n return (\n self._sandbox_operator\n if config.sandbox.use_sandbox\n else self._local_operator\n )\n\n async def execute(\n self,\n *,\n command: Command,\n path: str,\n file_text: str | None = None,\n view_range: list[int] | None = None,\n old_str: str | None = None,\n new_str: str | None = None,\n insert_line: int | None = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"Execute a file operation command.\"\"\"\n # Get the appropriate file operator\n operator = self._get_operator()\n\n # Validate path and command combination\n await self.validate_path(command, Path(path), operator)\n\n # Execute the appropriate command\n if command == \"view\":\n result = await self.view(path, view_range, operator)\n elif command == \"create\":\n if file_text is None:\n raise ToolError(\"Parameter `file_text` is required for command: create\")\n await operator.write_file(path, file_text)\n self._file_history[path].append(file_text)\n result = ToolResult(output=f\"File created successfully at: {path}\")\n elif command == \"str_replace\":\n if old_str is None:\n raise ToolError(\n \"Parameter `old_str` is required for command: str_replace\"\n )\n result = await self.str_replace(path, old_str, new_str, operator)\n elif command == \"insert\":\n 
if insert_line is None:\n raise ToolError(\n \"Parameter `insert_line` is required for command: insert\"\n )\n if new_str is None:\n raise ToolError(\"Parameter `new_str` is required for command: insert\")\n result = await self.insert(path, insert_line, new_str, operator)\n elif command == \"undo_edit\":\n result = await self.undo_edit(path, operator)\n else:\n # This should be caught by type checking, but we include it for safety\n raise ToolError(\n f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {\", \".join(get_args(Command))}'\n )\n\n return str(result)\n\n async def validate_path(\n self, command: str, path: Path, operator: FileOperator\n ) -> None:\n \"\"\"Validate path and command combination based on execution environment.\"\"\"\n # Check if path is absolute\n if not path.is_absolute():\n raise ToolError(f\"The path {path} is not an absolute path\")\n\n # Only check if path exists for non-create commands\n if command != \"create\":\n if not await operator.exists(path):\n raise ToolError(\n f\"The path {path} does not exist. Please provide a valid path.\"\n )\n\n # Check if path is a directory\n is_dir = await operator.is_directory(path)\n if is_dir and command != \"view\":\n raise ToolError(\n f\"The path {path} is a directory and only the `view` command can be used on directories\"\n )\n\n # Check if file exists for create command\n elif command == \"create\":\n exists = await operator.exists(path)\n if exists:\n raise ToolError(\n f\"File already exists at: {path}. 
Cannot overwrite files using command `create`.\"\n )\n\n async def view(\n self,\n path: PathLike,\n view_range: Optional[List[int]] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Display file or directory content.\"\"\"\n # Determine if path is a directory\n is_dir = await operator.is_directory(path)\n\n if is_dir:\n # Directory handling\n if view_range:\n raise ToolError(\n \"The `view_range` parameter is not allowed when `path` points to a directory.\"\n )\n\n return await self._view_directory(path, operator)\n else:\n # File handling\n return await self._view_file(path, operator, view_range)\n\n @staticmethod\n async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult:\n \"\"\"Display directory contents.\"\"\"\n find_cmd = f\"find {path} -maxdepth 2 -not -path '*/\\\\.*'\"\n\n # Execute command using the operator\n returncode, stdout, stderr = await operator.run_command(find_cmd)\n\n if not stderr:\n stdout = (\n f\"Here's the files and directories up to 2 levels deep in {path}, \"\n f\"excluding hidden items:\\n{stdout}\\n\"\n )\n\n return CLIResult(output=stdout, error=stderr)\n\n async def _view_file(\n self,\n path: PathLike,\n operator: FileOperator,\n view_range: Optional[List[int]] = None,\n ) -> CLIResult:\n \"\"\"Display file content, optionally within a specified line range.\"\"\"\n # Read file content\n file_content = await operator.read_file(path)\n init_line = 1\n\n # Apply view range if specified\n if view_range:\n if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range):\n raise ToolError(\n \"Invalid `view_range`. It should be a list of two integers.\"\n )\n\n file_lines = file_content.split(\"\\n\")\n n_lines_file = len(file_lines)\n init_line, final_line = view_range\n\n # Validate view range\n if init_line < 1 or init_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. 
Its first element `{init_line}` should be \"\n f\"within the range of lines of the file: {[1, n_lines_file]}\"\n )\n if final_line > n_lines_file:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"smaller than the number of lines in the file: `{n_lines_file}`\"\n )\n if final_line != -1 and final_line < init_line:\n raise ToolError(\n f\"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be \"\n f\"larger or equal than its first `{init_line}`\"\n )\n\n # Apply range\n if final_line == -1:\n file_content = \"\\n\".join(file_lines[init_line - 1 :])\n else:\n file_content = \"\\n\".join(file_lines[init_line - 1 : final_line])\n\n # Format and return result\n return CLIResult(\n output=self._make_output(file_content, str(path), init_line=init_line)\n )\n\n async def str_replace(\n self,\n path: PathLike,\n old_str: str,\n new_str: Optional[str] = None,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Replace a unique string in a file with a new string.\"\"\"\n # Read file content and expand tabs\n file_content = (await operator.read_file(path)).expandtabs()\n old_str = old_str.expandtabs()\n new_str = new_str.expandtabs() if new_str is not None else \"\"\n\n # Check if old_str is unique in the file\n occurrences = file_content.count(old_str)\n if occurrences == 0:\n raise ToolError(\n f\"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}.\"\n )\n elif occurrences > 1:\n # Find line numbers of occurrences\n file_content_lines = file_content.split(\"\\n\")\n lines = [\n idx + 1\n for idx, line in enumerate(file_content_lines)\n if old_str in line\n ]\n raise ToolError(\n f\"No replacement was performed. Multiple occurrences of old_str `{old_str}` \"\n f\"in lines {lines}. 
Please ensure it is unique\"\n )\n\n # Replace old_str with new_str\n new_file_content = file_content.replace(old_str, new_str)\n\n # Write the new content to the file\n await operator.write_file(path, new_file_content)\n\n # Save the original content to history\n self._file_history[path].append(file_content)\n\n # Create a snippet of the edited section\n replacement_line = file_content.split(old_str)[0].count(\"\\n\")\n start_line = max(0, replacement_line - SNIPPET_LINES)\n end_line = replacement_line + SNIPPET_LINES + new_str.count(\"\\n\")\n snippet = \"\\n\".join(new_file_content.split(\"\\n\")[start_line : end_line + 1])\n\n # Prepare the success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet, f\"a snippet of {path}\", start_line + 1\n )\n success_msg += \"Review the changes and make sure they are as expected. Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def insert(\n self,\n path: PathLike,\n insert_line: int,\n new_str: str,\n operator: FileOperator = None,\n ) -> CLIResult:\n \"\"\"Insert text at a specific line in a file.\"\"\"\n # Read and prepare content\n file_text = (await operator.read_file(path)).expandtabs()\n new_str = new_str.expandtabs()\n file_text_lines = file_text.split(\"\\n\")\n n_lines_file = len(file_text_lines)\n\n # Validate insert_line\n if insert_line < 0 or insert_line > n_lines_file:\n raise ToolError(\n f\"Invalid `insert_line` parameter: {insert_line}. 
It should be within \"\n f\"the range of lines of the file: {[0, n_lines_file]}\"\n )\n\n # Perform insertion\n new_str_lines = new_str.split(\"\\n\")\n new_file_text_lines = (\n file_text_lines[:insert_line]\n + new_str_lines\n + file_text_lines[insert_line:]\n )\n\n # Create a snippet for preview\n snippet_lines = (\n file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]\n + new_str_lines\n + file_text_lines[insert_line : insert_line + SNIPPET_LINES]\n )\n\n # Join lines and write to file\n new_file_text = \"\\n\".join(new_file_text_lines)\n snippet = \"\\n\".join(snippet_lines)\n\n await operator.write_file(path, new_file_text)\n self._file_history[path].append(file_text)\n\n # Prepare success message\n success_msg = f\"The file {path} has been edited. \"\n success_msg += self._make_output(\n snippet,\n \"a snippet of the edited file\",\n max(1, insert_line - SNIPPET_LINES + 1),\n )\n success_msg += \"Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary.\"\n\n return CLIResult(output=success_msg)\n\n async def undo_edit(\n self, path: PathLike, operator: FileOperator = None\n ) -> CLIResult:\n \"\"\"Revert the last edit made to a file.\"\"\"\n if not self._file_history[path]:\n raise ToolError(f\"No edit history found for {path}.\")\n\n old_text = self._file_history[path].pop()\n await operator.write_file(path, old_text)\n\n return CLIResult(\n output=f\"Last edit to {path} undone successfully. 
{self._make_output(old_text, str(path))}\"\n )\n\n def _make_output(\n self,\n file_content: str,\n file_descriptor: str,\n init_line: int = 1,\n expand_tabs: bool = True,\n ) -> str:\n \"\"\"Format file content for display with line numbers.\"\"\"\n file_content = maybe_truncate(file_content)\n if expand_tabs:\n file_content = file_content.expandtabs()\n\n # Add line numbers to each line\n file_content = \"\\n\".join(\n [\n f\"{i + init_line:6}\\t{line}\"\n for i, line in enumerate(file_content.split(\"\\n\"))\n ]\n )\n\n return (\n f\"Here's the result of running `cat -n` on {file_descriptor}:\\n\"\n + file_content\n + \"\\n\"\n )\n"], ["/OpenManus/protocol/a2a/app/agent_executor.py", "import logging\n\nfrom a2a.server.agent_execution import AgentExecutor, RequestContext\nfrom a2a.server.events import Event, EventQueue\nfrom a2a.server.tasks import TaskUpdater\nfrom a2a.types import (\n InvalidParamsError,\n Part,\n Task,\n TextPart,\n UnsupportedOperationError,\n)\nfrom a2a.utils import (\n completed_task,\n new_artifact,\n)\nfrom .agent import A2AManus\nfrom a2a.utils.errors import ServerError\nfrom typing import Callable, Awaitable\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass ManusExecutor(AgentExecutor):\n \"\"\"Currency Conversion AgentExecutor Example.\"\"\"\n\n def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):\n self.agent_factory = agent_factory\n\n async def execute(\n self,\n context: RequestContext,\n event_queue: EventQueue,\n ) -> None:\n error = self._validate_request(context)\n if error:\n raise ServerError(error=InvalidParamsError())\n\n query = context.get_user_input()\n try:\n self.agent = await self.agent_factory()\n result = await self.agent.invoke(query, context.context_id)\n print(f\"Final Result ===> {result}\")\n except Exception as e:\n print(\"Error invoking agent: %s\", e)\n raise ServerError(error=ValueError(f\"Error invoking agent: {e}\")) from e\n parts = [\n Part(\n 
root=TextPart(\n text=(\n result[\"content\"]\n if result[\"content\"]\n else \"failed to generate response\"\n )\n ),\n )\n ]\n event_queue.enqueue_event(\n completed_task(\n context.task_id,\n context.context_id,\n [new_artifact(parts, f\"task_{context.task_id}\")],\n [context.message],\n )\n )\n\n def _validate_request(self, context: RequestContext) -> bool:\n return False\n\n async def cancel(\n self, request: RequestContext, event_queue: EventQueue\n ) -> Task | None:\n raise ServerError(error=UnsupportedOperationError())\n"], ["/OpenManus/app/agent/mcp.py", "from typing import Any, Dict, List, Optional, Tuple\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.logger import logger\nfrom app.prompt.mcp import MULTIMEDIA_RESPONSE_PROMPT, NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.schema import AgentState, Message\nfrom app.tool.base import ToolResult\nfrom app.tool.mcp import MCPClients\n\n\nclass MCPAgent(ToolCallAgent):\n \"\"\"Agent for interacting with MCP (Model Context Protocol) servers.\n\n This agent connects to an MCP server using either SSE or stdio transport\n and makes the server's tools available through the agent's tool interface.\n \"\"\"\n\n name: str = \"mcp_agent\"\n description: str = \"An agent that connects to an MCP server and uses its tools.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n # Initialize MCP tool collection\n mcp_clients: MCPClients = Field(default_factory=MCPClients)\n available_tools: MCPClients = None # Will be set in initialize()\n\n max_steps: int = 20\n connection_type: str = \"stdio\" # \"stdio\" or \"sse\"\n\n # Track tool schemas to detect changes\n tool_schemas: Dict[str, Dict[str, Any]] = Field(default_factory=dict)\n _refresh_tools_interval: int = 5 # Refresh tools every N steps\n\n # Special tool names that should trigger termination\n special_tool_names: List[str] = Field(default_factory=lambda: [\"terminate\"])\n\n async def 
initialize(\n self,\n connection_type: Optional[str] = None,\n server_url: Optional[str] = None,\n command: Optional[str] = None,\n args: Optional[List[str]] = None,\n ) -> None:\n \"\"\"Initialize the MCP connection.\n\n Args:\n connection_type: Type of connection to use (\"stdio\" or \"sse\")\n server_url: URL of the MCP server (for SSE connection)\n command: Command to run (for stdio connection)\n args: Arguments for the command (for stdio connection)\n \"\"\"\n if connection_type:\n self.connection_type = connection_type\n\n # Connect to the MCP server based on connection type\n if self.connection_type == \"sse\":\n if not server_url:\n raise ValueError(\"Server URL is required for SSE connection\")\n await self.mcp_clients.connect_sse(server_url=server_url)\n elif self.connection_type == \"stdio\":\n if not command:\n raise ValueError(\"Command is required for stdio connection\")\n await self.mcp_clients.connect_stdio(command=command, args=args or [])\n else:\n raise ValueError(f\"Unsupported connection type: {self.connection_type}\")\n\n # Set available_tools to our MCP instance\n self.available_tools = self.mcp_clients\n\n # Store initial tool schemas\n await self._refresh_tools()\n\n # Add system message about available tools\n tool_names = list(self.mcp_clients.tool_map.keys())\n tools_info = \", \".join(tool_names)\n\n # Add system prompt and available tools information\n self.memory.add_message(\n Message.system_message(\n f\"{self.system_prompt}\\n\\nAvailable MCP tools: {tools_info}\"\n )\n )\n\n async def _refresh_tools(self) -> Tuple[List[str], List[str]]:\n \"\"\"Refresh the list of available tools from the MCP server.\n\n Returns:\n A tuple of (added_tools, removed_tools)\n \"\"\"\n if not self.mcp_clients.sessions:\n return [], []\n\n # Get current tool schemas directly from the server\n response = await self.mcp_clients.list_tools()\n current_tools = {tool.name: tool.inputSchema for tool in response.tools}\n\n # Determine added, removed, and 
changed tools\n current_names = set(current_tools.keys())\n previous_names = set(self.tool_schemas.keys())\n\n added_tools = list(current_names - previous_names)\n removed_tools = list(previous_names - current_names)\n\n # Check for schema changes in existing tools\n changed_tools = []\n for name in current_names.intersection(previous_names):\n if current_tools[name] != self.tool_schemas.get(name):\n changed_tools.append(name)\n\n # Update stored schemas\n self.tool_schemas = current_tools\n\n # Log and notify about changes\n if added_tools:\n logger.info(f\"Added MCP tools: {added_tools}\")\n self.memory.add_message(\n Message.system_message(f\"New tools available: {', '.join(added_tools)}\")\n )\n if removed_tools:\n logger.info(f\"Removed MCP tools: {removed_tools}\")\n self.memory.add_message(\n Message.system_message(\n f\"Tools no longer available: {', '.join(removed_tools)}\"\n )\n )\n if changed_tools:\n logger.info(f\"Changed MCP tools: {changed_tools}\")\n\n return added_tools, removed_tools\n\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action.\"\"\"\n # Check MCP session and tools availability\n if not self.mcp_clients.sessions or not self.mcp_clients.tool_map:\n logger.info(\"MCP service is no longer available, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Refresh tools periodically\n if self.current_step % self._refresh_tools_interval == 0:\n await self._refresh_tools()\n # All tools removed indicates shutdown\n if not self.mcp_clients.tool_map:\n logger.info(\"MCP service has shut down, ending interaction\")\n self.state = AgentState.FINISHED\n return False\n\n # Use the parent class's think method\n return await super().think()\n\n async def _handle_special_tool(self, name: str, result: Any, **kwargs) -> None:\n \"\"\"Handle special tool execution and state changes\"\"\"\n # First process with parent handler\n await super()._handle_special_tool(name, result, **kwargs)\n\n # Handle 
multimedia responses\n if isinstance(result, ToolResult) and result.base64_image:\n self.memory.add_message(\n Message.system_message(\n MULTIMEDIA_RESPONSE_PROMPT.format(tool_name=name)\n )\n )\n\n def _should_finish_execution(self, name: str, **kwargs) -> bool:\n \"\"\"Determine if tool execution should finish the agent\"\"\"\n # Terminate if the tool name is 'terminate'\n return name.lower() == \"terminate\"\n\n async def cleanup(self) -> None:\n \"\"\"Clean up MCP connection when done.\"\"\"\n if self.mcp_clients.sessions:\n await self.mcp_clients.disconnect()\n logger.info(\"MCP connection closed\")\n\n async def run(self, request: Optional[str] = None) -> str:\n \"\"\"Run the agent with cleanup when done.\"\"\"\n try:\n result = await super().run(request)\n return result\n finally:\n # Ensure cleanup happens even if there's an error\n await self.cleanup()\n"], ["/OpenManus/app/tool/python_execute.py", "import multiprocessing\nimport sys\nfrom io import StringIO\nfrom typing import Dict\n\nfrom app.tool.base import BaseTool\n\n\nclass PythonExecute(BaseTool):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
Use print statements to see results.\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\",\n \"description\": \"The Python code to execute.\",\n },\n },\n \"required\": [\"code\"],\n }\n\n def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None:\n original_stdout = sys.stdout\n try:\n output_buffer = StringIO()\n sys.stdout = output_buffer\n exec(code, safe_globals, safe_globals)\n result_dict[\"observation\"] = output_buffer.getvalue()\n result_dict[\"success\"] = True\n except Exception as e:\n result_dict[\"observation\"] = str(e)\n result_dict[\"success\"] = False\n finally:\n sys.stdout = original_stdout\n\n async def execute(\n self,\n code: str,\n timeout: int = 5,\n ) -> Dict:\n \"\"\"\n Executes the provided Python code with a timeout.\n\n Args:\n code (str): The Python code to execute.\n timeout (int): Execution timeout in seconds.\n\n Returns:\n Dict: Contains 'output' with execution output or error message and 'success' status.\n \"\"\"\n\n with multiprocessing.Manager() as manager:\n result = manager.dict({\"observation\": \"\", \"success\": False})\n if isinstance(__builtins__, dict):\n safe_globals = {\"__builtins__\": __builtins__}\n else:\n safe_globals = {\"__builtins__\": __builtins__.__dict__.copy()}\n proc = multiprocessing.Process(\n target=self._run_code, args=(code, result, safe_globals)\n )\n proc.start()\n proc.join(timeout)\n\n # timeout process\n if proc.is_alive():\n proc.terminate()\n proc.join(1)\n return {\n \"observation\": f\"Execution timeout after {timeout} seconds\",\n \"success\": False,\n }\n return dict(result)\n"], ["/OpenManus/app/tool/search/duckduckgo_search.py", "from typing import List\n\nfrom duckduckgo_search import DDGS\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass DuckDuckGoSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n 
\"\"\"\n DuckDuckGo search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = DDGS().text(query, max_results=num_results)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\", url=item, description=None\n )\n )\n elif isinstance(item, dict):\n # Extract data from the dictionary\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=item.get(\"href\", \"\"),\n description=item.get(\"body\", None),\n )\n )\n else:\n # Try to extract attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"DuckDuckGo Result {i + 1}\"),\n url=getattr(item, \"href\", \"\"),\n description=getattr(item, \"body\", None),\n )\n )\n except Exception:\n # Fallback\n results.append(\n SearchItem(\n title=f\"DuckDuckGo Result {i + 1}\",\n url=str(item),\n description=None,\n )\n )\n\n return results\n"], ["/OpenManus/app/sandbox/core/manager.py", "import asyncio\nimport uuid\nfrom contextlib import asynccontextmanager\nfrom typing import Dict, Optional, Set\n\nimport docker\nfrom docker.errors import APIError, ImageNotFound\n\nfrom app.config import SandboxSettings\nfrom app.logger import logger\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxManager:\n \"\"\"Docker sandbox manager.\n\n Manages multiple DockerSandbox instances lifecycle including creation,\n monitoring, and cleanup. 
Provides concurrent access control and automatic\n cleanup mechanisms for sandbox resources.\n\n Attributes:\n max_sandboxes: Maximum allowed number of sandboxes.\n idle_timeout: Sandbox idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n _sandboxes: Active sandbox instance mapping.\n _last_used: Last used time record for sandboxes.\n \"\"\"\n\n def __init__(\n self,\n max_sandboxes: int = 100,\n idle_timeout: int = 3600,\n cleanup_interval: int = 300,\n ):\n \"\"\"Initializes sandbox manager.\n\n Args:\n max_sandboxes: Maximum sandbox count limit.\n idle_timeout: Idle timeout in seconds.\n cleanup_interval: Cleanup check interval in seconds.\n \"\"\"\n self.max_sandboxes = max_sandboxes\n self.idle_timeout = idle_timeout\n self.cleanup_interval = cleanup_interval\n\n # Docker client\n self._client = docker.from_env()\n\n # Resource mappings\n self._sandboxes: Dict[str, DockerSandbox] = {}\n self._last_used: Dict[str, float] = {}\n\n # Concurrency control\n self._locks: Dict[str, asyncio.Lock] = {}\n self._global_lock = asyncio.Lock()\n self._active_operations: Set[str] = set()\n\n # Cleanup task\n self._cleanup_task: Optional[asyncio.Task] = None\n self._is_shutting_down = False\n\n # Start automatic cleanup\n self.start_cleanup_task()\n\n async def ensure_image(self, image: str) -> bool:\n \"\"\"Ensures Docker image is available.\n\n Args:\n image: Image name.\n\n Returns:\n bool: Whether image is available.\n \"\"\"\n try:\n self._client.images.get(image)\n return True\n except ImageNotFound:\n try:\n logger.info(f\"Pulling image {image}...\")\n await asyncio.get_event_loop().run_in_executor(\n None, self._client.images.pull, image\n )\n return True\n except (APIError, Exception) as e:\n logger.error(f\"Failed to pull image {image}: {e}\")\n return False\n\n @asynccontextmanager\n async def sandbox_operation(self, sandbox_id: str):\n \"\"\"Context manager for sandbox operations.\n\n Provides concurrency control and usage time 
updates.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Raises:\n KeyError: If sandbox not found.\n \"\"\"\n if sandbox_id not in self._locks:\n self._locks[sandbox_id] = asyncio.Lock()\n\n async with self._locks[sandbox_id]:\n if sandbox_id not in self._sandboxes:\n raise KeyError(f\"Sandbox {sandbox_id} not found\")\n\n self._active_operations.add(sandbox_id)\n try:\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n yield self._sandboxes[sandbox_id]\n finally:\n self._active_operations.remove(sandbox_id)\n\n async def create_sandbox(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> str:\n \"\"\"Creates a new sandbox instance.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mapping configuration.\n\n Returns:\n str: Sandbox ID.\n\n Raises:\n RuntimeError: If max sandbox count reached or creation fails.\n \"\"\"\n async with self._global_lock:\n if len(self._sandboxes) >= self.max_sandboxes:\n raise RuntimeError(\n f\"Maximum number of sandboxes ({self.max_sandboxes}) reached\"\n )\n\n config = config or SandboxSettings()\n if not await self.ensure_image(config.image):\n raise RuntimeError(f\"Failed to ensure Docker image: {config.image}\")\n\n sandbox_id = str(uuid.uuid4())\n try:\n sandbox = DockerSandbox(config, volume_bindings)\n await sandbox.create()\n\n self._sandboxes[sandbox_id] = sandbox\n self._last_used[sandbox_id] = asyncio.get_event_loop().time()\n self._locks[sandbox_id] = asyncio.Lock()\n\n logger.info(f\"Created sandbox {sandbox_id}\")\n return sandbox_id\n\n except Exception as e:\n logger.error(f\"Failed to create sandbox: {e}\")\n if sandbox_id in self._sandboxes:\n await self.delete_sandbox(sandbox_id)\n raise RuntimeError(f\"Failed to create sandbox: {e}\")\n\n async def get_sandbox(self, sandbox_id: str) -> DockerSandbox:\n \"\"\"Gets a sandbox instance.\n\n Args:\n sandbox_id: Sandbox ID.\n\n Returns:\n DockerSandbox: Sandbox instance.\n\n Raises:\n 
KeyError: If sandbox does not exist.\n \"\"\"\n async with self.sandbox_operation(sandbox_id) as sandbox:\n return sandbox\n\n def start_cleanup_task(self) -> None:\n \"\"\"Starts automatic cleanup task.\"\"\"\n\n async def cleanup_loop():\n while not self._is_shutting_down:\n try:\n await self._cleanup_idle_sandboxes()\n except Exception as e:\n logger.error(f\"Error in cleanup loop: {e}\")\n await asyncio.sleep(self.cleanup_interval)\n\n self._cleanup_task = asyncio.create_task(cleanup_loop())\n\n async def _cleanup_idle_sandboxes(self) -> None:\n \"\"\"Cleans up idle sandboxes.\"\"\"\n current_time = asyncio.get_event_loop().time()\n to_cleanup = []\n\n async with self._global_lock:\n for sandbox_id, last_used in self._last_used.items():\n if (\n sandbox_id not in self._active_operations\n and current_time - last_used > self.idle_timeout\n ):\n to_cleanup.append(sandbox_id)\n\n for sandbox_id in to_cleanup:\n try:\n await self.delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Error cleaning up sandbox {sandbox_id}: {e}\")\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up all resources.\"\"\"\n logger.info(\"Starting manager cleanup...\")\n self._is_shutting_down = True\n\n # Cancel cleanup task\n if self._cleanup_task:\n self._cleanup_task.cancel()\n try:\n await asyncio.wait_for(self._cleanup_task, timeout=1.0)\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n\n # Get all sandbox IDs to clean up\n async with self._global_lock:\n sandbox_ids = list(self._sandboxes.keys())\n\n # Concurrently clean up all sandboxes\n cleanup_tasks = []\n for sandbox_id in sandbox_ids:\n task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id))\n cleanup_tasks.append(task)\n\n if cleanup_tasks:\n # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting\n try:\n await asyncio.wait(cleanup_tasks, timeout=30.0)\n except asyncio.TimeoutError:\n logger.error(\"Sandbox cleanup timed out\")\n\n # Clean up remaining 
references\n self._sandboxes.clear()\n self._last_used.clear()\n self._locks.clear()\n self._active_operations.clear()\n\n logger.info(\"Manager cleanup completed\")\n\n async def _safe_delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Safely deletes a single sandbox.\n\n Args:\n sandbox_id: Sandbox ID to delete.\n \"\"\"\n try:\n if sandbox_id in self._active_operations:\n logger.warning(\n f\"Sandbox {sandbox_id} has active operations, waiting for completion\"\n )\n for _ in range(10): # Wait at most 10 times\n await asyncio.sleep(0.5)\n if sandbox_id not in self._active_operations:\n break\n else:\n logger.warning(\n f\"Timeout waiting for sandbox {sandbox_id} operations to complete\"\n )\n\n # Get reference to sandbox object\n sandbox = self._sandboxes.get(sandbox_id)\n if sandbox:\n await sandbox.cleanup()\n\n # Remove sandbox record from manager\n async with self._global_lock:\n self._sandboxes.pop(sandbox_id, None)\n self._last_used.pop(sandbox_id, None)\n self._locks.pop(sandbox_id, None)\n logger.info(f\"Deleted sandbox {sandbox_id}\")\n except Exception as e:\n logger.error(f\"Error during cleanup of sandbox {sandbox_id}: {e}\")\n\n async def delete_sandbox(self, sandbox_id: str) -> None:\n \"\"\"Deletes specified sandbox.\n\n Args:\n sandbox_id: Sandbox ID.\n \"\"\"\n if sandbox_id not in self._sandboxes:\n return\n\n try:\n await self._safe_delete_sandbox(sandbox_id)\n except Exception as e:\n logger.error(f\"Failed to delete sandbox {sandbox_id}: {e}\")\n\n async def __aenter__(self) -> \"SandboxManager\":\n \"\"\"Async context manager entry.\"\"\"\n return self\n\n async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:\n \"\"\"Async context manager exit.\"\"\"\n await self.cleanup()\n\n def get_stats(self) -> Dict:\n \"\"\"Gets manager statistics.\n\n Returns:\n Dict: Statistics information.\n \"\"\"\n return {\n \"total_sandboxes\": len(self._sandboxes),\n \"active_operations\": len(self._active_operations),\n \"max_sandboxes\": 
self.max_sandboxes,\n \"idle_timeout\": self.idle_timeout,\n \"cleanup_interval\": self.cleanup_interval,\n \"is_shutting_down\": self._is_shutting_down,\n }\n"], ["/OpenManus/app/tool/search/baidu_search.py", "from typing import List\n\nfrom baidusearch.baidusearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass BaiduSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Baidu search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results)\n\n # Convert raw results to SearchItem format\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n SearchItem(title=f\"Baidu Result {i+1}\", url=item, description=None)\n )\n elif isinstance(item, dict):\n # If it's a dictionary with details\n results.append(\n SearchItem(\n title=item.get(\"title\", f\"Baidu Result {i+1}\"),\n url=item.get(\"url\", \"\"),\n description=item.get(\"abstract\", None),\n )\n )\n else:\n # Try to get attributes directly\n try:\n results.append(\n SearchItem(\n title=getattr(item, \"title\", f\"Baidu Result {i+1}\"),\n url=getattr(item, \"url\", \"\"),\n description=getattr(item, \"abstract\", None),\n )\n )\n except Exception:\n # Fallback to a basic result\n results.append(\n SearchItem(\n title=f\"Baidu Result {i+1}\", url=str(item), description=None\n )\n )\n\n return results\n"], ["/OpenManus/app/tool/base.py", "from abc import ABC, abstractmethod\nfrom typing import Any, Dict, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass BaseTool(ABC, BaseModel):\n name: str\n description: str\n parameters: Optional[dict] = None\n\n class Config:\n arbitrary_types_allowed = True\n\n async def __call__(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n return await 
self.execute(**kwargs)\n\n @abstractmethod\n async def execute(self, **kwargs) -> Any:\n \"\"\"Execute the tool with given parameters.\"\"\"\n\n def to_param(self) -> Dict:\n \"\"\"Convert tool to function call format.\"\"\"\n return {\n \"type\": \"function\",\n \"function\": {\n \"name\": self.name,\n \"description\": self.description,\n \"parameters\": self.parameters,\n },\n }\n\n\nclass ToolResult(BaseModel):\n \"\"\"Represents the result of a tool execution.\"\"\"\n\n output: Any = Field(default=None)\n error: Optional[str] = Field(default=None)\n base64_image: Optional[str] = Field(default=None)\n system: Optional[str] = Field(default=None)\n\n class Config:\n arbitrary_types_allowed = True\n\n def __bool__(self):\n return any(getattr(self, field) for field in self.__fields__)\n\n def __add__(self, other: \"ToolResult\"):\n def combine_fields(\n field: Optional[str], other_field: Optional[str], concatenate: bool = True\n ):\n if field and other_field:\n if concatenate:\n return field + other_field\n raise ValueError(\"Cannot combine tool results\")\n return field or other_field\n\n return ToolResult(\n output=combine_fields(self.output, other.output),\n error=combine_fields(self.error, other.error),\n base64_image=combine_fields(self.base64_image, other.base64_image, False),\n system=combine_fields(self.system, other.system),\n )\n\n def __str__(self):\n return f\"Error: {self.error}\" if self.error else self.output\n\n def replace(self, **kwargs):\n \"\"\"Returns a new ToolResult with the given fields replaced.\"\"\"\n # return self.copy(update=kwargs)\n return type(self)(**{**self.dict(), **kwargs})\n\n\nclass CLIResult(ToolResult):\n \"\"\"A ToolResult that can be rendered as a CLI output.\"\"\"\n\n\nclass ToolFailure(ToolResult):\n \"\"\"A ToolResult that represents a failure.\"\"\"\n"], ["/OpenManus/protocol/a2a/app/main.py", "import httpx\nimport argparse\n\nfrom a2a.server.apps import A2AStarletteApplication\nfrom a2a.server.request_handlers import 
DefaultRequestHandler\nfrom a2a.server.tasks import InMemoryTaskStore, InMemoryPushNotifier\nfrom a2a.types import (\n AgentCapabilities,\n AgentCard,\n AgentSkill,\n)\n\nfrom .agent_executor import ManusExecutor\n\nfrom .agent import A2AManus\nfrom app.tool.browser_use_tool import _BROWSER_DESCRIPTION\nfrom app.tool.str_replace_editor import _STR_REPLACE_EDITOR_DESCRIPTION\nfrom app.tool.terminate import _TERMINATE_DESCRIPTION\nimport logging\nfrom dotenv import load_dotenv\nimport asyncio\nfrom typing import Optional\n\nload_dotenv()\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nasync def main(host: str = \"localhost\", port: int = 10000):\n \"\"\"Starts the Manus Agent server.\"\"\"\n try:\n capabilities = AgentCapabilities(streaming=False, pushNotifications=True)\n skills = [\n AgentSkill(\n id=\"Python Execute\",\n name=\"Python Execute Tool\",\n description=\"Executes Python code string. Note: Only print outputs are visible, function return values are not captured. 
Use print statements to see results.\",\n tags=[\"Execute Python Code\"],\n examples=[\n \"Execute Python code:'''python \\n Print('Hello World') \\n '''\"\n ],\n ),\n AgentSkill(\n id=\"Browser use\",\n name=\"Browser use Tool\",\n description=_BROWSER_DESCRIPTION,\n tags=[\"Use Browser\"],\n examples=[\"go_to 'https://www.google.com'\"],\n ),\n AgentSkill(\n id=\"Replace String\",\n name=\"Str_replace Tool\",\n description=_STR_REPLACE_EDITOR_DESCRIPTION,\n tags=[\"Operate Files\"],\n examples=[\"Replace 'old' with 'new' in 'file.txt'\"],\n ),\n AgentSkill(\n id=\"Ask human\",\n name=\"Ask human Tool\",\n description=\"Use this tool to ask human for help.\",\n tags=[\"Ask human for help\"],\n examples=[\"Ask human: 'What time is it?'\"],\n ),\n AgentSkill(\n id=\"terminate\",\n name=\"terminate Tool\",\n description=_TERMINATE_DESCRIPTION,\n tags=[\"terminate task\"],\n examples=[\"terminate\"],\n ),\n # Add more skills as needed\n ]\n\n agent_card = AgentCard(\n name=\"Manus Agent\",\n description=\"A versatile agent that can solve various tasks using multiple tools including MCP-based tools\",\n url=f\"http://{host}:{port}/\",\n version=\"1.0.0\",\n defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,\n capabilities=capabilities,\n skills=skills,\n )\n\n httpx_client = httpx.AsyncClient()\n request_handler = DefaultRequestHandler(\n agent_executor=ManusExecutor(\n agent_factory=lambda: A2AManus.create(max_steps=3)\n ),\n task_store=InMemoryTaskStore(),\n push_notifier=InMemoryPushNotifier(httpx_client),\n )\n\n server = A2AStarletteApplication(\n agent_card=agent_card, http_handler=request_handler\n )\n\n logger.info(f\"Starting server on {host}:{port}\")\n return server.build()\n except Exception as e:\n logger.error(f\"An error occurred during server startup: {e}\")\n exit(1)\n\n\ndef run_server(host: Optional[str] = \"localhost\", port: Optional[int] = 10000):\n try:\n import uvicorn\n\n app = 
asyncio.run(main(host, port))\n config = uvicorn.Config(\n app=app, host=host, port=port, loop=\"asyncio\", proxy_headers=True\n )\n uvicorn.Server(config=config).run()\n logger.info(f\"Server started on {host}:{port}\")\n except Exception as e:\n logger.error(f\"An error occurred while starting the server: {e}\")\n\n\nif __name__ == \"__main__\":\n # Parse command line arguments for host and port, with default values\n parser = argparse.ArgumentParser(description=\"Start Manus Agent service\")\n parser.add_argument(\n \"--host\",\n type=str,\n default=\"localhost\",\n help=\"Server host address, default is localhost\",\n )\n parser.add_argument(\n \"--port\", type=int, default=10000, help=\"Server port, default is 10000\"\n )\n args = parser.parse_args()\n # Start the server with the specified or default host and port\n run_server(args.host, args.port)\n"], ["/OpenManus/run_mcp.py", "#!/usr/bin/env python\nimport argparse\nimport asyncio\nimport sys\n\nfrom app.agent.mcp import MCPAgent\nfrom app.config import config\nfrom app.logger import logger\n\n\nclass MCPRunner:\n \"\"\"Runner class for MCP Agent with proper path handling and configuration.\"\"\"\n\n def __init__(self):\n self.root_path = config.root_path\n self.server_reference = config.mcp_config.server_reference\n self.agent = MCPAgent()\n\n async def initialize(\n self,\n connection_type: str,\n server_url: str | None = None,\n ) -> None:\n \"\"\"Initialize the MCP agent with the appropriate connection.\"\"\"\n logger.info(f\"Initializing MCPAgent with {connection_type} connection...\")\n\n if connection_type == \"stdio\":\n await self.agent.initialize(\n connection_type=\"stdio\",\n command=sys.executable,\n args=[\"-m\", self.server_reference],\n )\n else: # sse\n await self.agent.initialize(connection_type=\"sse\", server_url=server_url)\n\n logger.info(f\"Connected to MCP server via {connection_type}\")\n\n async def run_interactive(self) -> None:\n \"\"\"Run the agent in interactive mode.\"\"\"\n 
print(\"\\nMCP Agent Interactive Mode (type 'exit' to quit)\\n\")\n while True:\n user_input = input(\"\\nEnter your request: \")\n if user_input.lower() in [\"exit\", \"quit\", \"q\"]:\n break\n response = await self.agent.run(user_input)\n print(f\"\\nAgent: {response}\")\n\n async def run_single_prompt(self, prompt: str) -> None:\n \"\"\"Run the agent with a single prompt.\"\"\"\n await self.agent.run(prompt)\n\n async def run_default(self) -> None:\n \"\"\"Run the agent in default mode.\"\"\"\n prompt = input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await self.agent.run(prompt)\n logger.info(\"Request processing completed.\")\n\n async def cleanup(self) -> None:\n \"\"\"Clean up agent resources.\"\"\"\n await self.agent.cleanup()\n logger.info(\"Session ended\")\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Run the MCP Agent\")\n parser.add_argument(\n \"--connection\",\n \"-c\",\n choices=[\"stdio\", \"sse\"],\n default=\"stdio\",\n help=\"Connection type: stdio or sse\",\n )\n parser.add_argument(\n \"--server-url\",\n default=\"http://127.0.0.1:8000/sse\",\n help=\"URL for SSE connection\",\n )\n parser.add_argument(\n \"--interactive\", \"-i\", action=\"store_true\", help=\"Run in interactive mode\"\n )\n parser.add_argument(\"--prompt\", \"-p\", help=\"Single prompt to execute and exit\")\n return parser.parse_args()\n\n\nasync def run_mcp() -> None:\n \"\"\"Main entry point for the MCP runner.\"\"\"\n args = parse_args()\n runner = MCPRunner()\n\n try:\n await runner.initialize(args.connection, args.server_url)\n\n if args.prompt:\n await runner.run_single_prompt(args.prompt)\n elif args.interactive:\n await runner.run_interactive()\n else:\n await runner.run_default()\n\n except KeyboardInterrupt:\n logger.info(\"Program interrupted by user\")\n 
except Exception as e:\n logger.error(f\"Error running MCPAgent: {str(e)}\", exc_info=True)\n sys.exit(1)\n finally:\n await runner.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_mcp())\n"], ["/OpenManus/app/tool/chart_visualization/python_execute.py", "from app.config import config\nfrom app.tool.python_execute import PythonExecute\n\n\nclass NormalPythonExecute(PythonExecute):\n \"\"\"A tool for executing Python code with timeout and safety restrictions.\"\"\"\n\n name: str = \"python_execute\"\n description: str = \"\"\"Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization.\"\"\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, data process / data report / others\",\n \"type\": \"string\",\n \"default\": \"process\",\n \"enum\": [\"process\", \"report\", \"others\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code to execute.\n# Note\n1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights.\n2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also\n3. Save any report / processed files / each analysis result in worksapce directory: {directory}\n4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization.\n5. 
You can invode this tool step-by-step to do data analysis from summary to in-depth with data report saved also\"\"\".format(\n directory=config.workspace_root\n ),\n },\n },\n \"required\": [\"code\"],\n }\n\n async def execute(self, code: str, code_type: str | None = None, timeout=5):\n return await super().execute(code, timeout)\n"], ["/OpenManus/app/tool/tool_collection.py", "\"\"\"Collection classes for managing multiple tools.\"\"\"\nfrom typing import Any, Dict, List\n\nfrom app.exceptions import ToolError\nfrom app.logger import logger\nfrom app.tool.base import BaseTool, ToolFailure, ToolResult\n\n\nclass ToolCollection:\n \"\"\"A collection of defined tools.\"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(self, *tools: BaseTool):\n self.tools = tools\n self.tool_map = {tool.name: tool for tool in tools}\n\n def __iter__(self):\n return iter(self.tools)\n\n def to_params(self) -> List[Dict[str, Any]]:\n return [tool.to_param() for tool in self.tools]\n\n async def execute(\n self, *, name: str, tool_input: Dict[str, Any] = None\n ) -> ToolResult:\n tool = self.tool_map.get(name)\n if not tool:\n return ToolFailure(error=f\"Tool {name} is invalid\")\n try:\n result = await tool(**tool_input)\n return result\n except ToolError as e:\n return ToolFailure(error=e.message)\n\n async def execute_all(self) -> List[ToolResult]:\n \"\"\"Execute all tools in the collection sequentially.\"\"\"\n results = []\n for tool in self.tools:\n try:\n result = await tool()\n results.append(result)\n except ToolError as e:\n results.append(ToolFailure(error=e.message))\n return results\n\n def get_tool(self, name: str) -> BaseTool:\n return self.tool_map.get(name)\n\n def add_tool(self, tool: BaseTool):\n \"\"\"Add a single tool to the collection.\n\n If a tool with the same name already exists, it will be skipped and a warning will be logged.\n \"\"\"\n if tool.name in self.tool_map:\n logger.warning(f\"Tool {tool.name} already exists in collection, 
skipping\")\n return self\n\n self.tools += (tool,)\n self.tool_map[tool.name] = tool\n return self\n\n def add_tools(self, *tools: BaseTool):\n \"\"\"Add multiple tools to the collection.\n\n If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged.\n \"\"\"\n for tool in tools:\n self.add_tool(tool)\n return self\n"], ["/OpenManus/app/tool/search/google_search.py", "from typing import List\n\nfrom googlesearch import search\n\nfrom app.tool.search.base import SearchItem, WebSearchEngine\n\n\nclass GoogleSearchEngine(WebSearchEngine):\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Google search engine.\n\n Returns results formatted according to SearchItem model.\n \"\"\"\n raw_results = search(query, num_results=num_results, advanced=True)\n\n results = []\n for i, item in enumerate(raw_results):\n if isinstance(item, str):\n # If it's just a URL\n results.append(\n {\"title\": f\"Google Result {i+1}\", \"url\": item, \"description\": \"\"}\n )\n else:\n results.append(\n SearchItem(\n title=item.title, url=item.url, description=item.description\n )\n )\n\n return results\n"], ["/OpenManus/run_flow.py", "import asyncio\nimport time\n\nfrom app.agent.data_analysis import DataAnalysis\nfrom app.agent.manus import Manus\nfrom app.config import config\nfrom app.flow.flow_factory import FlowFactory, FlowType\nfrom app.logger import logger\n\n\nasync def run_flow():\n agents = {\n \"manus\": Manus(),\n }\n if config.run_flow_config.use_data_analysis_agent:\n agents[\"data_analysis\"] = DataAnalysis()\n try:\n prompt = input(\"Enter your prompt: \")\n\n if prompt.strip().isspace() or not prompt:\n logger.warning(\"Empty prompt provided.\")\n return\n\n flow = FlowFactory.create_flow(\n flow_type=FlowType.PLANNING,\n agents=agents,\n )\n logger.warning(\"Processing your request...\")\n\n try:\n start_time = time.time()\n result = await asyncio.wait_for(\n 
flow.execute(prompt),\n timeout=3600, # 60 minute timeout for the entire execution\n )\n elapsed_time = time.time() - start_time\n logger.info(f\"Request processed in {elapsed_time:.2f} seconds\")\n logger.info(result)\n except asyncio.TimeoutError:\n logger.error(\"Request processing timed out after 1 hour\")\n logger.info(\n \"Operation terminated due to timeout. Please try a simpler request.\"\n )\n\n except KeyboardInterrupt:\n logger.info(\"Operation cancelled by user.\")\n except Exception as e:\n logger.error(f\"Error: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(run_flow())\n"], ["/OpenManus/app/flow/base.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, List, Optional, Union\n\nfrom pydantic import BaseModel\n\nfrom app.agent.base import BaseAgent\n\n\nclass BaseFlow(BaseModel, ABC):\n \"\"\"Base class for execution flows supporting multiple agents\"\"\"\n\n agents: Dict[str, BaseAgent]\n tools: Optional[List] = None\n primary_agent_key: Optional[str] = None\n\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data\n ):\n # Handle different ways of providing agents\n if isinstance(agents, BaseAgent):\n agents_dict = {\"default\": agents}\n elif isinstance(agents, list):\n agents_dict = {f\"agent_{i}\": agent for i, agent in enumerate(agents)}\n else:\n agents_dict = agents\n\n # If primary agent not specified, use first agent\n primary_key = data.get(\"primary_agent_key\")\n if not primary_key and agents_dict:\n primary_key = next(iter(agents_dict))\n data[\"primary_agent_key\"] = primary_key\n\n # Set the agents dictionary\n data[\"agents\"] = agents_dict\n\n # Initialize using BaseModel's init\n super().__init__(**data)\n\n @property\n def primary_agent(self) -> Optional[BaseAgent]:\n \"\"\"Get the primary agent for the flow\"\"\"\n return self.agents.get(self.primary_agent_key)\n\n def get_agent(self, key: str) -> 
Optional[BaseAgent]:\n \"\"\"Get a specific agent by key\"\"\"\n return self.agents.get(key)\n\n def add_agent(self, key: str, agent: BaseAgent) -> None:\n \"\"\"Add a new agent to the flow\"\"\"\n self.agents[key] = agent\n\n @abstractmethod\n async def execute(self, input_text: str) -> str:\n \"\"\"Execute the flow with given input\"\"\"\n"], ["/OpenManus/app/tool/chart_visualization/chart_prepare.py", "from app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass VisualizationPrepare(NormalPythonExecute):\n \"\"\"A tool for Chart Generation Preparation\"\"\"\n\n name: str = \"visualization_preparation\"\n description: str = \"Using Python code to generates metadata of data_visualization tool. Outputs: 1) JSON Information. 2) Cleaned CSV data files (Optional).\"\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"code_type\": {\n \"description\": \"code type, visualization: csv -> chart; insight: choose insight into chart\",\n \"type\": \"string\",\n \"default\": \"visualization\",\n \"enum\": [\"visualization\", \"insight\"],\n },\n \"code\": {\n \"type\": \"string\",\n \"description\": \"\"\"Python code for data_visualization prepare.\n## Visualization Type\n1. Data loading logic\n2. Csv Data and chart description generate\n2.1 Csv data (The data you want to visulazation, cleaning / transform from origin data, saved in .csv)\n2.2 Chart description of csv data (The chart title or description should be concise and clear. Examples: 'Product sales distribution', 'Monthly revenue trend'.)\n3. Save information in json file.( format: {\"csvFilePath\": string, \"chartTitle\": string}[])\n## Insight Type\n1. Select the insights from the data_visualization results that you want to add to the chart.\n2. Save information in json file.( format: {\"chartPath\": string, \"insights_id\": number[]}[])\n# Note\n1. You can generate one or multiple csv data with different visualization needs.\n2. 
Make each chart data esay, clean and different.\n3. Json file saving in utf-8 with path print: print(json_path)\n\"\"\",\n },\n },\n \"required\": [\"code\", \"code_type\"],\n }\n"], ["/OpenManus/app/tool/file_operators.py", "\"\"\"File operation interfaces and implementations for local and sandbox environments.\"\"\"\n\nimport asyncio\nfrom pathlib import Path\nfrom typing import Optional, Protocol, Tuple, Union, runtime_checkable\n\nfrom app.config import SandboxSettings\nfrom app.exceptions import ToolError\nfrom app.sandbox.client import SANDBOX_CLIENT\n\n\nPathLike = Union[str, Path]\n\n\n@runtime_checkable\nclass FileOperator(Protocol):\n \"\"\"Interface for file operations in different environments.\"\"\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file.\"\"\"\n ...\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file.\"\"\"\n ...\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n ...\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n ...\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command and return (return_code, stdout, stderr).\"\"\"\n ...\n\n\nclass LocalFileOperator(FileOperator):\n \"\"\"File operations implementation for local filesystem.\"\"\"\n\n encoding: str = \"utf-8\"\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a local file.\"\"\"\n try:\n return Path(path).read_text(encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to read {path}: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a local file.\"\"\"\n try:\n Path(path).write_text(content, encoding=self.encoding)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path}: {str(e)}\") from 
None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if path points to a directory.\"\"\"\n return Path(path).is_dir()\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists.\"\"\"\n return Path(path).exists()\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a shell command locally.\"\"\"\n process = await asyncio.create_subprocess_shell(\n cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n\n try:\n stdout, stderr = await asyncio.wait_for(\n process.communicate(), timeout=timeout\n )\n return (\n process.returncode or 0,\n stdout.decode(),\n stderr.decode(),\n )\n except asyncio.TimeoutError as exc:\n try:\n process.kill()\n except ProcessLookupError:\n pass\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds\"\n ) from exc\n\n\nclass SandboxFileOperator(FileOperator):\n \"\"\"File operations implementation for sandbox environment.\"\"\"\n\n def __init__(self):\n self.sandbox_client = SANDBOX_CLIENT\n\n async def _ensure_sandbox_initialized(self):\n \"\"\"Ensure sandbox is initialized.\"\"\"\n if not self.sandbox_client.sandbox:\n await self.sandbox_client.create(config=SandboxSettings())\n\n async def read_file(self, path: PathLike) -> str:\n \"\"\"Read content from a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n return await self.sandbox_client.read_file(str(path))\n except Exception as e:\n raise ToolError(f\"Failed to read {path} in sandbox: {str(e)}\") from None\n\n async def write_file(self, path: PathLike, content: str) -> None:\n \"\"\"Write content to a file in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n await self.sandbox_client.write_file(str(path), content)\n except Exception as e:\n raise ToolError(f\"Failed to write to {path} in sandbox: {str(e)}\") from None\n\n async def is_directory(self, path: PathLike) -> bool:\n \"\"\"Check if 
path points to a directory in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -d {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def exists(self, path: PathLike) -> bool:\n \"\"\"Check if path exists in sandbox.\"\"\"\n await self._ensure_sandbox_initialized()\n result = await self.sandbox_client.run_command(\n f\"test -e {path} && echo 'true' || echo 'false'\"\n )\n return result.strip() == \"true\"\n\n async def run_command(\n self, cmd: str, timeout: Optional[float] = 120.0\n ) -> Tuple[int, str, str]:\n \"\"\"Run a command in sandbox environment.\"\"\"\n await self._ensure_sandbox_initialized()\n try:\n stdout = await self.sandbox_client.run_command(\n cmd, timeout=int(timeout) if timeout else None\n )\n return (\n 0, # Always return 0 since we don't have explicit return code from sandbox\n stdout,\n \"\", # No stderr capture in the current sandbox implementation\n )\n except TimeoutError as exc:\n raise TimeoutError(\n f\"Command '{cmd}' timed out after {timeout} seconds in sandbox\"\n ) from exc\n except Exception as exc:\n return 1, \"\", f\"Error executing command in sandbox: {str(exc)}\"\n"], ["/OpenManus/app/prompt/browser.py", "SYSTEM_PROMPT = \"\"\"\\\nYou are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules.\n\n# Input Format\nTask\nPrevious steps\nCurrent URL\nOpen Tabs\nInteractive Elements\n[index]text\n- index: Numeric identifier for interaction\n- type: HTML element type (button, input, etc.)\n- text: Element description\nExample:\n[33]\n\n- Only elements with numeric indexes in [] are interactive\n- elements without [] provide only context\n\n# Response Rules\n1. 
RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format:\n{{\"current_state\": {{\"evaluation_previous_goal\": \"Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not\",\n\"memory\": \"Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz\",\n\"next_goal\": \"What needs to be done with the next immediate action\"}},\n\"action\":[{{\"one_action_name\": {{// action-specific parameter}}}}, // ... more actions in sequence]}}\n\n2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence.\nCommon action sequences:\n- Form filling: [{{\"input_text\": {{\"index\": 1, \"text\": \"username\"}}}}, {{\"input_text\": {{\"index\": 2, \"text\": \"password\"}}}}, {{\"click_element\": {{\"index\": 3}}}}]\n- Navigation and extraction: [{{\"go_to_url\": {{\"url\": \"https://example.com\"}}}}, {{\"extract_content\": {{\"goal\": \"extract the names\"}}}}]\n- Actions are executed in the given order\n- If the page changes after an action, the sequence is interrupted and you get the new state.\n- Only provide the action sequence until an action which changes the page state significantly.\n- Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page\n- only use multiple actions if it makes sense.\n\n3. ELEMENT INTERACTION:\n- Only use indexes of the interactive elements\n- Elements marked with \"[]Non-interactive text\" are non-interactive\n\n4. 
NAVIGATION & ERROR HANDLING:\n- If no suitable elements exist, use other functions to complete the task\n- If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc.\n- Handle popups/cookies by accepting or closing them\n- Use scroll to find elements you are looking for\n- If you want to research something, open a new tab instead of using the current tab\n- If captcha pops up, try to solve it - else try a different approach\n- If the page is not fully loaded, use wait action\n\n5. TASK COMPLETION:\n- Use the done action as the last action as soon as the ultimate task is complete\n- Dont use \"done\" before you are done with everything the user asked you, except you reach the last step of max_steps.\n- If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. If not everything the user asked for is completed set success in done to false!\n- If you have to do something repeatedly for example the task says for \"each\", or \"for all\", or \"x times\", count always inside \"memory\" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step.\n- Don't hallucinate actions\n- Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task.\n\n6. VISUAL CONTEXT:\n- When an image is provided, use it to understand the page layout\n- Bounding boxes with labels on their top right corner correspond to element indexes\n\n7. Form filling:\n- If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field.\n\n8. Long tasks:\n- Keep track of the status and subresults in the memory.\n\n9. 
Extraction:\n- If your task is to find information - call extract_content on the specific pages to get and store the information.\nYour responses must be always JSON with the specified format.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nWhat should I do next to achieve my goal?\n\nWhen you see [Current state starts here], focus on the following:\n- Current URL and page title{url_placeholder}\n- Available tabs{tabs_placeholder}\n- Interactive elements and their indices\n- Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated)\n- Any action results or errors{results_placeholder}\n\nFor browser interactions:\n- To navigate: browser_use with action=\"go_to_url\", url=\"...\"\n- To click: browser_use with action=\"click_element\", index=N\n- To type: browser_use with action=\"input_text\", index=N, text=\"...\"\n- To extract: browser_use with action=\"extract_content\", goal=\"...\"\n- To scroll: browser_use with action=\"scroll_down\" or \"scroll_up\"\n\nConsider both what's visible and what might be beyond the current viewport.\nBe methodical - remember your progress and what you've learned so far.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/tool/terminate.py", "from app.tool.base import BaseTool\n\n\n_TERMINATE_DESCRIPTION = \"\"\"Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task.\nWhen you have finished all the tasks, call this tool to end the work.\"\"\"\n\n\nclass Terminate(BaseTool):\n name: str = \"terminate\"\n description: str = _TERMINATE_DESCRIPTION\n parameters: dict = {\n \"type\": \"object\",\n \"properties\": {\n \"status\": {\n \"type\": \"string\",\n \"description\": \"The finish status of the interaction.\",\n \"enum\": [\"success\", \"failure\"],\n }\n },\n \"required\": [\"status\"],\n }\n\n async def execute(self, status: str) -> str:\n \"\"\"Finish the current 
execution\"\"\"\n return f\"The interaction has been completed with status: {status}\"\n"], ["/OpenManus/app/tool/ask_human.py", "from app.tool import BaseTool\n\n\nclass AskHuman(BaseTool):\n \"\"\"Add a tool to ask human for help.\"\"\"\n\n name: str = \"ask_human\"\n description: str = \"Use this tool to ask human for help.\"\n parameters: str = {\n \"type\": \"object\",\n \"properties\": {\n \"inquire\": {\n \"type\": \"string\",\n \"description\": \"The question you want to ask human.\",\n }\n },\n \"required\": [\"inquire\"],\n }\n\n async def execute(self, inquire: str) -> str:\n return input(f\"\"\"Bot: {inquire}\\n\\nYou: \"\"\").strip()\n"], ["/OpenManus/app/sandbox/client.py", "from abc import ABC, abstractmethod\nfrom typing import Dict, Optional, Protocol\n\nfrom app.config import SandboxSettings\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\nclass SandboxFileOperations(Protocol):\n \"\"\"Protocol for sandbox file operations.\"\"\"\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n local_path: Local destination path.\n \"\"\"\n ...\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n \"\"\"\n ...\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file content from container.\n\n Args:\n path: File path in container.\n\n Returns:\n str: File content.\n \"\"\"\n ...\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes content to file in container.\n\n Args:\n path: File path in container.\n content: Content to write.\n \"\"\"\n ...\n\n\nclass BaseSandboxClient(ABC):\n \"\"\"Base sandbox client interface.\"\"\"\n\n @abstractmethod\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: 
Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates sandbox.\"\"\"\n\n @abstractmethod\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Executes command.\"\"\"\n\n @abstractmethod\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container.\"\"\"\n\n @abstractmethod\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file to container.\"\"\"\n\n @abstractmethod\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file.\"\"\"\n\n @abstractmethod\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file.\"\"\"\n\n @abstractmethod\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n\n\nclass LocalSandboxClient(BaseSandboxClient):\n \"\"\"Local sandbox client implementation.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes local sandbox client.\"\"\"\n self.sandbox: Optional[DockerSandbox] = None\n\n async def create(\n self,\n config: Optional[SandboxSettings] = None,\n volume_bindings: Optional[Dict[str, str]] = None,\n ) -> None:\n \"\"\"Creates a sandbox.\n\n Args:\n config: Sandbox configuration.\n volume_bindings: Volume mappings.\n\n Raises:\n RuntimeError: If sandbox creation fails.\n \"\"\"\n self.sandbox = DockerSandbox(config, volume_bindings)\n await self.sandbox.create()\n\n async def run_command(self, command: str, timeout: Optional[int] = None) -> str:\n \"\"\"Runs command in sandbox.\n\n Args:\n command: Command to execute.\n timeout: Execution timeout in seconds.\n\n Returns:\n Command output.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.run_command(command, timeout)\n\n async def copy_from(self, container_path: str, local_path: str) -> None:\n \"\"\"Copies file from container to local.\n\n Args:\n container_path: File path in container.\n 
local_path: Local destination path.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_from(container_path, local_path)\n\n async def copy_to(self, local_path: str, container_path: str) -> None:\n \"\"\"Copies file from local to container.\n\n Args:\n local_path: Local source file path.\n container_path: Destination path in container.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.copy_to(local_path, container_path)\n\n async def read_file(self, path: str) -> str:\n \"\"\"Reads file from container.\n\n Args:\n path: File path in container.\n\n Returns:\n File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n return await self.sandbox.read_file(path)\n\n async def write_file(self, path: str, content: str) -> None:\n \"\"\"Writes file to container.\n\n Args:\n path: File path in container.\n content: File content.\n\n Raises:\n RuntimeError: If sandbox not initialized.\n \"\"\"\n if not self.sandbox:\n raise RuntimeError(\"Sandbox not initialized\")\n await self.sandbox.write_file(path, content)\n\n async def cleanup(self) -> None:\n \"\"\"Cleans up resources.\"\"\"\n if self.sandbox:\n await self.sandbox.cleanup()\n self.sandbox = None\n\n\ndef create_sandbox_client() -> LocalSandboxClient:\n \"\"\"Creates a sandbox client.\n\n Returns:\n LocalSandboxClient: Sandbox client instance.\n \"\"\"\n return LocalSandboxClient()\n\n\nSANDBOX_CLIENT = create_sandbox_client()\n"], ["/OpenManus/app/logger.py", "import sys\nfrom datetime import datetime\n\nfrom loguru import logger as _logger\n\nfrom app.config import PROJECT_ROOT\n\n\n_print_level = \"INFO\"\n\n\ndef define_log_level(print_level=\"INFO\", logfile_level=\"DEBUG\", name: str = None):\n 
\"\"\"Adjust the log level to above level\"\"\"\n global _print_level\n _print_level = print_level\n\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%Y%m%d%H%M%S\")\n log_name = (\n f\"{name}_{formatted_date}\" if name else formatted_date\n ) # name a log with prefix name\n\n _logger.remove()\n _logger.add(sys.stderr, level=print_level)\n _logger.add(PROJECT_ROOT / f\"logs/{log_name}.log\", level=logfile_level)\n return _logger\n\n\nlogger = define_log_level()\n\n\nif __name__ == \"__main__\":\n logger.info(\"Starting application\")\n logger.debug(\"Debug message\")\n logger.warning(\"Warning message\")\n logger.error(\"Error message\")\n logger.critical(\"Critical message\")\n\n try:\n raise ValueError(\"Test error\")\n except Exception as e:\n logger.exception(f\"An error occurred: {e}\")\n"], ["/OpenManus/app/flow/flow_factory.py", "from enum import Enum\nfrom typing import Dict, List, Union\n\nfrom app.agent.base import BaseAgent\nfrom app.flow.base import BaseFlow\nfrom app.flow.planning import PlanningFlow\n\n\nclass FlowType(str, Enum):\n PLANNING = \"planning\"\n\n\nclass FlowFactory:\n \"\"\"Factory for creating different types of flows with support for multiple agents\"\"\"\n\n @staticmethod\n def create_flow(\n flow_type: FlowType,\n agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],\n **kwargs,\n ) -> BaseFlow:\n flows = {\n FlowType.PLANNING: PlanningFlow,\n }\n\n flow_class = flows.get(flow_type)\n if not flow_class:\n raise ValueError(f\"Unknown flow type: {flow_type}\")\n\n return flow_class(agents, **kwargs)\n"], ["/OpenManus/app/tool/search/base.py", "from typing import List, Optional\n\nfrom pydantic import BaseModel, Field\n\n\nclass SearchItem(BaseModel):\n \"\"\"Represents a single search result item\"\"\"\n\n title: str = Field(description=\"The title of the search result\")\n url: str = Field(description=\"The URL of the search result\")\n description: Optional[str] = Field(\n default=None, 
description=\"A description or snippet of the search result\"\n )\n\n def __str__(self) -> str:\n \"\"\"String representation of a search result item.\"\"\"\n return f\"{self.title} - {self.url}\"\n\n\nclass WebSearchEngine(BaseModel):\n \"\"\"Base class for web search engines.\"\"\"\n\n model_config = {\"arbitrary_types_allowed\": True}\n\n def perform_search(\n self, query: str, num_results: int = 10, *args, **kwargs\n ) -> List[SearchItem]:\n \"\"\"\n Perform a web search and return a list of search items.\n\n Args:\n query (str): The search query to submit to the search engine.\n num_results (int, optional): The number of search results to return. Default is 10.\n args: Additional arguments.\n kwargs: Additional keyword arguments.\n\n Returns:\n List[SearchItem]: A list of SearchItem objects matching the search query.\n \"\"\"\n raise NotImplementedError\n"], ["/OpenManus/app/agent/data_analysis.py", "from pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.config import config\nfrom app.prompt.visualization import NEXT_STEP_PROMPT, SYSTEM_PROMPT\nfrom app.tool import Terminate, ToolCollection\nfrom app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\nclass DataAnalysis(ToolCallAgent):\n \"\"\"\n A data analysis agent that uses planning to solve various data analysis tasks.\n\n This agent extends ToolCallAgent with a comprehensive set of tools and capabilities,\n including Data Analysis, Chart Visualization, Data Report.\n \"\"\"\n\n name: str = \"Data_Analysis\"\n description: str = \"An analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks\"\n\n system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)\n next_step_prompt: str = NEXT_STEP_PROMPT\n\n max_observe: int = 15000\n max_steps: int 
= 20\n\n # Add general-purpose tools to the tool collection\n available_tools: ToolCollection = Field(\n default_factory=lambda: ToolCollection(\n NormalPythonExecute(),\n VisualizationPrepare(),\n DataVisualization(),\n Terminate(),\n )\n )\n"], ["/OpenManus/main.py", "import argparse\nimport asyncio\n\nfrom app.agent.manus import Manus\nfrom app.logger import logger\n\n\nasync def main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Run Manus agent with a prompt\")\n parser.add_argument(\n \"--prompt\", type=str, required=False, help=\"Input prompt for the agent\"\n )\n args = parser.parse_args()\n\n # Create and initialize Manus agent\n agent = await Manus.create()\n try:\n # Use command line prompt if provided, otherwise ask for input\n prompt = args.prompt if args.prompt else input(\"Enter your prompt: \")\n if not prompt.strip():\n logger.warning(\"Empty prompt provided.\")\n return\n\n logger.warning(\"Processing your request...\")\n await agent.run(prompt)\n logger.info(\"Request processing completed.\")\n except KeyboardInterrupt:\n logger.warning(\"Operation interrupted.\")\n finally:\n # Ensure agent resources are cleaned up before exiting\n await agent.cleanup()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n"], ["/OpenManus/app/tool/__init__.py", "from app.tool.base import BaseTool\nfrom app.tool.bash import Bash\nfrom app.tool.browser_use_tool import BrowserUseTool\nfrom app.tool.create_chat_completion import CreateChatCompletion\nfrom app.tool.planning import PlanningTool\nfrom app.tool.str_replace_editor import StrReplaceEditor\nfrom app.tool.terminate import Terminate\nfrom app.tool.tool_collection import ToolCollection\nfrom app.tool.web_search import WebSearch\nfrom app.tool.crawl4ai import Crawl4aiTool\n\n\n__all__ = [\n \"BaseTool\",\n \"Bash\",\n \"BrowserUseTool\",\n \"Terminate\",\n \"StrReplaceEditor\",\n \"WebSearch\",\n \"ToolCollection\",\n \"CreateChatCompletion\",\n \"PlanningTool\",\n 
\"Crawl4aiTool\"\n]\n"], ["/OpenManus/app/agent/react.py", "from abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom pydantic import Field\n\nfrom app.agent.base import BaseAgent\nfrom app.llm import LLM\nfrom app.schema import AgentState, Memory\n\n\nclass ReActAgent(BaseAgent, ABC):\n name: str\n description: Optional[str] = None\n\n system_prompt: Optional[str] = None\n next_step_prompt: Optional[str] = None\n\n llm: Optional[LLM] = Field(default_factory=LLM)\n memory: Memory = Field(default_factory=Memory)\n state: AgentState = AgentState.IDLE\n\n max_steps: int = 10\n current_step: int = 0\n\n @abstractmethod\n async def think(self) -> bool:\n \"\"\"Process current state and decide next action\"\"\"\n\n @abstractmethod\n async def act(self) -> str:\n \"\"\"Execute decided actions\"\"\"\n\n async def step(self) -> str:\n \"\"\"Execute a single step: think and act.\"\"\"\n should_act = await self.think()\n if not should_act:\n return \"Thinking complete - no action needed\"\n return await self.act()\n"], ["/OpenManus/app/agent/swe.py", "from typing import List\n\nfrom pydantic import Field\n\nfrom app.agent.toolcall import ToolCallAgent\nfrom app.prompt.swe import SYSTEM_PROMPT\nfrom app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection\n\n\nclass SWEAgent(ToolCallAgent):\n \"\"\"An agent that implements the SWEAgent paradigm for executing code and natural conversations.\"\"\"\n\n name: str = \"swe\"\n description: str = \"an autonomous AI programmer that interacts directly with the computer to solve tasks.\"\n\n system_prompt: str = SYSTEM_PROMPT\n next_step_prompt: str = \"\"\n\n available_tools: ToolCollection = ToolCollection(\n Bash(), StrReplaceEditor(), Terminate()\n )\n special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name])\n\n max_steps: int = 20\n"], ["/OpenManus/app/prompt/mcp.py", "\"\"\"Prompts for the MCP Agent.\"\"\"\n\nSYSTEM_PROMPT = \"\"\"You are an AI assistant with access to a Model 
Context Protocol (MCP) server.\nYou can use the tools provided by the MCP server to complete tasks.\nThe MCP server will dynamically expose tools that you can use - always check the available tools first.\n\nWhen using an MCP tool:\n1. Choose the appropriate tool based on your task requirements\n2. Provide properly formatted arguments as required by the tool\n3. Observe the results and use them to determine next steps\n4. Tools may change during operation - new tools might appear or existing ones might disappear\n\nFollow these guidelines:\n- Call tools with valid parameters as documented in their schemas\n- Handle errors gracefully by understanding what went wrong and trying again with corrected parameters\n- For multimedia responses (like images), you'll receive a description of the content\n- Complete user requests step by step, using the most appropriate tools\n- If multiple tools need to be called in sequence, make one call at a time and wait for results\n\nRemember to clearly explain your reasoning and actions to the user.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on the current state and available tools, what should be done next?\nThink step by step about the problem and identify which MCP tool would be most helpful for the current stage.\nIf you've already made progress, consider what additional information you need or what actions would move you closer to completing the task.\n\"\"\"\n\n# Additional specialized prompts\nTOOL_ERROR_PROMPT = \"\"\"You encountered an error with the tool '{tool_name}'.\nTry to understand what went wrong and correct your approach.\nCommon issues include:\n- Missing or incorrect parameters\n- Invalid parameter formats\n- Using a tool that's no longer available\n- Attempting an operation that's not supported\n\nPlease check the tool specifications and try again with corrected parameters.\n\"\"\"\n\nMULTIMEDIA_RESPONSE_PROMPT = \"\"\"You've received a multimedia response (image, audio, etc.) 
from the tool '{tool_name}'.\nThis content has been processed and described for you.\nUse this information to continue the task or provide insights to the user.\n\"\"\"\n"], ["/OpenManus/app/__init__.py", "# Python version check: 3.11-3.13\nimport sys\n\n\nif sys.version_info < (3, 11) or sys.version_info > (3, 13):\n print(\n \"Warning: Unsupported Python version {ver}, please use 3.11-3.13\".format(\n ver=\".\".join(map(str, sys.version_info))\n )\n )\n"], ["/OpenManus/app/sandbox/__init__.py", "\"\"\"\nDocker Sandbox Module\n\nProvides secure containerized execution environment with resource limits\nand isolation for running untrusted code.\n\"\"\"\nfrom app.sandbox.client import (\n BaseSandboxClient,\n LocalSandboxClient,\n create_sandbox_client,\n)\nfrom app.sandbox.core.exceptions import (\n SandboxError,\n SandboxResourceError,\n SandboxTimeoutError,\n)\nfrom app.sandbox.core.manager import SandboxManager\nfrom app.sandbox.core.sandbox import DockerSandbox\n\n\n__all__ = [\n \"DockerSandbox\",\n \"SandboxManager\",\n \"BaseSandboxClient\",\n \"LocalSandboxClient\",\n \"create_sandbox_client\",\n \"SandboxError\",\n \"SandboxTimeoutError\",\n \"SandboxResourceError\",\n]\n"], ["/OpenManus/app/exceptions.py", "class ToolError(Exception):\n \"\"\"Raised when a tool encounters an error.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n\nclass OpenManusError(Exception):\n \"\"\"Base exception for all OpenManus errors\"\"\"\n\n\nclass TokenLimitExceeded(OpenManusError):\n \"\"\"Exception raised when the token limit is exceeded\"\"\"\n"], ["/OpenManus/app/prompt/swe.py", "SYSTEM_PROMPT = \"\"\"SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface.\n\nThe special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time.\nIn addition to typical bash commands, you can also use specific commands to help you navigate and edit files.\nTo call a command, you 
need to invoke it with a function call/tool call.\n\nPlease note that THE EDIT COMMAND REQUIRES PROPER INDENTATION.\nIf you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.\n\nRESPONSE FORMAT:\nYour shell prompt is formatted as follows:\n(Open file: )\n(Current directory: )\nbash-$\n\nFirst, you should _always_ include a general thought about what you're going to do next.\nThen, for every response, you must include exactly _ONE_ tool call/function call.\n\nRemember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference.\nIf you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call.\nNote that the environment does NOT support interactive session commands (e.g. python, vim), so please do not invoke them.\n\"\"\"\n"], ["/OpenManus/app/prompt/manus.py", "SYSTEM_PROMPT = (\n \"You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all.\"\n \"The initial directory is: {directory}\"\n)\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. 
After using each tool, clearly explain the execution results and suggest the next steps.\n\nIf you want to stop the interaction at any point, use the `terminate` tool/function call.\n\"\"\"\n"], ["/OpenManus/app/tool/chart_visualization/__init__.py", "from app.tool.chart_visualization.chart_prepare import VisualizationPrepare\nfrom app.tool.chart_visualization.data_visualization import DataVisualization\nfrom app.tool.chart_visualization.python_execute import NormalPythonExecute\n\n\n__all__ = [\"DataVisualization\", \"VisualizationPrepare\", \"NormalPythonExecute\"]\n"], ["/OpenManus/app/prompt/visualization.py", "SYSTEM_PROMPT = \"\"\"You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests.\n# Note:\n1. The workspace directory is: {directory}; Read / write file in workspace\n2. Generate analysis conclusion report in the end\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"Based on user needs, break down the problem and use different tools step by step to solve it.\n# Note\n1. Each step select the most appropriate tool proactively (ONLY ONE).\n2. After using each tool, clearly explain the execution results and suggest the next steps.\n3. When observation with Error, review and fix it.\"\"\"\n"], ["/OpenManus/run_mcp_server.py", "# coding: utf-8\n# A shortcut to launch OpenManus MCP server, where its introduction also solves other import issues.\nfrom app.mcp.server import MCPServer, parse_args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Create and run server (maintaining original flow)\n server = MCPServer()\n server.run(transport=args.transport)\n"], ["/OpenManus/app/prompt/planning.py", "PLANNING_SYSTEM_PROMPT = \"\"\"\nYou are an expert Planning Agent tasked with solving problems efficiently through structured plans.\nYour job is:\n1. Analyze requests to understand the task scope\n2. 
Create a clear, actionable plan that makes meaningful progress with the `planning` tool\n3. Execute steps using available tools as needed\n4. Track progress and adapt plans when necessary\n5. Use `finish` to conclude immediately when the task is complete\n\n\nAvailable tools will vary by task but may include:\n- `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.)\n- `finish`: End the task when complete\nBreak tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps.\nThink about dependencies and verification methods.\nKnow when to conclude - don't continue thinking once objectives are met.\n\"\"\"\n\nNEXT_STEP_PROMPT = \"\"\"\nBased on the current state, what's your next action?\nChoose the most efficient path forward:\n1. Is the plan sufficient, or does it need refinement?\n2. Can you execute the next step immediately?\n3. Is the task complete? If so, use `finish` right away.\n\nBe concise in your reasoning, then select the appropriate tool or action.\n\"\"\"\n"], ["/OpenManus/app/prompt/toolcall.py", "SYSTEM_PROMPT = \"You are an agent that can execute tool calls\"\n\nNEXT_STEP_PROMPT = (\n \"If you want to stop interaction, use `terminate` tool/function call.\"\n)\n"], ["/OpenManus/app/agent/__init__.py", "from app.agent.base import BaseAgent\nfrom app.agent.browser import BrowserAgent\nfrom app.agent.mcp import MCPAgent\nfrom app.agent.react import ReActAgent\nfrom app.agent.swe import SWEAgent\nfrom app.agent.toolcall import ToolCallAgent\n\n\n__all__ = [\n \"BaseAgent\",\n \"BrowserAgent\",\n \"ReActAgent\",\n \"SWEAgent\",\n \"ToolCallAgent\",\n \"MCPAgent\",\n]\n"], ["/OpenManus/app/tool/search/__init__.py", "from app.tool.search.baidu_search import BaiduSearchEngine\nfrom app.tool.search.base import WebSearchEngine\nfrom app.tool.search.bing_search import BingSearchEngine\nfrom app.tool.search.duckduckgo_search import DuckDuckGoSearchEngine\nfrom app.tool.search.google_search import 
GoogleSearchEngine\n\n\n__all__ = [\n \"WebSearchEngine\",\n \"BaiduSearchEngine\",\n \"DuckDuckGoSearchEngine\",\n \"GoogleSearchEngine\",\n \"BingSearchEngine\",\n]\n"], ["/OpenManus/app/sandbox/core/exceptions.py", "\"\"\"Exception classes for the sandbox system.\n\nThis module defines custom exceptions used throughout the sandbox system to\nhandle various error conditions in a structured way.\n\"\"\"\n\n\nclass SandboxError(Exception):\n \"\"\"Base exception for sandbox-related errors.\"\"\"\n\n\nclass SandboxTimeoutError(SandboxError):\n \"\"\"Exception raised when a sandbox operation times out.\"\"\"\n\n\nclass SandboxResourceError(SandboxError):\n \"\"\"Exception raised for resource-related errors.\"\"\"\n"], ["/OpenManus/examples/benchmarks/__init__.py", "\"\"\"\nOpenManus benchmark system for standardized agent evaluation.\n\"\"\"\n"], ["/OpenManus/app/mcp/__init__.py", ""], ["/OpenManus/app/flow/__init__.py", ""], ["/OpenManus/app/prompt/__init__.py", ""], ["/OpenManus/protocol/a2a/__init__.py", ""], ["/OpenManus/protocol/a2a/app/__init__.py", ""]], "task_instance_info": {"created_time": "2025-08-20 15:01:10", "created_task_model": "DeepSeek-R1", "class_skeleton": "class ChatCompletions:\n def __init__(self, client):\n pass\n def _convert_openai_tools_to_bedrock_format(self, tools):\n pass\n def _convert_openai_messages_to_bedrock_format(self, messages):\n pass\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n pass", "code_description": "This code segment functions to: Translate message formats and tool specifications between different AI service interfaces while maintaining conversational state and handling asynchronous communication patterns."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass ChatCompletions:\n def __init__(self, client):\n self.client = client\n\n def _convert_openai_tools_to_bedrock_format(self, tools):\n bedrock_tools = []\n for tool in tools:\n 
func = tool[\"function\"]\n bedrock_tools.append({\n \"name\": func[\"name\"],\n \"description\": func.get(\"description\", \"\"),\n \"inputSchema\": {\n \"type\": \"object\",\n \"properties\": func[\"parameters\"].get(\"properties\", {}),\n \"required\": func[\"parameters\"].get(\"required\", [])\n }\n })\n return {\"tools\": bedrock_tools}\n\n def _convert_openai_messages_to_bedrock_format(self, messages):\n system_messages = []\n converted_messages = []\n \n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_messages.append(msg[\"content\"])\n elif msg[\"role\"] == \"tool\":\n converted_messages.append({\n \"role\": \"user\",\n \"content\": [{\n \"type\": \"tool_result\",\n \"tool_use_id\": msg.get(\"tool_call_id\", \"\"),\n \"content\": msg[\"content\"]\n }]\n })\n else:\n new_msg = {\"role\": msg[\"role\"], \"content\": []}\n if \"tool_calls\" in msg:\n for tool_call in msg[\"tool_calls\"]:\n new_msg[\"content\"].append({\n \"type\": \"tool_use\",\n \"id\": str(uuid.uuid4()),\n \"name\": tool_call[\"function\"][\"name\"],\n \"input\": json.loads(tool_call[\"function\"][\"arguments\"])\n })\n if msg.get(\"content\"):\n new_msg[\"content\"].append({\"type\": \"text\", \"text\": msg[\"content\"]})\n converted_messages.append(new_msg)\n \n return {\n \"preamble\": \"\\n\".join(system_messages),\n \"messages\": converted_messages\n }\n\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n global CURRENT_TOOLUSE_ID\n response_body = json.loads(bedrock_response[\"body\"].read())\n \n message_content = \"\"\n tool_calls = []\n \n for content in response_body.get(\"content\", []):\n if content[\"type\"] == \"text\":\n message_content = content[\"text\"]\n elif content[\"type\"] == \"tool_use\":\n CURRENT_TOOLUSE_ID = content[\"id\"]\n tool_calls.append({\n \"id\": content[\"id\"],\n \"type\": \"function\",\n \"function\": {\n \"name\": content[\"name\"],\n \"arguments\": json.dumps(content[\"input\"])\n }\n })\n \n return 
OpenAIResponse({\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"model\": \"bedrock-proxy\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": message_content,\n \"tool_calls\": tool_calls\n },\n \"finish_reason\": \"tool_calls\" if tool_calls else \"stop\"\n }]\n })\n[TASK_END]\n```", "inference_time": "2025-08-20 15-01-50"}, "editdistance_info": {"edit_distance": 15.8969, "calculate_time": "2025-08-20 15:01:50", "true_code_clean": "class ChatCompletions:\n def __init__(self, client):\n self.client = client\n def _convert_openai_tools_to_bedrock_format(self, tools):\n bedrock_tools = []\n for tool in tools:\n if tool.get(\"type\") == \"function\":\n function = tool.get(\"function\", {})\n bedrock_tool = {\n \"toolSpec\": {\n \"name\": function.get(\"name\", \"\"),\n \"description\": function.get(\"description\", \"\"),\n \"inputSchema\": {\n \"json\": {\n \"type\": \"object\",\n \"properties\": function.get(\"parameters\", {}).get(\n \"properties\", {}\n ),\n \"required\": function.get(\"parameters\", {}).get(\n \"required\", []\n ),\n }\n },\n }\n }\n bedrock_tools.append(bedrock_tool)\n return bedrock_tools\n def _convert_openai_messages_to_bedrock_format(self, messages):\n bedrock_messages = []\n system_prompt = []\n for message in messages:\n if message.get(\"role\") == \"system\":\n system_prompt = [{\"text\": message.get(\"content\")}]\n elif message.get(\"role\") == \"user\":\n bedrock_message = {\n \"role\": message.get(\"role\", \"user\"),\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"assistant\":\n bedrock_message = {\n \"role\": \"assistant\",\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n openai_tool_calls = message.get(\"tool_calls\", [])\n if openai_tool_calls:\n bedrock_tool_use = {\n \"toolUseId\": openai_tool_calls[0][\"id\"],\n \"name\": 
openai_tool_calls[0][\"function\"][\"name\"],\n \"input\": json.loads(\n openai_tool_calls[0][\"function\"][\"arguments\"]\n ),\n }\n bedrock_message[\"content\"].append({\"toolUse\": bedrock_tool_use})\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = openai_tool_calls[0][\"id\"]\n bedrock_messages.append(bedrock_message)\n elif message.get(\"role\") == \"tool\":\n bedrock_message = {\n \"role\": \"user\",\n \"content\": [\n {\n \"toolResult\": {\n \"toolUseId\": CURRENT_TOOLUSE_ID,\n \"content\": [{\"text\": message.get(\"content\")}],\n }\n }\n ],\n }\n bedrock_messages.append(bedrock_message)\n else:\n raise ValueError(f\"Invalid role: {message.get('role')}\")\n return system_prompt, bedrock_messages\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n content = \"\"\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n content_array = bedrock_response[\"output\"][\"message\"][\"content\"]\n content = \"\".join(item.get(\"text\", \"\") for item in content_array)\n if content == \"\":\n content = \".\"\n openai_tool_calls = []\n if bedrock_response.get(\"output\", {}).get(\"message\", {}).get(\"content\"):\n for content_item in bedrock_response[\"output\"][\"message\"][\"content\"]:\n if content_item.get(\"toolUse\"):\n bedrock_tool_use = content_item[\"toolUse\"]\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n openai_tool_call = {\n \"id\": CURRENT_TOOLUSE_ID,\n \"type\": \"function\",\n \"function\": {\n \"name\": bedrock_tool_use[\"name\"],\n \"arguments\": json.dumps(bedrock_tool_use[\"input\"]),\n },\n }\n openai_tool_calls.append(openai_tool_call)\n openai_format = {\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"created\": int(time.time()),\n \"object\": \"chat.completion\",\n \"system_fingerprint\": None,\n \"choices\": [\n {\n \"finish_reason\": bedrock_response.get(\"stopReason\", \"end_turn\"),\n \"index\": 0,\n \"message\": {\n \"content\": content,\n \"role\": 
bedrock_response.get(\"output\", {})\n .get(\"message\", {})\n .get(\"role\", \"assistant\"),\n \"tool_calls\": openai_tool_calls\n if openai_tool_calls != []\n else None,\n \"function_call\": None,\n },\n }\n ],\n \"usage\": {\n \"completion_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"outputTokens\", 0\n ),\n \"prompt_tokens\": bedrock_response.get(\"usage\", {}).get(\n \"inputTokens\", 0\n ),\n \"total_tokens\": bedrock_response.get(\"usage\", {}).get(\"totalTokens\", 0),\n },\n }\n return OpenAIResponse(openai_format)\n async def _invoke_bedrock(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n openai_response = self._convert_bedrock_response_to_openai_format(response)\n return openai_response\n async def _invoke_bedrock_stream(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n tools: Optional[List[dict]] = None,\n tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n (\n system_prompt,\n bedrock_messages,\n ) = self._convert_openai_messages_to_bedrock_format(messages)\n response = self.client.converse_stream(\n modelId=model,\n system=system_prompt,\n messages=bedrock_messages,\n inferenceConfig={\"temperature\": temperature, \"maxTokens\": max_tokens},\n toolConfig={\"tools\": tools} if tools else None,\n )\n bedrock_response = {\n \"output\": {\"message\": {\"role\": \"\", \"content\": []}},\n \"stopReason\": \"\",\n 
\"usage\": {},\n \"metrics\": {},\n }\n bedrock_response_text = \"\"\n bedrock_response_tool_input = \"\"\n stream = response.get(\"stream\")\n if stream:\n for event in stream:\n if event.get(\"messageStart\", {}).get(\"role\"):\n bedrock_response[\"output\"][\"message\"][\"role\"] = event[\n \"messageStart\"\n ][\"role\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"text\"):\n bedrock_response_text += event[\"contentBlockDelta\"][\"delta\"][\"text\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"text\"], end=\"\", flush=True\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 0:\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"text\": bedrock_response_text}\n )\n if event.get(\"contentBlockStart\", {}).get(\"start\", {}).get(\"toolUse\"):\n bedrock_tool_use = event[\"contentBlockStart\"][\"start\"][\"toolUse\"]\n tool_use = {\n \"toolUseId\": bedrock_tool_use[\"toolUseId\"],\n \"name\": bedrock_tool_use[\"name\"],\n }\n bedrock_response[\"output\"][\"message\"][\"content\"].append(\n {\"toolUse\": tool_use}\n )\n global CURRENT_TOOLUSE_ID\n CURRENT_TOOLUSE_ID = bedrock_tool_use[\"toolUseId\"]\n if event.get(\"contentBlockDelta\", {}).get(\"delta\", {}).get(\"toolUse\"):\n bedrock_response_tool_input += event[\"contentBlockDelta\"][\"delta\"][\n \"toolUse\"\n ][\"input\"]\n print(\n event[\"contentBlockDelta\"][\"delta\"][\"toolUse\"][\"input\"],\n end=\"\",\n flush=True,\n )\n if event.get(\"contentBlockStop\", {}).get(\"contentBlockIndex\") == 1:\n bedrock_response[\"output\"][\"message\"][\"content\"][1][\"toolUse\"][\n \"input\"\n ] = json.loads(bedrock_response_tool_input)\n print()\n openai_response = self._convert_bedrock_response_to_openai_format(\n bedrock_response\n )\n return openai_response\n def create(\n self,\n model: str,\n messages: List[Dict[str, str]],\n max_tokens: int,\n temperature: float,\n stream: Optional[bool] = True,\n tools: Optional[List[dict]] = None,\n 
tool_choice: Literal[\"none\", \"auto\", \"required\"] = \"auto\",\n **kwargs,\n ) -> OpenAIResponse:\n bedrock_tools = []\n if tools is not None:\n bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)\n if stream:\n return self._invoke_bedrock_stream(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )\n else:\n return self._invoke_bedrock(\n model,\n messages,\n max_tokens,\n temperature,\n bedrock_tools,\n tool_choice,\n **kwargs,\n )", "predict_code_clean": "class ChatCompletions:\n def __init__(self, client):\n self.client = client\n def _convert_openai_tools_to_bedrock_format(self, tools):\n bedrock_tools = []\n for tool in tools:\n func = tool[\"function\"]\n bedrock_tools.append({\n \"name\": func[\"name\"],\n \"description\": func.get(\"description\", \"\"),\n \"inputSchema\": {\n \"type\": \"object\",\n \"properties\": func[\"parameters\"].get(\"properties\", {}),\n \"required\": func[\"parameters\"].get(\"required\", [])\n }\n })\n return {\"tools\": bedrock_tools}\n def _convert_openai_messages_to_bedrock_format(self, messages):\n system_messages = []\n converted_messages = []\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_messages.append(msg[\"content\"])\n elif msg[\"role\"] == \"tool\":\n converted_messages.append({\n \"role\": \"user\",\n \"content\": [{\n \"type\": \"tool_result\",\n \"tool_use_id\": msg.get(\"tool_call_id\", \"\"),\n \"content\": msg[\"content\"]\n }]\n })\n else:\n new_msg = {\"role\": msg[\"role\"], \"content\": []}\n if \"tool_calls\" in msg:\n for tool_call in msg[\"tool_calls\"]:\n new_msg[\"content\"].append({\n \"type\": \"tool_use\",\n \"id\": str(uuid.uuid4()),\n \"name\": tool_call[\"function\"][\"name\"],\n \"input\": json.loads(tool_call[\"function\"][\"arguments\"])\n })\n if msg.get(\"content\"):\n new_msg[\"content\"].append({\"type\": \"text\", \"text\": msg[\"content\"]})\n converted_messages.append(new_msg)\n return {\n \"preamble\": 
\"\\n\".join(system_messages),\n \"messages\": converted_messages\n }\n def _convert_bedrock_response_to_openai_format(self, bedrock_response):\n global CURRENT_TOOLUSE_ID\n response_body = json.loads(bedrock_response[\"body\"].read())\n message_content = \"\"\n tool_calls = []\n for content in response_body.get(\"content\", []):\n if content[\"type\"] == \"text\":\n message_content = content[\"text\"]\n elif content[\"type\"] == \"tool_use\":\n CURRENT_TOOLUSE_ID = content[\"id\"]\n tool_calls.append({\n \"id\": content[\"id\"],\n \"type\": \"function\",\n \"function\": {\n \"name\": content[\"name\"],\n \"arguments\": json.dumps(content[\"input\"])\n }\n })\n return OpenAIResponse({\n \"id\": f\"chatcmpl-{uuid.uuid4()}\",\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"model\": \"bedrock-proxy\",\n \"choices\": [{\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": message_content,\n \"tool_calls\": tool_calls\n },\n \"finish_reason\": \"tool_calls\" if tool_calls else \"stop\"\n }]\n })"}}