Spaces:
Paused
Paused
Commit ·
3fe716f
0
Parent(s):
Switch to Docker SDK to fix dependency conflicts
Browse files- Add Dockerfile with python:3.10-slim base image
- Pin gradio==5.23.1 and huggingface_hub==0.23.0 in requirements.txt
- Change sdk from gradio to docker in README.md
- This resolves the HfFolder import error from huggingface_hub
- Dockerfile +27 -0
- README.md +48 -0
- agents/__init__.py +4 -0
- agents/search_agent.py +126 -0
- app.py +125 -0
- gui/__init__.py +4 -0
- gui/assets/appBot.css +157 -0
- gui/web_ui.py +380 -0
- llm/__init__.py +4 -0
- llm/oai.py +289 -0
- prompt.py +16 -0
- requirements.txt +8 -0
- scholar.py +107 -0
- search.py +134 -0
- tool_python.py +156 -0
- utils/__init__.py +2 -0
- visit.py +260 -0
Dockerfile
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.10-slim

WORKDIR /app

# Install system dependencies (git is needed so pip can install VCS requirements)
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching: the pip layer is only rebuilt
# when requirements.txt changes, not on every source edit
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Expose port (7860 is the Huggingface Spaces / Gradio default)
EXPOSE 7860

# Set environment variables so Gradio binds to all interfaces inside the container
ENV GRADIO_SERVER_NAME="0.0.0.0"
ENV GRADIO_SERVER_PORT="7860"

# Run the application
CMD ["python", "app.py"]
|
README.md
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Tongyi DeepResearch
|
| 3 |
+
emoji: 🔍
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_file: app.py
|
| 8 |
+
pinned: false
|
| 9 |
+
license: apache-2.0
|
| 10 |
+
---
|
| 11 |
+
|
| 12 |
+
# Tongyi DeepResearch
|
| 13 |
+
|
| 14 |
+
A leading open-source Deep Research Agent that conducts thorough, multi-source investigations into any topic.
|
| 15 |
+
|
| 16 |
+
## Features
|
| 17 |
+
|
| 18 |
+
- 🔍 **Web Search**: Perform Google web searches to find relevant information
|
| 19 |
+
- 📚 **Academic Search**: Leverage Google Scholar for academic publications
|
| 20 |
+
- 🌐 **Web Visiting**: Visit webpages and extract useful information
|
| 21 |
+
- 🐍 **Python Interpreter**: Execute Python code in a sandboxed environment
|
| 22 |
+
|
| 23 |
+
## Environment Variables
|
| 24 |
+
|
| 25 |
+
To run this application, you need to set the following environment variables:
|
| 26 |
+
|
| 27 |
+
| Variable | Description |
|
| 28 |
+
|----------|-------------|
|
| 29 |
+
| `DR_MODEL_NAME` | The name of the LLM model to use |
|
| 30 |
+
| `DR_MODEL_SERVER` | The API endpoint of the model server |
|
| 31 |
+
| `DR_MODEL_API_KEY` | The API key for the model server |
|
| 32 |
+
| `SERPER_KEY_ID` | API key for Serper.dev (Google Search) |
|
| 33 |
+
| `JINA_KEY` | API key for Jina.ai (Web Reading) |
|
| 34 |
+
| `SANDBOX_URL` | URL of the sandbox fusion endpoint for Python execution |
|
| 35 |
+
| `API_KEY` | API key for the summary model |
|
| 36 |
+
| `API_BASE` | Base URL for the summary model API |
|
| 37 |
+
|
| 38 |
+
## Usage
|
| 39 |
+
|
| 40 |
+
1. Clone this repository
|
| 41 |
+
2. Set up the environment variables
|
| 42 |
+
3. Run `python app.py`
|
| 43 |
+
4. Open your browser and navigate to `http://localhost:7860`
|
| 44 |
+
|
| 45 |
+
## License
|
| 46 |
+
|
| 47 |
+
Apache-2.0
|
| 48 |
+
|
agents/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Package entry point for `agents`: re-export the public SearchAgent class so
# callers can simply do `from agents import SearchAgent`.
from .search_agent import SearchAgent

__all__ = ['SearchAgent']
|
| 4 |
+
|
agents/search_agent.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
from typing import Dict, Iterator, List, Literal, Union, Optional
|
| 3 |
+
|
| 4 |
+
from qwen_agent.agents import Assistant
|
| 5 |
+
from qwen_agent.llm import BaseChatModel
|
| 6 |
+
from qwen_agent.llm.schema import USER, FUNCTION, Message, DEFAULT_SYSTEM_MESSAGE, SYSTEM, ROLE
|
| 7 |
+
from qwen_agent.tools import BaseTool
|
| 8 |
+
from qwen_agent.log import logger
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SearchAgent(Assistant):
    """An Assistant that drives a multi-step tool-use loop for deep research.

    On each iteration the LLM is called; when it requests a tool, the tool is
    executed and its result is appended to the conversation. The loop stops
    when the model requests no tool or when the LLM-call budget is exhausted.
    """

    def __init__(self,
                 function_list: Optional[List[Union[str, Dict, BaseTool]]] = None,
                 llm: Optional[Union[Dict, BaseChatModel]] = None,
                 system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
                 name: Optional[str] = None,
                 description: Optional[str] = None,
                 files: Optional[List[str]] = None,
                 rag_cfg: Optional[Dict] = None,
                 extra: Optional[Dict] = None,
                 custom_user_prompt: Optional[str] = '',
                 make_system_prompt=None,
                 addtional_agent=None):
        """
        Args:
            extra: Optional settings dict; recognized keys are ``reasoning``
                (bool, default True) and ``max_llm_calls`` (int, default 20).
            custom_user_prompt: Text prepended to the first user message.
            make_system_prompt: Zero-arg callable returning the system prompt
                to insert when the history does not start with a system message.
            addtional_agent: (sic — name kept for backward compatibility)
                optional follow-up agent run after the main loop finishes.
        """
        super().__init__(function_list=function_list,
                         llm=llm,
                         system_message=system_message,
                         name=name,
                         description=description,
                         files=files,
                         rag_cfg=rag_cfg)
        # NOTE: the previous version used a mutable default ({}) for `extra`,
        # which is shared across all instances; normalize None -> fresh dict.
        self.extra = extra if extra is not None else {}
        self.custom_user_prompt = custom_user_prompt
        self.make_system_prompt = make_system_prompt
        self.addtional_agent = addtional_agent

    def insert_in_custom_user_prompt(self, messages: List[Message]) -> List[Message]:
        """Prepend ``custom_user_prompt`` to the first user message, in place."""
        for message in messages:
            if message.role == USER:
                message.content[0].text = self.custom_user_prompt + message.content[0].text
                break
        return messages

    def _run(self,
             messages: List[Message],
             lang: Literal['en', 'zh'] = 'zh',
             knowledge: str = '',
             **kwargs) -> Iterator[List[Message]]:
        """Run the agent loop, streaming cumulative responses.

        Yields progressively longer ``response`` lists while the LLM streams,
        then the final response; if ``addtional_agent`` is set, its output is
        streamed afterwards on top of the (truncated) conversation.
        """
        messages = self._prepend_knowledge_prompt(messages=messages,
                                                  lang=lang,
                                                  knowledge=knowledge,
                                                  **kwargs)
        messages = copy.deepcopy(messages)
        self.insert_in_custom_user_prompt(messages=messages)

        # Insert a system prompt only when the history does not already start
        # with one.
        if self.make_system_prompt:
            if not messages or messages[0][ROLE] != SYSTEM:
                messages.insert(0, Message(role=SYSTEM, content=self.make_system_prompt()))

        # Flatten single-item rich content into plain strings; strip user text.
        for msg in messages:
            if isinstance(msg.content, list):
                assert len(msg.content) == 1
                msg.content = msg.content[0].text
            if msg.role == USER:
                msg.content = msg.content.strip()

        reasoning = self.extra.get('reasoning', True)
        num_llm_calls_available = self.extra.get('max_llm_calls', 20)

        response = []
        # Fixed: was `while True and num_llm_calls_available > 0` — the
        # `True and` was redundant.
        while num_llm_calls_available > 0:
            num_llm_calls_available -= 1

            extra_generate_cfg = {'lang': lang}
            if kwargs.get('seed') is not None:
                extra_generate_cfg['seed'] = kwargs['seed']

            output_stream = self._call_llm(messages=messages,
                                           functions=[func.function for func in self.function_map.values()],
                                           extra_generate_cfg=extra_generate_cfg)

            output: List[Message] = []
            for output in output_stream:
                if output:
                    first_msg = output[0]
                    # Prefix a <think> marker so the UI can render the
                    # reasoning section separately.
                    if reasoning and isinstance(first_msg.content, str):
                        first_msg.content = "<think>\n" + first_msg.content.strip()
                    yield response + output

            if output:
                response.extend(output)
                messages.extend(output)

            used_any_tool = False
            for out in output:
                # Was a raw print(); downgraded to debug logging.
                logger.debug('tool-detect candidate: %s (full output: %s)', out, output)
                use_tool, tool_name, tool_args, _ = self._detect_tool(out)
                logger.info(f"{self.name} use_tool: {use_tool}, tool_name: {tool_name}, tool_args: {tool_args}")

                if use_tool:
                    tool_result = self._call_tool(tool_name, tool_args, messages=messages, **kwargs)
                    fn_msg = Message(
                        role=FUNCTION,
                        name=tool_name,
                        content=tool_result,
                    )
                    messages.append(fn_msg)
                    used_any_tool = True

            if not used_any_tool:
                logger.info(f'{self.name} not used any tool, skip out')
                break

        yield response

        if self.addtional_agent:
            # Re-run a follow-up agent on the conversation minus its last
            # message, swapping in that agent's own system prompt.
            new_messages = copy.deepcopy(messages)
            new_messages.pop()
            new_response = copy.deepcopy(response)
            new_response.pop()
            if new_messages[0][ROLE] == SYSTEM:
                # Add the system instruction to the agent
                new_messages[0].content = self.addtional_agent.make_system_prompt()
            for rsp in self.addtional_agent._run(messages=new_messages, **kwargs):
                yield new_response + rsp
|
| 126 |
+
|
app.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tongyi DeepResearch - A Deep Research Agent Demo for Huggingface Spaces"""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from agents.search_agent import SearchAgent
|
| 5 |
+
from llm.oai import TextChatAtOAI
|
| 6 |
+
from gui.web_ui import WebUI
|
| 7 |
+
import datetime
|
| 8 |
+
from search import Search
|
| 9 |
+
from visit import Visit
|
| 10 |
+
from scholar import Scholar
|
| 11 |
+
from tool_python import PythonInterpreter
|
| 12 |
+
|
| 13 |
+
# Connection settings for the DeepResearch LLM backend, supplied via env vars.
# Empty-string defaults mean "unconfigured" rather than failing at import time.
DR_MODEL_NAME = os.getenv("DR_MODEL_NAME", "")
DR_MODEL_SERVER = os.getenv("DR_MODEL_SERVER", "")
DR_MODEL_API_KEY = os.getenv("DR_MODEL_API_KEY", "")

# Static resources directory resolved relative to this file.
ROOT_RESOURCE = os.path.join(os.path.dirname(__file__), 'resource')
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def today_date():
    """Return today's date as a 'YYYY-MM-DD' string."""
    return f"{datetime.date.today():%Y-%m-%d}"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def init_dev_search_agent_service(
        name: str = 'SEARCH',
        port: int = 8002,
        desc: str = '初版',
        reasoning: bool = True,
        max_llm_calls: int = 20,
        tools=None,
        addtional_agent=None
):
    """Build and return a configured SearchAgent backed by the OAI-style LLM.

    Args:
        name, port, desc: Accepted for call-site compatibility; they are not
            currently used by the construction below.
        reasoning: Whether the agent prefixes a <think> block to LLM output.
        max_llm_calls: Budget of LLM calls per query.
        tools: Tool names for the agent; defaults to ['search', 'visit'].
        addtional_agent: Optional follow-up agent (spelling kept to match
            SearchAgent's parameter).

    Returns:
        A ready-to-use SearchAgent instance.
    """
    if tools is None:
        # Fixed: was a mutable default argument (['search', 'visit']).
        tools = ['search', 'visit']

    llm_cfg = TextChatAtOAI({
        'model': DR_MODEL_NAME,
        'model_type': 'oai',
        'model_server': DR_MODEL_SERVER,
        'api_key': DR_MODEL_API_KEY,
        'generate_cfg': {
            'fncall_prompt_type': 'nous',
            'temperature': 0.85,
            'top_p': 0.95,
            'top_k': -1,
            'presence_penalty': 1.1,
            'max_tokens': 32768,
            'stream_options': {
                'include_usage': True,
            },
            'timeout': 3000
        },
    })

    def make_system_prompt():
        # The final answer must be wrapped in <answer></answer> tags so the
        # UI can extract it.
        system_message = """You are a deep research assistant. Your core function is to conduct thorough, multi-source investigations into any topic. You must handle both broad, open-domain inquiries and queries within specialized academic fields. For every request, synthesize information from credible, diverse sources to deliver a comprehensive, accurate, and objective response. When you have gathered sufficient information and are ready to provide the definitive response, you must enclose the entire final answer within <answer></answer> tags.\n\n"""
        return system_message

    bot = SearchAgent(
        llm=llm_cfg,
        function_list=tools,
        system_message="",
        # Fixed: these were f-strings with no placeholders.
        name='Tongyi DeepResearch',
        description="I am Tongyi DeepResearch, a leading open-source Deep Research Agent, welcome to try!",
        extra={
            'reasoning': reasoning,
            'max_llm_calls': max_llm_calls,
        },
        addtional_agent=addtional_agent,
        make_system_prompt=make_system_prompt,
        custom_user_prompt=''  # was '''''' (two empty triple-quoted strings)
    )
    return bot
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def app_gui():
    """Build the agent(s) and launch the Gradio web UI on 0.0.0.0:7860."""
    agents = []
    # Single configuration today; the loop keeps it easy to add more agents.
    for name, port, desc, reasoning, max_llm_calls, tools in [
        ('Tongyi DeepResearch', 8004, '...', True, 50, ['search', 'visit', 'google_scholar', 'PythonInterpreter']),
    ]:
        search_bot_dev = init_dev_search_agent_service(
            name=name,
            port=port,
            desc=desc,
            reasoning=reasoning,
            max_llm_calls=max_llm_calls,
            tools=tools,
        )
        agents.append(search_bot_dev)

    # UI configuration: suggested example prompts plus display options.
    chatbot_config = {
        'prompt.suggestions': [
            '中国国足的一场比赛,国足首先失球,由一名宿姓球员扳平了。后来还发生了点球。比分最终是平局。这是哪场比赛?',
            'When is the paper submission deadline for the ACL 2025 Industry Track, and what is the venue address for the conference?',
            'On June 6, 2023, an article by Carolyn Collins Petersen was published in Universe Today. This article mentions a team that produced a paper about their observations, linked at the bottom of the article. Find this paper. Under what NASA award number was the work performed by R. G. Arendt supported by?',
            '有一位华语娱乐圈的重要人物,与其兄弟共同创作并主演了一部在中国南方沿海城市上映的喜剧电影,这部电影成为该类型的开山之作。与此同时,这位人物还凭借两首极具影响力的本地方言歌曲在音乐领域取得突破,极大推动了本地方言流行音乐的发展。请问,这一切发生在20世纪70年代的哪一年?',
            '有一首欧洲国家的国歌自20世纪50年代初被正式采用,并只选用了其中的一部分歌词。同一年,一位中国文艺界的重要人物创作了一部以民间传说为基础的戏曲作品,并在当年担任了多个文化领域的重要职务。请问这位中国文艺界人物是谁?',
            '有一部英国文坛上极具影响力的长篇诗歌,由一位16世纪末的著名诗人创作,这位诗人在16世纪90年代末于伦敦去世后,被安葬在一个象征英国文学传统的著名场所,与多位文学巨匠为邻。请问,这位诗人安息之地是哪里?',
            '出一份三天两夜的端午北京旅游攻略',
            '对比下最新小米汽车和保时捷性能参数,然后根据最终的结果分析下性价比最高的车型,并给出杭州的供应商',
            '量子计算突破对现有加密体系的威胁',
            '人工智能伦理框架的全球差异',
            '老龄化社会对全球养老金体系的长期冲击',
            '全球碳中和目标下的能源转型路径差异',
            '塑料污染在海洋食物链中的累积效应',
            'AI生成内容(如AI绘画)对传统艺术价值的重构'
        ],
        'user.name': 'User',
        'verbose': True
    }
    # NOTE(review): this is a single message dict, not a list, and it is
    # passed below as `message=` while WebUI.run declares `messages=` — so it
    # lands in run()'s **kwargs (self.run_kwargs) rather than seeding the chat
    # history. Confirm whether that is intended.
    messages = {'role': 'user', 'content': '介绍下你自己'}

    WebUI(
        agent=agents,
        chatbot_config=chatbot_config,
    ).run(
        message=messages,
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        concurrency_limit=20,
        enable_mention=False,
    )
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
# Script entry point: launch the web UI when run directly.
if __name__ == '__main__':
    app_gui()
|
| 125 |
+
|
gui/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Package entry point for `gui`: re-export the public WebUI class so callers
# can simply do `from gui import WebUI`.
from .web_ui import WebUI

__all__ = ['WebUI']
|
| 4 |
+
|
gui/assets/appBot.css
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Deep Research Agent Custom Styles
   Loaded by gui/web_ui.py via gr.Blocks(css=...). The purple/blue gradient
   (#667eea -> #764ba2) is the shared accent used across messages, buttons
   and the answer block. */

.container {
    max-width: 1400px;
    margin: 0 auto;
    padding: 20px;
}

.gradio-container {
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
}

/* Chat message styling */
.message {
    border-radius: 12px;
    padding: 12px 16px;
    margin: 8px 0;
}

.user-message {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
}

.bot-message {
    background: #f5f5f5;
    border: 1px solid #e0e0e0;
}

/* Code blocks */
pre, code {
    font-family: 'JetBrains Mono', 'Fira Code', monospace;
    background: #1e1e1e;
    color: #d4d4d4;
    border-radius: 8px;
}

pre {
    padding: 16px;
    overflow-x: auto;
}

/* Thinking blocks (renders the agent's <think> sections) */
.thinking-block {
    background: linear-gradient(135deg, #f6f8fc 0%, #eef1f8 100%);
    border-left: 4px solid #667eea;
    padding: 12px 16px;
    margin: 8px 0;
    border-radius: 0 8px 8px 0;
    font-style: italic;
    color: #666;
}

/* Tool call styling */
.tool-call {
    background: #fff3cd;
    border: 1px solid #ffc107;
    border-radius: 8px;
    padding: 12px;
    margin: 8px 0;
}

.tool-result {
    background: #d4edda;
    border: 1px solid #28a745;
    border-radius: 8px;
    padding: 12px;
    margin: 8px 0;
}

/* Answer block (final <answer> content) */
.answer-block {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    border-radius: 12px;
    padding: 20px;
    margin: 16px 0;
    box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4);
}

/* Input area */
.input-area {
    border-radius: 12px;
    border: 2px solid #e0e0e0;
    transition: border-color 0.3s ease;
}

.input-area:focus-within {
    border-color: #667eea;
    box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
}

/* Buttons */
button.primary {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border: none;
    border-radius: 8px;
    color: white;
    padding: 10px 20px;
    font-weight: 600;
    cursor: pointer;
    transition: transform 0.2s ease, box-shadow 0.2s ease;
}

button.primary:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 12px rgba(102, 126, 234, 0.4);
}

/* Scrollbar styling (WebKit-only browsers) */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: #f1f1f1;
    border-radius: 4px;
}

::-webkit-scrollbar-thumb {
    background: #c1c1c1;
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: #a8a8a8;
}

/* Loading animation: animated "..." suffix */
.loading-dots {
    display: inline-block;
}

.loading-dots::after {
    content: '';
    animation: dots 1.5s steps(4, end) infinite;
}

@keyframes dots {
    0%, 20% { content: ''; }
    40% { content: '.'; }
    60% { content: '..'; }
    80%, 100% { content: '...'; }
}

/* Responsive design */
@media (max-width: 768px) {
    .container {
        padding: 10px;
    }

    .message {
        padding: 10px 12px;
    }
}
|
| 157 |
+
|
gui/web_ui.py
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pprint
|
| 3 |
+
import re
|
| 4 |
+
from typing import List, Optional, Union
|
| 5 |
+
|
| 6 |
+
from qwen_agent import Agent, MultiAgentHub
|
| 7 |
+
from qwen_agent.agents.user_agent import PENDING_USER_INPUT
|
| 8 |
+
from qwen_agent.gui.gradio_utils import format_cover_html
|
| 9 |
+
from qwen_agent.gui.utils import convert_fncall_to_text, convert_history_to_chatbot, get_avatar_image
|
| 10 |
+
from qwen_agent.llm.schema import AUDIO, CONTENT, FILE, IMAGE, NAME, ROLE, USER, VIDEO, Message
|
| 11 |
+
from qwen_agent.log import logger
|
| 12 |
+
from qwen_agent.utils.utils import print_traceback
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class WebUI:
    """A Common chatbot application for agent."""

    def __init__(self, agent: Union[Agent, MultiAgentHub, List[Agent]], chatbot_config: Optional[dict] = None):
        """
        Initialization the chatbot.

        Args:
            agent: The agent or a list of agents, supports various types of agents such as Assistant, GroupChat, Router, etc.
            chatbot_config: The chatbot configuration. Set the configuration as {'user.name': '', 'user.avatar': '', 'agent.avatar': '', 'input.placeholder': '', 'prompt.suggestions': []}.
        """
        chatbot_config = chatbot_config or {}

        # Normalize the three accepted forms into a flat agent list, keeping a
        # handle to the hub (if any) for mention-routing decisions later.
        if isinstance(agent, MultiAgentHub):
            self.agent_list = [agent for agent in agent.nonuser_agents]
            self.agent_hub = agent
        elif isinstance(agent, list):
            self.agent_list = agent
            self.agent_hub = None
        else:
            self.agent_list = [agent]
            self.agent_hub = None

        # Display settings for the human side of the chat.
        user_name = chatbot_config.get('user.name', 'user')
        self.user_config = {
            'name': user_name,
            'avatar': chatbot_config.get(
                'user.avatar',
                get_avatar_image(user_name),
            ),
        }

        # One display config per agent; note 'agent.avatar' is a single shared
        # override applied to every agent when provided.
        self.agent_config_list = [{
            'name': agent.name,
            'avatar': chatbot_config.get(
                'agent.avatar',
                get_avatar_image(agent.name),
            ),
            'description': agent.description or "I'm a helpful assistant.",
        } for agent in self.agent_list]

        self.input_placeholder = chatbot_config.get('input.placeholder', '请输入需要分析的问题,尽管交给我吧~')
        self.prompt_suggestions = chatbot_config.get('prompt.suggestions', [])
        self.verbose = chatbot_config.get('verbose', False)
|
| 59 |
+
|
| 60 |
+
"""
|
| 61 |
+
Run the chatbot.
|
| 62 |
+
|
| 63 |
+
Args:
|
| 64 |
+
messages: The chat history.
|
| 65 |
+
"""
|
| 66 |
+
|
| 67 |
+
def run(self,
|
| 68 |
+
messages: List[Message] = None,
|
| 69 |
+
share: bool = False,
|
| 70 |
+
server_name: str = None,
|
| 71 |
+
server_port: int = None,
|
| 72 |
+
concurrency_limit: int = 10,
|
| 73 |
+
enable_mention: bool = False,
|
| 74 |
+
**kwargs):
|
| 75 |
+
self.run_kwargs = kwargs
|
| 76 |
+
from qwen_agent.gui.gradio_dep import gr, mgr, ms
|
| 77 |
+
|
| 78 |
+
customTheme = gr.themes.Default(
|
| 79 |
+
primary_hue=gr.themes.utils.colors.blue,
|
| 80 |
+
radius_size=gr.themes.utils.sizes.radius_none,
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
if messages is not None:
|
| 84 |
+
logger.info('web-ui messages.size %s' % len(messages))
|
| 85 |
+
|
| 86 |
+
with gr.Blocks(
|
| 87 |
+
css=os.path.join(os.path.dirname(__file__), 'assets/appBot.css'),
|
| 88 |
+
theme=customTheme,
|
| 89 |
+
) as demo:
|
| 90 |
+
history = gr.State([])
|
| 91 |
+
|
| 92 |
+
with ms.Application():
|
| 93 |
+
with gr.Row(elem_classes='container'):
|
| 94 |
+
with gr.Column(scale=4):
|
| 95 |
+
chatbot = mgr.Chatbot(value=convert_history_to_chatbot(messages=messages),
|
| 96 |
+
avatar_images=[
|
| 97 |
+
self.user_config,
|
| 98 |
+
self.agent_config_list,
|
| 99 |
+
],
|
| 100 |
+
height=850,
|
| 101 |
+
avatar_image_width=80,
|
| 102 |
+
flushing=False,
|
| 103 |
+
show_copy_button=True,
|
| 104 |
+
latex_delimiters=[{
|
| 105 |
+
'left': '\\(',
|
| 106 |
+
'right': '\\)',
|
| 107 |
+
'display': True
|
| 108 |
+
}, {
|
| 109 |
+
'left': '\\begin{equation}',
|
| 110 |
+
'right': '\\end{equation}',
|
| 111 |
+
'display': True
|
| 112 |
+
}, {
|
| 113 |
+
'left': '\\begin{align}',
|
| 114 |
+
'right': '\\end{align}',
|
| 115 |
+
'display': True
|
| 116 |
+
}, {
|
| 117 |
+
'left': '\\begin{alignat}',
|
| 118 |
+
'right': '\\end{alignat}',
|
| 119 |
+
'display': True
|
| 120 |
+
}, {
|
| 121 |
+
'left': '\\begin{gather}',
|
| 122 |
+
'right': '\\end{gather}',
|
| 123 |
+
'display': True
|
| 124 |
+
}, {
|
| 125 |
+
'left': '\\begin{CD}',
|
| 126 |
+
'right': '\\end{CD}',
|
| 127 |
+
'display': True
|
| 128 |
+
}, {
|
| 129 |
+
'left': '\\[',
|
| 130 |
+
'right': '\\]',
|
| 131 |
+
'display': True
|
| 132 |
+
}])
|
| 133 |
+
|
| 134 |
+
input = mgr.MultimodalInput(
|
| 135 |
+
placeholder=self.input_placeholder,
|
| 136 |
+
show_copy_button=True,
|
| 137 |
+
)
|
| 138 |
+
|
| 139 |
+
with gr.Column(scale=1):
|
| 140 |
+
if len(self.agent_list) > 1:
|
| 141 |
+
agent_selector = gr.Dropdown(
|
| 142 |
+
[(agent.name, i) for i, agent in enumerate(self.agent_list)],
|
| 143 |
+
label='Agents',
|
| 144 |
+
info='请选择一个 Agent',
|
| 145 |
+
value=0,
|
| 146 |
+
interactive=True,
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
agent_info_block = self._create_agent_info_block()
|
| 150 |
+
agent_plugins_block = self._create_agent_plugins_block()
|
| 151 |
+
|
| 152 |
+
if self.prompt_suggestions:
|
| 153 |
+
gr.Examples(
|
| 154 |
+
label='推荐对话',
|
| 155 |
+
examples=self.prompt_suggestions,
|
| 156 |
+
inputs=[input],
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
if len(self.agent_list) > 1:
|
| 160 |
+
agent_selector.change(
|
| 161 |
+
fn=self.change_agent,
|
| 162 |
+
inputs=[agent_selector],
|
| 163 |
+
outputs=[agent_selector, agent_info_block, agent_plugins_block],
|
| 164 |
+
queue=False,
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
input.change(
|
| 168 |
+
fn=self.change_text,
|
| 169 |
+
inputs=[input],
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
input_promise = input.submit(
|
| 173 |
+
fn=self.add_text,
|
| 174 |
+
inputs=[input, chatbot, history],
|
| 175 |
+
outputs=[input, chatbot, history],
|
| 176 |
+
queue=True,
|
| 177 |
+
concurrency_limit=concurrency_limit,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
if len(self.agent_list) > 1:
|
| 181 |
+
if enable_mention:
|
| 182 |
+
input_promise = input_promise.then(
|
| 183 |
+
self.add_mention,
|
| 184 |
+
[chatbot, agent_selector],
|
| 185 |
+
[chatbot, agent_selector],
|
| 186 |
+
).then(
|
| 187 |
+
self.agent_run,
|
| 188 |
+
[chatbot, history, agent_selector],
|
| 189 |
+
[chatbot, history, agent_selector],
|
| 190 |
+
)
|
| 191 |
+
else:
|
| 192 |
+
input_promise = input_promise.then(
|
| 193 |
+
self.agent_run,
|
| 194 |
+
[chatbot, history, agent_selector],
|
| 195 |
+
[chatbot, history, agent_selector],
|
| 196 |
+
)
|
| 197 |
+
else:
|
| 198 |
+
input_promise = input_promise.then(
|
| 199 |
+
self.agent_run,
|
| 200 |
+
[chatbot, history],
|
| 201 |
+
[chatbot, history],
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
input_promise.then(self.flushed, None, [input])
|
| 205 |
+
|
| 206 |
+
demo.load(None)
|
| 207 |
+
|
| 208 |
+
demo.queue(default_concurrency_limit=concurrency_limit).launch()
|
| 209 |
+
|
| 210 |
+
    def change_agent(self, agent_selector):
        # Refresh the info and plugin side panels for the newly selected agent;
        # the selector value itself is passed through unchanged.
        yield agent_selector, self._create_agent_info_block(agent_selector), self._create_agent_plugins_block(
            agent_selector)
|
| 213 |
+
|
| 214 |
+
    def change_text(self, _input):
        # Debug hook on every input change: logs the current text only.
        logger.info(f'agent_run change_text input:{_input.text}')
|
| 216 |
+
|
| 217 |
+
def add_text(self, _input, _chatbot, _history):
    """Append the user's multimodal submission to both the agent-facing
    history and the chatbot display, then lock the input box.

    ``_input`` is a gradio multimodal textbox value (exposes ``.text`` and
    ``.files``); ``_chatbot`` is the chat display list; ``_history`` is the
    message list later passed to the agent. Yields exactly one update
    triple ``(textbox_update, _chatbot, _history)``.
    """
    # Record the text part as a USER message in the agent-facing history.
    _history.append({
        ROLE: USER,
        CONTENT: [{
            'text': _input.text
        }],
    })

    # Attach the configured user display name, if any.
    if self.user_config[NAME]:
        _history[-1][NAME] = self.user_config[NAME]

    logger.info('agent_run add_text input:\n' + pprint.pformat(_history, indent=2))

    # Attach uploaded files, dispatched on MIME-type prefix. Media files are
    # referenced as 'file://' URIs; any other type falls back to a plain path.
    if _input.files:
        for file in _input.files:
            if file.mime_type.startswith('image/'):
                _history[-1][CONTENT].append({IMAGE: 'file://' + file.path})
            elif file.mime_type.startswith('audio/'):
                _history[-1][CONTENT].append({AUDIO: 'file://' + file.path})
            elif file.mime_type.startswith('video/'):
                _history[-1][CONTENT].append({VIDEO: 'file://' + file.path})
            else:
                _history[-1][CONTENT].append({FILE: file.path})

    # Show the raw submission in the chat display; the reply slot stays empty
    # until agent_run fills it.
    _chatbot.append([_input, None])
    from qwen_agent.gui.gradio_dep import gr
    # Disable and clear the textbox; flushed() re-enables it after the run.
    yield gr.update(interactive=False, value=''), _chatbot, _history
+
def add_mention(self, _chatbot, _agent_selector):
    """Resolve an ``@agent`` mention in the latest user message.

    If the query contains ``@name``, switch the selector to that agent;
    if no mention is present (and no agent hub is configured), prepend the
    currently selected agent's mention to the query text.
    Yields the (possibly updated) ``(_chatbot, _agent_selector)`` pair once.
    """
    if len(self.agent_list) == 1:
        # Single-agent UI: mentions are meaningless, pass through unchanged.
        yield _chatbot, _agent_selector
        # Fix: without this return the generator fell through, ran the
        # mention logic anyway, mutated the query text, and yielded twice.
        return

    query = _chatbot[-1][0].text
    match = re.search(r'@\w+\b', query)
    if match:
        _agent_selector = self._get_agent_index_by_name(match.group()[1:])

    agent_name = self.agent_list[_agent_selector].name
    if ('@' + agent_name) not in query and self.agent_hub is None:
        _chatbot[-1][0].text = '@' + agent_name + ' ' + query

    yield _chatbot, _agent_selector
+
def agent_run(self, _chatbot, _history, _agent_selector=None):
    """Stream one agent turn into the chatbot display.

    Drives ``agent_runner.run`` over the latest user message only, mapping
    each streamed response onto a per-agent slot inside the chatbot's reply
    bubbles. Yields ``(_chatbot, _history[, _agent_selector])`` after every
    display update so gradio can re-render incrementally.
    """
    # TODO: only keep a single-turn conversation for the task
    if self.verbose:
        logger.info('agent_run input[all]:\n' + pprint.pformat(_history, indent=2))

    # Truncate to the most recent message: the agent is run single-turn.
    _history = _history[-1:]
    if self.verbose:
        logger.info('agent_run input[new]:\n' + pprint.pformat(_history, indent=2))

    if len(_history) == 0:
        # Nothing to do: echo current state once and stop.
        if _agent_selector is not None:
            yield _chatbot, _history, _agent_selector
        else:
            yield _chatbot, _history
        logger.info('agent_run input with empty input, do nothing.')
        return

    # Bubble bookkeeping: the last chatbot entry is the pending reply bubble.
    num_input_bubbles = len(_chatbot) - 1
    num_output_bubbles = 1
    # Reply slot per bubble is a list with one cell per agent.
    _chatbot[-1][1] = [None for _ in range(len(self.agent_list))]

    logger.info('agent_run input:_agent_selector %s' % _agent_selector)
    agent_runner = self.agent_list[_agent_selector or 0]
    if self.agent_hub:
        agent_runner = self.agent_hub

    # NOTE(review): bare attribute access with no effect visible here —
    # presumably forces lazy tool registration; confirm before removing.
    agent_runner.function_map
    responses = []
    for responses in agent_runner.run(_history, **self.run_kwargs):
        if not responses:
            continue
        if responses[-1][CONTENT] == PENDING_USER_INPUT:
            # The agent paused for user input; stop streaming this turn.
            logger.info('Interrupted. Waiting for user input!')
            break

        # Convert structured function calls into displayable text.
        display_responses = convert_fncall_to_text(responses)
        if not display_responses:
            continue
        if display_responses[-1][CONTENT] is None:
            continue

        # Grow the chatbot with empty bubbles until there is one display
        # bubble per response message.
        while len(display_responses) > num_output_bubbles:
            # Create a new chat bubble
            _chatbot.append([None, None])
            _chatbot[-1][1] = [None for _ in range(len(self.agent_list))]
            num_output_bubbles += 1

        assert num_output_bubbles == len(display_responses)
        assert num_input_bubbles + num_output_bubbles == len(_chatbot)

        # Write each response into the slot of the agent that produced it.
        for i, rsp in enumerate(display_responses):
            agent_index = self._get_agent_index_by_name(rsp[NAME])
            _chatbot[num_input_bubbles + i][1][agent_index] = rsp[CONTENT]
            if len(self.agent_list) > 1:
                # Keep the selector following the last speaking agent.
                _agent_selector = agent_index

        if _agent_selector is not None:
            yield _chatbot, _history, _agent_selector
        else:
            yield _chatbot, _history

    # Persist the final responses (minus any pending-input sentinel).
    if responses:
        _history.extend([res for res in responses if res[CONTENT] != PENDING_USER_INPUT])

    # Final state emission after the run loop ends.
    if _agent_selector is not None:
        yield _chatbot, _history, _agent_selector
    else:
        yield _chatbot, _history

    if self.verbose:
        logger.info('agent_run response:\n' + pprint.pformat(responses, indent=2))
+
def flushed(self):
    """Unlock and clear the input textbox once an agent run has finished."""
    logger.info('agent_run flushed')
    from qwen_agent.gui.gradio_dep import gr
    return gr.update(interactive=True, value='')
+
def _get_agent_index_by_name(self, agent_name):
|
| 338 |
+
if agent_name is None:
|
| 339 |
+
return 0
|
| 340 |
+
try:
|
| 341 |
+
agent_name = agent_name.strip()
|
| 342 |
+
for i, agent in enumerate(self.agent_list):
|
| 343 |
+
if agent.name == agent_name:
|
| 344 |
+
return i
|
| 345 |
+
return 0
|
| 346 |
+
except Exception:
|
| 347 |
+
print_traceback()
|
| 348 |
+
return 0
|
| 349 |
+
|
| 350 |
+
def _create_agent_info_block(self, agent_index=0):
    """Build the HTML cover card for the agent at ``agent_index``."""
    from qwen_agent.gui.gradio_dep import gr
    cfg = self.agent_config_list[agent_index]
    cover = format_cover_html(
        bot_name=cfg['name'],
        bot_description=cfg['description'],
        bot_avatar=cfg['avatar'],
    )
    return gr.HTML(cover)
+
def _create_agent_plugins_block(self, agent_index=0):
    """Build a read-only checkbox list of the agent's registered tools.

    The list is empty when the agent has no function map; both cases
    render the same non-interactive CheckboxGroup.
    """
    from qwen_agent.gui.gradio_dep import gr
    agent = self.agent_list[agent_index]
    capabilities = list(agent.function_map) if agent.function_map else []
    return gr.CheckboxGroup(
        label='插件',
        value=capabilities,
        choices=capabilities,
        interactive=False,
    )
| 380 |
+
|
llm/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .oai import TextChatAtOAI
|
| 2 |
+
|
| 3 |
+
__all__ = ['TextChatAtOAI']
|
| 4 |
+
|
llm/oai.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
from typing import List, Iterator
|
| 3 |
+
import copy
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
import random
|
| 7 |
+
import os
|
| 8 |
+
from pprint import pformat
|
| 9 |
+
from typing import Dict, Iterator, List, Optional, Literal, Union
|
| 10 |
+
import openai
|
| 11 |
+
from openai import OpenAIError, RateLimitError
|
| 12 |
+
|
| 13 |
+
if openai.__version__.startswith('0.'):
|
| 14 |
+
from openai.error import OpenAIError # noqa
|
| 15 |
+
else:
|
| 16 |
+
from openai import OpenAIError
|
| 17 |
+
|
| 18 |
+
from qwen_agent.llm.base import ModelServiceError, register_llm
|
| 19 |
+
from qwen_agent.llm.function_calling import BaseFnCallModel, simulate_response_completion_with_chat
|
| 20 |
+
from qwen_agent.llm.schema import ASSISTANT, Message, FunctionCall
|
| 21 |
+
from qwen_agent.log import logger
|
| 22 |
+
import datetime
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def today_date():
    """Return today's local date as an ISO ``YYYY-MM-DD`` string."""
    return datetime.date.today().isoformat()
|
| 28 |
+
|
| 29 |
+
SYSTEM_PROMPT = """You are a deep research assistant. Your core function is to conduct thorough, multi-source investigations into any topic. You must handle both broad, open-domain inquiries and queries within specialized academic fields. For every request, synthesize information from credible, diverse sources to deliver a comprehensive, accurate, and objective response. When you have gathered sufficient information and are ready to provide the definitive response, you must enclose the entire final answer within <answer></answer> tags.
|
| 30 |
+
|
| 31 |
+
# Tools
|
| 32 |
+
|
| 33 |
+
You may call one or more functions to assist with the user query.
|
| 34 |
+
|
| 35 |
+
You are provided with function signatures within <tools></tools> XML tags:
|
| 36 |
+
<tools>
|
| 37 |
+
{"type": "function", "function": {"name": "search", "description": "Perform Google web searches then returns a string of the top search results. Accepts multiple queries.", "parameters": {"type": "object", "properties": {"query": {"type": "array", "items": {"type": "string", "description": "The search query."}, "minItems": 1, "description": "The list of search queries."}}, "required": ["query"]}}}
|
| 38 |
+
{"type": "function", "function": {"name": "visit", "description": "Visit webpage(s) and return the summary of the content.", "parameters": {"type": "object", "properties": {"url": {"type": "array", "items": {"type": "string"}, "description": "The URL(s) of the webpage(s) to visit. Can be a single URL or an array of URLs."}, "goal": {"type": "string", "description": "The specific information goal for visiting webpage(s)."}}, "required": ["url", "goal"]}}}
|
| 39 |
+
{"type": "function", "function": {"name": "PythonInterpreter", "description": "Executes Python code in a sandboxed environment. To use this tool, you must follow this format:
|
| 40 |
+
1. The 'arguments' JSON object must be empty: {}.
|
| 41 |
+
2. The Python code to be executed must be placed immediately after the JSON block, enclosed within <code> and </code> tags.
|
| 42 |
+
|
| 43 |
+
IMPORTANT: Any output you want to see MUST be printed to standard output using the print() function.
|
| 44 |
+
|
| 45 |
+
Example of a correct call:
|
| 46 |
+
<tool_call>
|
| 47 |
+
{"name": "PythonInterpreter", "arguments": {}}
|
| 48 |
+
<code>
|
| 49 |
+
import numpy as np
|
| 50 |
+
# Your code here
|
| 51 |
+
print(f"The result is: {np.mean([1,2,3])}")
|
| 52 |
+
</code>
|
| 53 |
+
</tool_call>", "parameters": {"type": "object", "properties": {}, "required": []}}}
|
| 54 |
+
{"type": "function", "function": {"name": "google_scholar", "description": "Leverage Google Scholar to retrieve relevant information from academic publications. Accepts multiple queries. This tool will also return results from google search", "parameters": {"type": "object", "properties": {"query": {"type": "array", "items": {"type": "string", "description": "The search query."}, "minItems": 1, "description": "The list of search queries for Google Scholar."}}, "required": ["query"]}}}
|
| 55 |
+
{"type": "function", "function": {"name": "parse_file", "description": "This is a tool that can be used to parse multiple user uploaded local files such as PDF, DOCX, PPTX, TXT, CSV, XLSX, DOC, ZIP, MP4, MP3.", "parameters": {"type": "object", "properties": {"files": {"type": "array", "items": {"type": "string"}, "description": "The file name of the user uploaded local files to be parsed."}}, "required": ["files"]}}}
|
| 56 |
+
</tools>
|
| 57 |
+
|
| 58 |
+
For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
|
| 59 |
+
<tool_call>
|
| 60 |
+
{"name": <function-name>, "arguments": <args-json-object>}
|
| 61 |
+
</tool_call>
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@register_llm('oai')
|
| 67 |
+
class TextChatAtOAI(BaseFnCallModel):
|
| 68 |
+
def __init__(self, cfg: Optional[Dict] = None):
    """Configure the OpenAI-compatible backend.

    Resolves the API base URL from ``api_base``/``base_url``/``model_server``
    (first non-empty wins) and the key from ``api_key`` or the
    ``OPENAI_API_KEY`` environment variable, then binds version-appropriate
    completion callables to ``self._complete_create`` /
    ``self._chat_complete_create``.
    """
    super().__init__(cfg)
    # Fallback model when the config does not name one.
    self.model = self.model or 'gpt-4o-mini'
    cfg = cfg or {}
    api_base = cfg.get('api_base')
    api_base = api_base or cfg.get('base_url')
    api_base = api_base or cfg.get('model_server')
    api_base = (api_base or '').strip()

    api_key = cfg.get('api_key')
    api_key = api_key or os.getenv('OPENAI_API_KEY')
    # 'EMPTY' is the conventional placeholder for keyless local servers.
    api_key = (api_key or 'EMPTY').strip()

    if openai.__version__.startswith('0.'):
        # Legacy openai<1.0: configure module-level globals and use the
        # module-level create functions directly.
        if api_base:
            openai.api_base = api_base
        if api_key:
            openai.api_key = api_key
        self._complete_create = openai.Completion.create
        self._chat_complete_create = openai.ChatCompletion.create
    else:
        # openai>=1.0: build a client per call with these kwargs.
        api_kwargs = {}
        if api_base:
            api_kwargs['base_url'] = api_base
        if api_key:
            api_kwargs['api_key'] = api_key

        def _chat_complete_create(*args, **kwargs):
            # OpenAI API v1 does not allow the following args, must pass by extra_body
            extra_params = ['top_k', 'repetition_penalty']
            if any((k in kwargs) for k in extra_params):
                kwargs['extra_body'] = copy.deepcopy(kwargs.get('extra_body', {}))
                for k in extra_params:
                    if k in kwargs:
                        kwargs['extra_body'][k] = kwargs.pop(k)
            # v1 renamed request_timeout -> timeout.
            if 'request_timeout' in kwargs:
                kwargs['timeout'] = kwargs.pop('request_timeout')
            # NOTE(review): a new OpenAI client is constructed per request —
            # confirm this is intentional (no connection reuse).
            client = openai.OpenAI(**api_kwargs)
            return client.chat.completions.create(*args, **kwargs)

        def _complete_create(*args, **kwargs):
            # OpenAI API v1 does not allow the following args, must pass by extra_body
            extra_params = ['top_k', 'repetition_penalty']
            if any((k in kwargs) for k in extra_params):
                kwargs['extra_body'] = copy.deepcopy(kwargs.get('extra_body', {}))
                for k in extra_params:
                    if k in kwargs:
                        kwargs['extra_body'][k] = kwargs.pop(k)
            if 'request_timeout' in kwargs:
                kwargs['timeout'] = kwargs.pop('request_timeout')
            client = openai.OpenAI(**api_kwargs)
            return client.completions.create(*args, **kwargs)

        self._complete_create = _complete_create
        self._chat_complete_create = _chat_complete_create
+
def _chat_stream(
    self,
    messages: List[Message],
    delta_stream: bool,
    generate_cfg: dict,
) -> Iterator[List[Message]]:
    """Stream a chat completion, yielding Message lists as chunks arrive.

    ``delta_stream=True`` yields each raw delta separately;
    ``delta_stream=False`` yields the accumulated full response, throttled
    by the buffer thresholds below. Raises ModelServiceError on API errors.
    """
    messages = self.convert_messages_to_dicts(messages)
    try:
        MAX_RETRIES = 5
        INITIAL_DELAY = 2          # seconds; doubled per retry (exponential backoff)
        CONTENT_THRESHOLD = 50     # min buffered content chars before a yield
        REASONING_THRESHOLD = 50   # min buffered reasoning chars before a yield

        response = None
        # Retry only on rate limits; any other OpenAI error fails fast.
        for attempt in range(MAX_RETRIES):
            try:
                response = self._chat_complete_create(model=self.model,
                                                      messages=messages,
                                                      stream=True,
                                                      **generate_cfg)
                break
            except RateLimitError as ex:
                if attempt == MAX_RETRIES - 1:
                    logger.error(f"API rate limit error after {MAX_RETRIES} retries. Raising exception.")
                    raise ModelServiceError(exception=ex) from ex
                # Exponential backoff with jitter.
                delay = INITIAL_DELAY * (2 ** attempt) + random.uniform(0, 1)
                logger.warning(
                    f"Rate limit exceeded. Retrying in {delay:.2f} seconds... (Attempt {attempt + 1}/{MAX_RETRIES})"
                )
                time.sleep(delay)
            except OpenAIError as ex:
                logger.error(f"An OpenAI error occurred: {ex}")
                raise ModelServiceError(exception=ex) from ex

        if delta_stream:
            # Pass-through mode: one yield per non-empty delta field.
            for chunk in response:
                if chunk.choices:
                    choice = chunk.choices[0]
                    if hasattr(choice.delta, 'reasoning_content') and choice.delta.reasoning_content:
                        yield [
                            Message(
                                role=ASSISTANT,
                                content='',
                                reasoning_content=choice.delta.reasoning_content
                            )
                        ]
                    if hasattr(choice.delta, 'content') and choice.delta.content:
                        yield [Message(role=ASSISTANT, content=choice.delta.content, reasoning_content='')]
                    if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
                        # NOTE(review): assumes each chunk carries complete
                        # tool-call arguments — json.loads on a partial
                        # fragment would raise; confirm the backend sends
                        # whole tool calls per chunk.
                        function_name = choice.delta.tool_calls[0].function.name
                        function_call = {
                            'name': function_name,
                            'arguments': json.loads(choice.delta.tool_calls[0].function.arguments)
                        }
                        function_json = json.dumps(function_call, ensure_ascii=False)
                        yield [Message(role=ASSISTANT, content=f'<tool_call>{function_json}</tool_call>')]
                    logger.info(f'delta_stream message chunk: {chunk}')
        else:
            # Accumulating mode: keep running totals and flush when a buffer
            # threshold is crossed or a newline arrives.
            full_response = ''
            full_reasoning_content = ''
            content_buffer = ''
            reasoning_content_buffer = ''

            for chunk in response:
                if not chunk.choices:
                    continue
                choice = chunk.choices[0]
                new_content = choice.delta.content if hasattr(choice.delta, 'content') and choice.delta.content else ''
                # NOTE(review): this path reads ``delta.reasoning`` while the
                # delta_stream path reads ``delta.reasoning_content`` —
                # confirm which field the serving backend actually emits.
                new_reasoning = choice.delta.reasoning if hasattr(choice.delta, 'reasoning') and choice.delta.reasoning else ''
                has_tool_calls = hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls

                if new_reasoning:
                    full_reasoning_content += new_reasoning
                    reasoning_content_buffer += new_reasoning

                if new_content:
                    full_response += new_content
                    content_buffer += new_content

                if has_tool_calls:
                    function_name = choice.delta.tool_calls[0].function.name
                    function_call = {
                        'name': function_name,
                        'arguments': json.loads(choice.delta.tool_calls[0].function.arguments)
                    }
                    function_json = json.dumps(function_call, ensure_ascii=False)
                    logger.info(json.dumps(function_call, ensure_ascii=False, indent=4))
                    full_response += f'<tool_call>{function_json}</tool_call>'
                    # Only the tag is buffered — enough to trigger a flush.
                    content_buffer += '<tool_call>'

                if (len(content_buffer) >= CONTENT_THRESHOLD or
                        len(reasoning_content_buffer) >= REASONING_THRESHOLD or
                        '\n' in new_content or
                        '\n' in new_reasoning):
                    yield [Message(role=ASSISTANT, content=full_response, reasoning_content=full_reasoning_content)]
                    content_buffer = ''
                    reasoning_content_buffer = ''

                logger.info(f'message chunk: {chunk}')

            # Flush whatever remains below the thresholds.
            if content_buffer or reasoning_content_buffer:
                yield [Message(role=ASSISTANT, content=full_response, reasoning_content=full_reasoning_content)]

    except OpenAIError as ex:
        raise ModelServiceError(exception=ex)
+
def _chat_no_stream(
    self,
    messages: List[Message],
    generate_cfg: dict,
) -> List[Message]:
    """Run a one-shot (non-streaming) chat completion.

    Returns a single assistant Message; reasoning content is carried over
    when the backend supplies it. Raises ModelServiceError on API errors.
    """
    messages = self.convert_messages_to_dicts(messages)
    try:
        response = self._chat_complete_create(model=self.model, messages=messages, stream=False, **generate_cfg)
        top = response.choices[0].message
        if hasattr(top, 'reasoning_content'):
            return [
                Message(role=ASSISTANT,
                        content=top.content,
                        reasoning_content=top.reasoning_content)
            ]
        return [Message(role=ASSISTANT, content=top.content)]
    except OpenAIError as ex:
        raise ModelServiceError(exception=ex)
| 249 |
+
def _chat_with_functions(
    self,
    messages: List[Message],
    functions: List[Dict],
    stream: bool,
    delta_stream: bool,
    generate_cfg: dict,
    lang: Literal['en', 'zh'],
) -> Union[List[Message], Iterator[List[Message]]]:
    """Route a function-calling request through the plain chat path.

    Tool calling is simulated in the prompt, so framework-only options
    are stripped from the generation config before dispatch.
    """
    generate_cfg = copy.deepcopy(generate_cfg)
    # These keys are consumed by the framework, not by the OpenAI API.
    for key in ('parallel_function_calls', 'function_choice', 'thought_in_content'):
        generate_cfg.pop(key, None)

    messages = simulate_response_completion_with_chat(messages)
    return self._chat(messages, stream=stream, delta_stream=delta_stream, generate_cfg=generate_cfg)
| 266 |
+
def _chat(
    self,
    messages: List[Union[Message, Dict]],
    stream: bool,
    delta_stream: bool,
    generate_cfg: dict,
) -> Union[List[Message], Iterator[List[Message]]]:
    """Dispatch to the streaming or one-shot implementation."""
    if not stream:
        return self._chat_no_stream(messages, generate_cfg=generate_cfg)
    return self._chat_stream(messages, delta_stream=delta_stream, generate_cfg=generate_cfg)
+
@staticmethod
def convert_messages_to_dicts(messages: List[Message]) -> List[dict]:
    """Dump Message objects to plain dicts for the OpenAI API.

    The first message's content is replaced wholesale with the system
    prompt plus the current date, and duplicated ``<think>`` openers are
    collapsed in every message.
    """
    dumped = [msg.model_dump() for msg in messages]
    # Assumes messages[0] is the system turn — its content is overwritten.
    dumped[0]["content"] = SYSTEM_PROMPT + "Current date: " + str(today_date())
    result = []
    for item in dumped:
        item["content"] = item["content"].replace("<think>\n<think>\n", "<think>\n\n")
        result.append(item)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(f'LLM Input:\n{pformat(dumped, indent=2)}')
    return result
+
|
prompt.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Prompt template for the webpage-extraction step of the `visit` tool.
# Filled via str.format with `webpage_content` and `goal`; the model is
# expected to answer with a JSON object (note: "feilds" typo is part of the
# deployed prompt text and must not be changed here).
EXTRACTOR_PROMPT = """Please process the following webpage content and user goal to extract relevant information:

## **Webpage Content**
{webpage_content}

## **User Goal**
{goal}

## **Task Guidelines**
1. **Content Scanning for Rational**: Locate the **specific sections/data** directly related to the user's goal within the webpage content
2. **Key Extraction for Evidence**: Identify and extract the **most relevant information** from the content, you never miss any important information, output the **full original context** of the content as far as possible, it can be more than three paragraphs.
3. **Summary Output for Summary**: Organize into a concise paragraph with logical flow, prioritizing clarity and judge the contribution of the information to the goal.

**Final Output Format using JSON format has "rational", "evidence", "summary" feilds**
"""
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio==5.23.1
|
| 2 |
+
huggingface_hub==0.23.0
|
| 3 |
+
qwen-agent[gui,rag,code_interpreter]==0.0.31
|
| 4 |
+
sandbox_fusion
|
| 5 |
+
tiktoken
|
| 6 |
+
openai
|
| 7 |
+
requests
|
| 8 |
+
modelscope_studio
|
scholar.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import requests
|
| 4 |
+
from typing import Union, List
|
| 5 |
+
from qwen_agent.tools.base import BaseTool, register_tool
|
| 6 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 7 |
+
import http.client
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
SERPER_KEY = os.environ.get('SERPER_KEY_ID')
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@register_tool("google_scholar", allow_overwrite=True)
class Scholar(BaseTool):
    """Google Scholar search tool backed by the serper.dev HTTP API."""

    name = "google_scholar"
    description = "Leverage Google Scholar to retrieve relevant information from academic publications. Accepts multiple queries."
    parameters = {
        "type": "object",
        "properties": {
            "query": {
                "type": "array",
                "items": {"type": "string", "description": "The search query."},
                "minItems": 1,
                "description": "The list of search queries for Google Scholar."
            },
        },
        "required": ["query"],
    }

    def google_scholar_with_serp(self, query: str):
        """Run one Scholar query via serper.dev and format results as markdown.

        Returns a human-readable string; on repeated network failure or an
        empty result set, returns an explanatory fallback message instead of
        raising.
        """
        conn = http.client.HTTPSConnection("google.serper.dev")
        payload = json.dumps({
            "q": query,
        })
        headers = {
            'X-API-KEY': SERPER_KEY,
            'Content-Type': 'application/json'
        }
        # Retry the HTTP round-trip up to 5 times before giving up.
        for i in range(5):
            try:
                conn.request("POST", "/scholar", payload, headers)
                res = conn.getresponse()
                break
            except Exception as e:
                print(e)
                if i == 4:
                    return "Google Scholar Timeout, return None, Please try again later."
                continue

        data = res.read()
        results = json.loads(data.decode("utf-8"))
        try:
            if "organic" not in results:
                raise Exception(f"No results found for query: '{query}'. Use a less specific query.")

            web_snippets = list()
            idx = 0
            if "organic" in results:
                for page in results["organic"]:
                    idx += 1
                    date_published = ""
                    if "year" in page:
                        date_published = "\nDate published: " + str(page["year"])

                    publicationInfo = ""
                    if "publicationInfo" in page:
                        publicationInfo = "\npublicationInfo: " + page["publicationInfo"]

                    snippet = ""
                    if "snippet" in page:
                        snippet = "\n" + page["snippet"]

                    link_info = "no available link"
                    if "pdfUrl" in page:
                        link_info = "pdfUrl: " + page["pdfUrl"]

                    citedBy = ""
                    if "citedBy" in page:
                        citedBy = "\ncitedBy: " + str(page["citedBy"])

                    redacted_version = f"{idx}. [{page['title']}]({link_info}){publicationInfo}{date_published}{citedBy}\n{snippet}"

                    redacted_version = redacted_version.replace("Your browser can't play this video.", "")
                    web_snippets.append(redacted_version)

            content = f"A Google scholar for '{query}' found {len(web_snippets)} results:\n\n## Scholar Results\n" + "\n\n".join(web_snippets)
            return content
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the best-effort fallback is preserved.
        except Exception:
            return f"No results found for '{query}'. Try with a more general query."

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Validate params and run the query/queries (threaded for lists).

        Accepts either a JSON string or an already-parsed dict (the declared
        `Union[str, dict]`); previously a dict input crashed json.loads and
        fell into the error branch.
        """
        try:
            if isinstance(params, str):
                params = json.loads(params)
            params = self._verify_json_format_args(params)
            query = params["query"]
        except Exception:
            return "[google_scholar] Invalid request format: Input must be a JSON object containing 'query' field"

        if isinstance(query, str):
            response = self.google_scholar_with_serp(query)
        else:
            assert isinstance(query, list)
            # Fan out the batch across a small thread pool; results keep
            # the input order and are joined with a visual separator.
            with ThreadPoolExecutor(max_workers=3) as executor:
                response = list(executor.map(self.google_scholar_with_serp, query))
            response = "\n=======\n".join(response)
        return response
|
search.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 3 |
+
from typing import List, Union
|
| 4 |
+
import requests
|
| 5 |
+
from qwen_agent.tools.base import BaseTool, register_tool
|
| 6 |
+
import asyncio
|
| 7 |
+
from typing import Dict, List, Optional, Union
|
| 8 |
+
import uuid
|
| 9 |
+
import http.client
|
| 10 |
+
import json
|
| 11 |
+
import os
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
SERPER_KEY = os.environ.get('SERPER_KEY_ID')
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@register_tool("search", allow_overwrite=True)
class Search(BaseTool):
    """Batched Google web search backed by the serper.dev API.

    The agent passes either a single query string or a list of queries; each
    query is sent to google.serper.dev and the organic results are formatted
    as a numbered markdown list.
    """

    name = "search"
    description = "Performs batched web searches: supply an array 'query'; the tool retrieves the top 10 results for each query in one call."
    parameters = {
        "type": "object",
        "properties": {
            "query": {
                "type": "array",
                "items": {
                    "type": "string"
                },
                "description": "Array of query strings. Include multiple complementary search queries in a single call."
            },
        },
        "required": ["query"],
    }

    def __init__(self, cfg: Optional[dict] = None):
        super().__init__(cfg)

    def google_search_with_serp(self, query: str):
        """Run one query against serper.dev and format its organic results.

        Args:
            query: The search query string.

        Returns:
            A markdown-formatted result list, or a human-readable error string
            when the request fails or yields no results.
        """
        def contains_chinese_basic(text: str) -> bool:
            # CJK Unified Ideographs block U+4E00..U+9FFF.
            return any('\u4E00' <= char <= '\u9FFF' for char in text)

        # Localize the search: Chinese queries search from China, others from the US.
        if contains_chinese_basic(query):
            payload = json.dumps({
                "q": query,
                "location": "China",
                "gl": "cn",
                "hl": "zh-cn"
            })
        else:
            payload = json.dumps({
                "q": query,
                "location": "United States",
                "gl": "us",
                "hl": "en"
            })
        headers = {
            'X-API-KEY': SERPER_KEY,
            'Content-Type': 'application/json'
        }

        res = None
        for i in range(5):
            try:
                # Open a fresh connection per attempt: a failed request can
                # leave the previous connection object in an unusable state.
                conn = http.client.HTTPSConnection("google.serper.dev")
                conn.request("POST", "/search", payload, headers)
                res = conn.getresponse()
                break
            except Exception as e:
                print(e)
                if i == 4:
                    return "Google search Timeout, return None, Please try again later."

        data = res.read()
        results = json.loads(data.decode("utf-8"))
        print(results)

        try:
            if "organic" not in results:
                raise Exception(f"No results found for query: '{query}'. Use a less specific query.")

            web_snippets = []
            for idx, page in enumerate(results["organic"], start=1):
                date_published = ""
                if "date" in page:
                    date_published = "\nDate published: " + page["date"]

                source = ""
                if "source" in page:
                    source = "\nSource: " + page["source"]

                snippet = ""
                if "snippet" in page:
                    snippet = "\n" + page["snippet"]

                redacted_version = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}"
                # Strip a boilerplate phrase serper forwards for video results.
                redacted_version = redacted_version.replace("Your browser can't play this video.", "")
                web_snippets.append(redacted_version)

            content = f"A Google search for '{query}' found {len(web_snippets)} results:\n\n## Web Results\n" + "\n\n".join(web_snippets)
            return content
        except Exception as e:
            print(e)
            return f"No results found for '{query}'. Try with a more general query."

    def search_with_serp(self, query: str):
        """Thin alias kept for backward compatibility with existing callers."""
        return self.google_search_with_serp(query)

    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Tool entry point.

        Args:
            params: JSON string or already-parsed dict containing a 'query'
                field (string or list of strings).

        Returns:
            Per-query result blocks joined by '=======' separators, or an
            error string on malformed input.
        """
        try:
            # The framework may hand us the raw JSON string or an
            # already-parsed dict; the original str-only json.loads call
            # rejected valid dict input.
            if isinstance(params, str):
                params = json.loads(params)
            query = params["query"]
            print("query:\n", query)
        except (json.JSONDecodeError, TypeError, KeyError):
            return "[Search] Invalid request format: Input must be a JSON object containing 'query' field"

        if isinstance(query, str):
            # Single query.
            response = self.search_with_serp(query)
        else:
            # Multiple queries, searched sequentially.
            assert isinstance(query, List)
            responses = [self.search_with_serp(q) for q in query]
            response = "\n=======\n".join(responses)

        return response
|
| 134 |
+
|
tool_python.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from typing import Dict, List, Optional, Union
|
| 3 |
+
import json5
|
| 4 |
+
from qwen_agent.tools.base import BaseToolWithFileAccess, register_tool
|
| 5 |
+
from qwen_agent.utils.utils import extract_code
|
| 6 |
+
from sandbox_fusion import run_code, RunCodeRequest, RunStatus
|
| 7 |
+
from requests.exceptions import Timeout
|
| 8 |
+
import os
|
| 9 |
+
import random
|
| 10 |
+
import time
|
| 11 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Default sandbox endpoint list: a single entry taken from SANDBOX_URL
# (may be the empty string when the variable is unset).
SANDBOX_URL = os.getenv('SANDBOX_URL', '')
SANDBOX_FUSION_ENDPOINTS = [SANDBOX_URL]

# SANDBOX_FUSION_ENDPOINT, when set, replaces the list entirely; it may hold
# several comma-separated endpoints (one is picked at random per attempt).
if 'SANDBOX_FUSION_ENDPOINT' in os.environ:
    SANDBOX_FUSION_ENDPOINTS = os.environ['SANDBOX_FUSION_ENDPOINT'].split(',')
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def has_chinese_chars(data) -> bool:
    """Return True when the string form of *data* contains at least one
    character from the CJK Unified Ideographs block (U+4E00..U+9FFF)."""
    rendered = f'{data}'
    return re.search(r'[\u4e00-\u9fff]', rendered) is not None
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@register_tool('PythonInterpreter', allow_overwrite=True)
class PythonInterpreter(BaseToolWithFileAccess):
    """Execute Python code remotely through a SandboxFusion endpoint.

    The code is extracted from a ``<code>...</code>`` section of the raw
    tool-call string and run in the sandbox; on failure the call is retried
    against a randomly chosen endpoint from SANDBOX_FUSION_ENDPOINTS.
    """

    name = "PythonInterpreter"
    description = 'Execute Python code in a sandboxed environment. Use this to run Python code and get the execution results.\n**Make sure to use print() for any output you want to see in the results.**\nFor code parameters, use placeholders first, and then put the code within <code></code> XML tags, such as:\n<tool_call>\n{"purpose": <detailed-purpose-of-this-tool-call>, "name": <tool-name>, "arguments": {"code": ""}}\n<code>\nHere is the code.\n</code>\n</tool_call>\n'

    parameters = {
        "type": "object",
        "properties": {
            "code": {
                "type": "string",
                "description": "The Python code to execute. Must be provided within <code></code> XML tags. Remember to use print() statements for any output you want to see.",
            }
        },
        "required": ["code"],
    }

    # Maximum sandbox attempts. The original loop was range(8) but returned
    # after the 5th failure and logged "/5", so 5 is the effective behavior.
    MAX_ATTEMPTS = 5

    def __init__(self, cfg: Optional[Dict] = None):
        super().__init__(cfg)

    @property
    def args_format(self) -> str:
        """Describe the expected argument format, localized to the tool's metadata language."""
        fmt = self.cfg.get('args_format')
        if fmt is None:
            if has_chinese_chars([self.name_for_human, self.name, self.description, self.parameters]):
                fmt = 'The input for this tool should be a Markdown code block.'
            else:
                fmt = 'Enclose the code within triple backticks (`) at the beginning and end of the code.'
        return fmt

    def observation(self, tool: dict, tool_dict: dict, tool_results, empty_mode: bool = False, readpage: bool = False, max_observation_length: int = None, tokenizer=None):
        """Pass sandbox output through unchanged; the result must already be a string."""
        assert isinstance(tool_results, str), f"result of python code should be str, instead of {type(tool_results)}. {tool_results}"
        return tool_results

    @property
    def function(self) -> dict:
        """OpenAI-style function descriptor for this tool."""
        return {
            'name': self.name,
            'description': self.description,
            'parameters': self.parameters,
        }

    def call(self, params, files=None, timeout=50, **kwargs) -> str:
        """Extract code from <code>...</code> tags in *params* and run it in the sandbox.

        Args:
            params: Raw tool-call string containing a <code>...</code> section.
            files: Unused here; accepted for BaseToolWithFileAccess compatibility.
            timeout: Per-run timeout in seconds (client- and sandbox-side).

        Returns:
            Captured stdout/stderr, 'Finished execution.' when there is no
            output, or an '[Python Interpreter Error] ...' string on failure.
        """
        try:
            try:
                # Fixed: the closing delimiter was '</code' (missing '>'),
                # which could truncate code containing that substring.
                code = params.split('<code>')[1].split('</code>')[0]
            except Exception:
                return '[Python Interpreter Error]: format error.'

            if not code.strip():
                return '[Python Interpreter Error]: Empty code.'

            last_error = None
            for attempt in range(self.MAX_ATTEMPTS):
                # Randomly sample an endpoint for each attempt.
                endpoint = random.choice(SANDBOX_FUSION_ENDPOINTS)
                print(f"Attempt {attempt + 1}/{self.MAX_ATTEMPTS} using endpoint: {endpoint}")
                try:
                    code_result = run_code(RunCodeRequest(code=code, language='python', run_timeout=timeout), max_attempts=1, client_timeout=timeout, endpoint=endpoint)
                    print("[Python] Code Result", code_result)
                    result = []
                    if code_result.run_result.stdout:
                        result.append(f"stdout:\n{code_result.run_result.stdout}")
                    if code_result.run_result.stderr:
                        result.append(f"stderr:\n{code_result.run_result.stderr}")
                    # Execution that consumed (nearly) the whole budget is
                    # reported as a timeout even if the sandbox returned.
                    if code_result.run_result.execution_time >= timeout - 1:
                        result.append("[PythonInterpreter Error] TimeoutError: Execution timed out.")
                    result = '\n'.join(result)
                    print('SUCCESS RUNNING TOOL')
                    return result if result.strip() else 'Finished execution.'

                except Timeout:
                    last_error = f'[Python Interpreter Error] TimeoutError: Execution timed out on endpoint {endpoint}.'
                    print(f"Timeout on attempt {attempt + 1}: {last_error}")

                except Exception as e:
                    last_error = f'[Python Interpreter Error]: {str(e)} on endpoint {endpoint}'
                    print(f"Error on attempt {attempt + 1}: {last_error}")

            return last_error if last_error else '[Python Interpreter Error]: All attempts failed.'

        except Exception as e:
            return f"[Python Interpreter Error]: {str(e)}"

    def call_specific_endpoint(self, params: Union[str, dict], endpoint: str, timeout: Optional[int] = 30, **kwargs) -> tuple:
        """Run code against one explicit endpoint (used to test an endpoint directly).

        Returns:
            A 3-tuple ``(ok, output, execution_time)``; *execution_time* is
            None on failure. (The empty-code path previously returned a
            2-tuple, breaking callers that unpack three values.)
        """
        try:
            if type(params) is str:
                params = json5.loads(params)
            code = params.get('code', '')
            if not code:
                # Fall back to a fenced code block inside the raw payload.
                code = params.get('raw', '')
                triple_match = re.search(r'```[^\n]*\n(.+?)```', code, re.DOTALL)
                if triple_match:
                    code = triple_match.group(1)
        except Exception:
            code = extract_code(params)

        if not code.strip():
            return False, '[Python Interpreter Error]: Empty code.', None

        try:
            start_time = time.time()
            code_result = run_code(RunCodeRequest(code=code, language='python', run_timeout=timeout),
                                   max_attempts=1, client_timeout=timeout, endpoint=endpoint)
            elapsed = time.time() - start_time

            output_parts = []
            if code_result.run_result.stdout:
                output_parts.append(f"stdout:\n{code_result.run_result.stdout}")
            if code_result.run_result.stderr:
                output_parts.append(f"stderr:\n{code_result.run_result.stderr}")

            output = '\n'.join(output_parts)
            return True, output if output.strip() else 'Finished execution.', elapsed

        except Timeout:
            return False, '[Python Interpreter Error] TimeoutError: Execution timed out.', None
        except Exception as e:
            return False, f'[Python Interpreter Error]: {str(e)}', None
|
| 156 |
+
|
utils/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Utils module
|
| 2 |
+
|
visit.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import signal
|
| 4 |
+
import threading
|
| 5 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 6 |
+
from typing import List, Union
|
| 7 |
+
import requests
|
| 8 |
+
from qwen_agent.tools.base import BaseTool, register_tool
|
| 9 |
+
from prompt import EXTRACTOR_PROMPT
|
| 10 |
+
from openai import OpenAI
|
| 11 |
+
import random
|
| 12 |
+
from urllib.parse import urlparse, unquote
|
| 13 |
+
import time
|
| 14 |
+
import tiktoken
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Module-level configuration, read once at import time.
# Timeout budget for the visit server, seconds (env: VISIT_SERVER_TIMEOUT).
VISIT_SERVER_TIMEOUT = int(os.getenv("VISIT_SERVER_TIMEOUT", 200))
# Cap on raw webpage content length, characters (env: WEBCONTENT_MAXLENGTH).
WEBCONTENT_MAXLENGTH = int(os.getenv("WEBCONTENT_MAXLENGTH", 150000))

# Bearer token for the r.jina.ai reader service (env: JINA_KEY; empty string
# when unset, which will make authenticated requests fail).
JINA_API_KEYS = os.getenv("JINA_KEY", "")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def truncate_to_tokens(text: str, max_tokens: int = 95000) -> str:
    """Clip *text* so that its cl100k_base encoding holds at most *max_tokens* tokens."""
    enc = tiktoken.get_encoding("cl100k_base")
    token_ids = enc.encode(text)
    if len(token_ids) > max_tokens:
        return enc.decode(token_ids[:max_tokens])
    return text
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
OSS_JSON_FORMAT = """# Response Formats
|
| 33 |
+
## visit_content
|
| 34 |
+
{"properties":{"rational":{"type":"string","description":"Locate the **specific sections/data** directly related to the user's goal within the webpage content"},"evidence":{"type":"string","description":"Identify and extract the **most relevant information** from the content, never miss any important information, output the **full original context** of the content as far as possible, it can be more than three paragraphs.","summary":{"type":"string","description":"Organize into a concise paragraph with logical flow, prioritizing clarity and judge the contribution of the information to the goal."}}}}"""
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@register_tool('visit', allow_overwrite=True)
class Visit(BaseTool):
    """Visit webpage(s) through the Jina reader and summarize them with an LLM."""

    # The `description` tells the agent the functionality of this tool.
    name = 'visit'
    description = 'Visit webpage(s) and return the summary of the content.'
    # The `parameters` tell the agent what input parameters the tool has.
    parameters = {
        "type": "object",
        "properties": {
            "url": {
                "type": ["string", "array"],
                "items": {
                    "type": "string"
                },
                "minItems": 1,
                "description": "The URL(s) of the webpage(s) to visit. Can be a single URL or an array of URLs."
            },
            "goal": {
                "type": "string",
                "description": "The goal of the visit for webpage(s)."
            }
        },
        "required": ["url", "goal"]
    }

    # The `call` method is the main function of the tool.
    def call(self, params: Union[str, dict], **kwargs) -> str:
        """Entry point: parse params, visit one or many URLs, join the summaries.

        Args:
            params: JSON string or already-parsed dict with 'url' (string or
                list of strings) and 'goal'.

        Returns:
            The per-URL summaries joined with '=======' separators.
        """
        try:
            # Accept dict input as well as raw JSON (the str-only parse
            # rejected already-decoded params).
            if isinstance(params, str):
                params = json.loads(params)
            url = params["url"]
            goal = params["goal"]
        except (json.JSONDecodeError, TypeError, KeyError):
            return "[Visit] Invalid request format: Input must be a JSON object containing 'url' and 'goal' fields"

        # Create log folder if it doesn't exist (kept for side-effect compatibility).
        log_folder = "log"
        os.makedirs(log_folder, exist_ok=True)

        if isinstance(url, str):
            response = self.readpage_jina(url, goal)
        else:
            assert isinstance(url, List)
            response_parts = []
            start_time = time.time()
            for u in url:
                # Hard 15-minute budget for the whole batch: once exceeded,
                # remaining URLs get a canned failure summary instead of a fetch.
                if time.time() - start_time > 900:
                    # Fixed: the message previously interpolated the whole
                    # `url` list instead of the current URL `u`.
                    cur_response = "The useful information in {url} for user goal {goal} as follows: \n\n".format(url=u, goal=goal)
                    cur_response += "Evidence in page: \n" + "The provided webpage content could not be accessed. Please check the URL or file format." + "\n\n"
                    cur_response += "Summary: \n" + "The webpage content could not be processed, and therefore, no information is available." + "\n\n"
                else:
                    try:
                        cur_response = self.readpage_jina(u, goal)
                    except Exception as e:
                        cur_response = f"Error fetching {u}: {str(e)}"
                response_parts.append(cur_response)
            response = "\n=======\n".join(response_parts)

        print(f'Summary Length {len(response)}; Summary Content {response}')
        return response.strip()

    def call_server(self, msgs, max_retries=2):
        """Ask the summary LLM to extract goal-relevant content from a page.

        Args:
            msgs: Chat messages for the extraction request.
            max_retries: Number of completion attempts before giving up.

        Returns:
            The model's response text (with any surrounding prose trimmed to
            the outermost JSON braces), or '' when every attempt fails or
            yields empty content. Never returns None (the original fell off
            the loop returning None when content was empty, crashing callers
            that take len() of the result).
        """
        api_key = os.environ.get("API_KEY")
        url_llm = os.environ.get("API_BASE")
        model_name = os.environ.get("SUMMARY_MODEL_NAME", "qwen/qwen3-30b-a3b-instruct-2507")

        client = OpenAI(
            api_key=api_key,
            base_url=url_llm,
        )
        for attempt in range(max_retries):
            try:
                chat_response = client.chat.completions.create(
                    model=model_name,
                    messages=msgs,
                    temperature=0.7
                )
                content = chat_response.choices[0].message.content
                if content:
                    try:
                        json.loads(content)
                    except (json.JSONDecodeError, TypeError):
                        # Model wrapped the JSON in prose: keep only the span
                        # between the outermost braces.
                        left = content.find('{')
                        right = content.rfind('}')
                        if left != -1 and right != -1 and left <= right:
                            content = content[left:right+1]
                    return content
            except Exception as e:
                print(e)
                if attempt == (max_retries - 1):
                    return ""
        # All attempts produced empty content without raising.
        return ""

    def jina_readpage(self, url: str) -> str:
        """
        Read webpage content using the r.jina.ai reader service.

        Args:
            url: The URL to read.

        Returns:
            str: The webpage content, or '[visit] Failed to read page.' after
            three failed attempts.
        """
        max_retries = 3
        timeout = 50

        headers = {
            "Authorization": f"Bearer {JINA_API_KEYS}",
        }
        for attempt in range(max_retries):
            try:
                response = requests.get(
                    f"https://r.jina.ai/{url}",
                    headers=headers,
                    timeout=timeout
                )
                if response.status_code == 200:
                    return response.text
                print(response.text)
                raise ValueError("jina readpage error")
            except Exception:
                # Brief backoff before the next attempt.
                time.sleep(0.5)

        return "[visit] Failed to read page."

    def html_readpage_jina(self, url: str) -> str:
        """Fetch *url* via the Jina reader, retrying until real content arrives.

        `jina_readpage` already retries 3 times internally; this wrapper
        repeats the whole fetch up to 8 times before giving up.
        """
        max_attempts = 8
        for _ in range(max_attempts):
            content = self.jina_readpage(url)
            print("jina")
            if content and not content.startswith("[visit] Failed to read page.") and content != "[visit] Empty content." and not content.startswith("[document_parser]"):
                return content
        return "[visit] Failed to read page."

    def readpage_jina(self, url: str, goal: str) -> str:
        """
        Read *url* and produce a goal-directed summary.

        Fetches the page, asks the summary LLM for a JSON object with
        'evidence' and 'summary' fields (retrying with progressively harder
        truncation when the model returns nothing usable), and formats the
        result. A canned failure message is returned when the page cannot be
        read or summarized.

        Args:
            url: The URL to read.
            goal: The goal/purpose of reading the page.

        Returns:
            str: The formatted summary or an error message.
        """
        summary_page_func = self.call_server
        max_retries = int(os.getenv('VISIT_SERVER_MAX_RETRIES', 1))

        content = self.html_readpage_jina(url)

        if content and not content.startswith("[visit] Failed to read page.") and content != "[visit] Empty content." and not content.startswith("[document_parser]"):
            content = truncate_to_tokens(content, max_tokens=95000)
            messages = [{"role": "user", "content": EXTRACTOR_PROMPT.format(webpage_content=content, goal=goal)}]
            raw = summary_page_func(messages, max_retries=max_retries)
            summary_retries = 3
            while len(raw) < 10 and summary_retries >= 0:
                # A near-empty answer usually means the page blew the model's
                # context window: retry at 70% length, then a final 25k cut.
                truncate_length = int(0.7 * len(content)) if summary_retries > 0 else 25000
                status_msg = (
                    f"[visit] Summary url[{url}] "
                    f"attempt {3 - summary_retries + 1}/3, "
                    f"content length: {len(content)}, "
                    f"truncating to {truncate_length} chars"
                ) if summary_retries > 0 else (
                    f"[visit] Summary url[{url}] failed after 3 attempts, "
                    f"final truncation to 25000 chars"
                )
                print(status_msg)
                content = content[:truncate_length]
                messages = [{"role": "user", "content": EXTRACTOR_PROMPT.format(webpage_content=content, goal=goal)}]
                raw = summary_page_func(messages, max_retries=max_retries)
                summary_retries -= 1

            # Parse the model output as JSON; on failure, regenerate AND
            # re-parse (the original regenerated once but never parsed the
            # regenerated text, so one bad response always failed).
            parse_retry_times = 0
            if isinstance(raw, str):
                raw = raw.replace("```json", "").replace("```", "").strip()
            while parse_retry_times < 3:
                try:
                    raw = json.loads(raw)
                    break
                except (json.JSONDecodeError, TypeError):
                    raw = summary_page_func(messages, max_retries=max_retries)
                    if isinstance(raw, str):
                        raw = raw.replace("```json", "").replace("```", "").strip()
                    parse_retry_times += 1

            if parse_retry_times >= 3 or not isinstance(raw, dict):
                useful_information = "The useful information in {url} for user goal {goal} as follows: \n\n".format(url=url, goal=goal)
                useful_information += "Evidence in page: \n" + "The provided webpage content could not be accessed. Please check the URL or file format." + "\n\n"
                useful_information += "Summary: \n" + "The webpage content could not be processed, and therefore, no information is available." + "\n\n"
            else:
                useful_information = "The useful information in {url} for user goal {goal} as follows: \n\n".format(url=url, goal=goal)
                # .get() instead of [] so a parsed-but-incomplete object does
                # not raise KeyError out of the tool.
                useful_information += "Evidence in page: \n" + str(raw.get("evidence", "")) + "\n\n"
                useful_information += "Summary: \n" + str(raw.get("summary", "")) + "\n\n"

            if len(useful_information) < 10 and summary_retries < 0:
                print("[visit] Could not generate valid summary after maximum retries")
                useful_information = "[visit] Failed to read page"

            return useful_information

        # If no valid content was obtained after all retries
        useful_information = "The useful information in {url} for user goal {goal} as follows: \n\n".format(url=url, goal=goal)
        useful_information += "Evidence in page: \n" + "The provided webpage content could not be accessed. Please check the URL or file format." + "\n\n"
        useful_information += "Summary: \n" + "The webpage content could not be processed, and therefore, no information is available." + "\n\n"
        return useful_information
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
if __name__ == "__main__":
    # Manual smoke test against a live page (requires network + API keys).
    visit_tool = Visit()
    result = visit_tool.call('{"url": ["https://2025.aclweb.org/"], "goal": "Find the important dates page and locate the Industry Track paper submission deadline"}')
    print(result)
|
| 260 |
+
|