|
|
|
|
|
import os |
|
|
import json |
|
|
import random |
|
|
import re |
|
|
import time |
|
|
import logging |
|
|
from pathlib import Path |
|
|
from typing import Dict, Any, List, Optional |
|
|
|
|
|
import requests |
|
|
import dateutil.parser |
|
|
from concurrent.futures import ThreadPoolExecutor, as_completed |
|
|
from urllib.parse import urlparse |
|
|
from bs4 import BeautifulSoup |
|
|
from dataclasses import dataclass |
|
|
|
|
|
try: |
|
|
from config.config import get_config |
|
|
except ImportError: |
|
|
import sys |
|
|
|
|
|
sys.path.append(str(Path(__file__).parent.parent.parent)) |
|
|
from config.config import get_config |
|
|
|
|
|
|
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
|
@dataclass |
|
|
class MCPToolResult: |
|
|
"""Standard result format for MCP tools""" |
|
|
success: bool |
|
|
data: Any = None |
|
|
error: Optional[str] = None


metadata: Optional[Dict[str, Any]] = None
|
|
|
|
|
def to_dict(self) -> Dict[str, Any]: |
|
|
return { |
|
|
"success": self.success, |
|
|
"data": self.data, |
|
|
"error": self.error, |
|
|
"metadata": self.metadata or {} |
|
|
} |
|
|
|
|
|
|
|
|
class MCPTools: |
|
|
"""Multi Agent System MCP Tools Implementation""" |
|
|
|
|
|
def __init__(self, workspace_path: Optional[str] = None):
|
|
self.config = get_config() |
|
|
self.workspace_path = Path(workspace_path) if workspace_path else Path.cwd() |
|
|
self.workspace_path.mkdir(exist_ok=True, parents=True) |
|
|
|
|
|
|
|
|
self.session_id = None |
|
|
self.session_workspace_path = None |
|
|
self.full_workspace_path = os.path.realpath(self.workspace_path) |
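# Keep a trailing separator so the prefix check in _safe_join cannot be fooled by
# sibling directories (e.g. "/data/workspace_evil" passing a "/data/workspace" check).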
|
|
if not self.full_workspace_path.endswith(os.sep): |
|
|
self.full_workspace_path += os.sep |
|
|
|
|
|
def set_session_context(self, session_id: str, session_workspace_path: str): |
|
|
"""Set session context for workspace-aware operations""" |
|
|
self.session_id = session_id |
|
|
self.session_workspace_path = Path(session_workspace_path) |
|
|
|
|
|
self.workspace_path = self.session_workspace_path |
|
|
self.full_workspace_path = os.path.realpath(self.workspace_path) |
|
|
if not self.full_workspace_path.endswith(os.sep): |
|
|
self.full_workspace_path += os.sep |
|
|
logger.info(f"Set session context - ID: {session_id}, Workspace: {session_workspace_path}") |
|
|
|
|
|
def get_session_context(self) -> Dict[str, Any]: |
|
|
"""Get current session context""" |
|
|
return { |
|
|
"session_id": self.session_id, |
|
|
"session_workspace_path": str(self.session_workspace_path) if self.session_workspace_path else None, |
|
|
"workspace_path": str(self.workspace_path) |
|
|
} |
|
|
|
|
|
def _safe_join(self, path: str) -> Path: |
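# Resolve a workspace-relative path and reject anything that escapes the workspace:
# absolute paths and '..' traversal (symlinks are resolved by realpath before the check).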
|
|
if os.path.isabs(path): |
|
|
raise Exception(f"Path '{path}' is absolute. Only relative paths are allowed.") |
|
|
joined_path = os.path.join(self.workspace_path, path) |
|
|
full_joined_path = os.path.realpath(joined_path) |
|
|
if not full_joined_path.startswith(self.full_workspace_path): |
|
|
raise Exception(f"Path '{path}' is outside workspace directory.") |
|
|
return Path(full_joined_path) |
|
|
|
|
|
|
|
|
|
|
|
def batch_web_search( |
|
|
self, |
|
|
queries: List[str], |
|
|
max_results_per_query: int = 15, |
|
|
max_workers: int = 5 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Batch web search using configurable search provider with concurrent processing. |
|
|
|
|
|
Users need to implement their own search provider. Below is an example of the expected return format:
|
|
[ |
|
|
{ |
|
|
"query": "search query", |
|
|
"search_results": [ |
|
|
{ |
|
|
"title": "Page title", |
|
|
"link": "https://example.com", |
|
|
"snippet": "Description snippet", |
|
|
"date": "Feb 8, 2022", |
|
|
}, |
|
|
... |
|
|
] |
|
|
}, |
|
|
... |
|
|
] |
|
|
|
|
|
Args: |
|
|
queries: List of search queries |
|
|
max_results_per_query: Maximum search results per query |
|
|
max_workers: Maximum number of concurrent search requests |
|
|
""" |
|
|
try: |
|
|
from config.config import get_search_engine_config |
|
|
search_config = get_search_engine_config() |
|
|
|
|
|
if not search_config: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error="Search engine not configured" |
|
|
) |
|
|
|
|
|
|
|
|
actual_max_results = min(max_results_per_query, 15) |
|
|
|
|
|
def search_single_query(query: str) -> Dict[str, Any]: |
|
|
"""Search a single query""" |
|
|
try: |
|
|
search_results = self._generic_search(query, actual_max_results, search_config) |
|
|
|
|
|
if not search_results.success: |
|
|
return { |
|
|
'query': query, |
|
|
'success': False, |
|
|
'error': search_results.error, |
|
|
'results': [] |
|
|
} |
|
|
|
|
|
|
|
|
search_data = search_results.data |
|
|
search_data["organic"] = search_data["organic"][:actual_max_results] |
|
|
|
|
|
return { |
|
|
'query': query, |
|
|
'success': True, |
|
|
'results': search_data, |
|
|
'timestamp': time.time() |
|
|
} |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Error searching query '{query}': {e}") |
|
|
return { |
|
|
'query': query, |
|
|
'success': False, |
|
|
'error': str(e), |
|
|
'results': [] |
|
|
} |
|
|
|
|
|
|
|
|
all_results = [] |
|
|
with ThreadPoolExecutor(max_workers=min(max_workers, len(queries))) as executor: |
|
|
|
|
|
future_to_query = {executor.submit(search_single_query, query): query for query in queries} |
|
|
|
|
|
|
|
|
for future in as_completed(future_to_query): |
|
|
try: |
|
|
result = future.result() |
|
|
all_results.append(result) |
|
|
except Exception as e: |
|
|
query = future_to_query[future] |
|
|
logger.error(f"Error processing search for '{query}': {e}") |
|
|
all_results.append({ |
|
|
'query': query, |
|
|
'success': False, |
|
|
'error': str(e), |
|
|
'results': [] |
|
|
}) |
|
|
|
|
|
|
|
|
query_order = {query: i for i, query in enumerate(queries)} |
|
|
all_results.sort(key=lambda x: query_order.get(x['query'], float('inf'))) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=all_results, |
|
|
metadata={ |
|
|
'total_queries': len(queries), |
|
|
'successful_queries': len([r for r in all_results if r.get('success', False)]), |
|
|
'concurrent_workers': min(max_workers, len(queries)) |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Batch web search failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def _generic_search(self, query: str, max_results: int, config: Dict[str, Any]) -> MCPToolResult: |
|
|
""" |
|
|
Generic search function that users need to implement. |
|
|
|
|
|
This function should return results in the standard format and be wrapped in MCPToolResult: |
|
|
|
|
|
search_res = { |
|
|
"organic": [ |
|
|
{ |
|
|
"title": "Page title", |
|
|
"link": "https://example.com", |
|
|
"snippet": "Description snippet", |
|
|
"date": "Feb 8, 2022" |
|
|
} |
|
|
] |
|
|
} |
|
|
|
|
|
return MCPToolResult(success=True, data=search_res) |
|
|
|
|
|
Users should implement their own search logic here based on their preferred search service. |
|
|
|
|
|
Notes: |
|
|
1. It is recommended to use search engine APIs that comply with relevant safety and regulatory requirements. |
|
|
The user assumes full responsibility for any safety issues, legal consequences, or policy violations |
|
|
arising from the use of the search engine results. |
|
|
|
|
|
2. User requests may be indirectly transmitted to the search engine API in the form of search queries. |
|
|
It is the user's responsibility to implement appropriate measures to protect personal privacy and |
|
|
sensitive information. We assume no liability for privacy-related issues arising from such transmission. |
|
|
""" |
|
|
try: |
|
|
|
|
|
raise NotImplementedError( |
|
|
"Generic search provider not implemented. Please implement your own search logic in _generic_search method. " |
|
|
"The return format should match the standard format with 'organic' results containing title, link, snippet, and date fields." |
|
|
) |
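# A minimal illustrative sketch (kept commented out), assuming a hypothetical JSON
# search API reachable at config["endpoint"] that accepts {"q": ..., "num": ...}
# with a Bearer key and already returns results under an "organic" key. The
# endpoint, payload, and response shape are assumptions, not a real provider:
#
#     api_key = random.choice(config.get("api_keys", []))
#     resp = requests.post(
#         config["endpoint"],
#         headers={"Authorization": f"Bearer {api_key}"},
#         json={"q": query, "num": max_results},
#         timeout=30,
#     )
#     resp.raise_for_status()
#     organic = resp.json().get("organic", [])[:max_results]
#     return MCPToolResult(success=True, data={"organic": organic})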
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
except Exception as e: |
|
|
return MCPToolResult(success=False, error=f"Generic search failed: {e}") |
|
|
|
|
|
@staticmethod |
|
|
def _extract_publication_date_from_html(url: str) -> Optional[str]: |
|
|
"""Extract publication date directly from webpage HTML""" |
|
|
try: |
|
|
|
|
|
response = requests.get(url, timeout=10, headers={ |
|
|
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' |
|
|
}) |
|
|
response.raise_for_status() |
|
|
|
|
|
soup = BeautifulSoup(response.content, 'html.parser') |
|
|
|
|
|
|
|
|
meta_selectors = [ |
|
|
'meta[property="article:published_time"]', |
|
|
'meta[property="article:modified_time"]', |
|
|
'meta[name="date"]', |
|
|
'meta[name="pubdate"]', |
|
|
'meta[name="published"]', |
|
|
'meta[name="datePublished"]', |
|
|
'meta[name="publication-date"]', |
|
|
'meta[property="og:published_time"]', |
|
|
'meta[name="DC.date"]', |
|
|
'meta[name="DC.date.created"]', |
|
|
'meta[itemprop="datePublished"]', |
|
|
'meta[itemprop="dateModified"]' |
|
|
] |
|
|
|
|
|
for selector in meta_selectors: |
|
|
meta_tag = soup.select_one(selector) |
|
|
if meta_tag: |
|
|
content = meta_tag.get('content') or meta_tag.get('datetime') |
|
|
if content: |
|
|
try: |
|
|
|
|
|
parsed_date = dateutil.parser.parse(content) |
|
|
return parsed_date.isoformat() |
|
|
except ValueError: |
|
|
continue |
|
|
|
|
|
|
|
|
time_tags = soup.find_all('time', {'datetime': True}) |
|
|
for time_tag in time_tags: |
|
|
try: |
|
|
parsed_date = dateutil.parser.parse(time_tag['datetime']) |
|
|
return parsed_date.isoformat() |
|
|
except ValueError: |
|
|
continue |
|
|
|
|
|
|
|
|
json_ld_scripts = soup.find_all('script', {'type': 'application/ld+json'}) |
|
|
for script in json_ld_scripts: |
|
|
try: |
|
|
data = json.loads(script.string) |
|
|
if isinstance(data, dict): |
|
|
date_fields = ['datePublished', 'dateCreated', 'dateModified'] |
|
|
for field in date_fields: |
|
|
if field in data: |
|
|
parsed_date = dateutil.parser.parse(data[field]) |
|
|
return parsed_date.isoformat() |
|
|
elif isinstance(data, list): |
|
|
for item in data: |
|
|
if isinstance(item, dict): |
|
|
for field in date_fields: |
|
|
if field in item: |
|
|
parsed_date = dateutil.parser.parse(item[field]) |
|
|
return parsed_date.isoformat() |
|
|
except ValueError: |
|
|
continue |
|
|
|
|
|
return None |
|
|
|
|
|
except Exception as e: |
|
|
logger.debug(f"Error extracting publication date from {url}: {e}") |
|
|
return None |
|
|
|
|
|
def _content_extractor(self, url: str, max_tokens: int, config: Dict[str, Any]) -> MCPToolResult: |
|
|
"""Get content using URL content extractor""" |
|
|
max_retry_num = 5 |
|
|
sleep_time = 5 |
|
|
retry_num = 0 |
|
|
while True: |
|
|
retry_num += 1 |
|
|
try: |
|
|
api_key = random.choice(config.get('api_keys', ['default_key'])) |
|
|
headers = { |
|
|
'Authorization': f'Bearer {api_key}', |
|
|
'Content-Type': 'application/json' |
|
|
} |
|
|
|
|
|
|
|
|
raise NotImplementedError( |
|
|
"URL crawler not implemented. Please implement your own URL crawling logic. " |
|
|
"The function should extract text content from URLs and return it in a structured format " |
|
|
"with metadata like title, publication date, and word count." |
|
|
) |
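# A minimal illustrative sketch (kept commented out), assuming plain HTML pages and
# no dedicated extraction service: fetch the page with requests and strip tags with
# BeautifulSoup (both already imported above). A real implementation should also
# truncate to max_tokens and attach metadata such as title and word count:
#
#     resp = requests.get(url, timeout=30,
#                         headers={"User-Agent": "Mozilla/5.0"})
#     resp.raise_for_status()
#     soup = BeautifulSoup(resp.content, "html.parser")
#     for tag in soup(["script", "style"]):
#         tag.decompose()
#     text = soup.get_text(separator="\n", strip=True)
#     return MCPToolResult(success=True, data=text)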
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
except Exception as e: |
|
|
if retry_num == max_retry_num: |
|
|
return MCPToolResult(success=False, error=f"Content extractor failed: {e}") |
|
|
else: |
|
|
time.sleep(sleep_time) |
|
|
|
|
|
def url_crawler( |
|
|
self, |
|
|
documents: List[Dict], |
|
|
max_tokens_per_url: int = 100000, |
|
|
include_metadata: bool = True, |
|
|
max_workers: int = 10 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Extract LLM-friendly content from URLs using configurable crawler service. |
|
|
Content is saved to specified file paths. |
|
|
|
|
|
Users need to implement their own URL crawler. The return format should include: |
|
|
- Extracted text content from the URL |
|
|
- Metadata like title, publication date, word count |
|
|
- Success/error status for each URL |
|
|
|
|
|
Args: |
|
|
documents: List of document dictionaries containing: |
|
|
- url: Web page URL to extract |
|
|
- file_path: Local path to save extracted content |
|
|
- title: (Optional) Web page title |
|
|
- time: (Optional) Web page publication time |
|
|
max_tokens_per_url: Maximum tokens per URL result |
|
|
include_metadata: Whether to include metadata about extraction |
|
|
max_workers: Maximum number of concurrent extraction requests |
|
|
""" |
|
|
try: |
|
|
from config.config import get_url_crawler_config |
|
|
crawler_config = get_url_crawler_config() |
|
|
|
|
|
if not crawler_config: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error="URL crawler not configured" |
|
|
) |
|
|
|
|
|
def process_single_document(doc: Dict) -> Dict[str, Any]: |
|
|
"""Process a single document: extract content and save to file""" |
|
|
url = doc['url'] |
|
|
file_path = doc['file_path'] |
|
|
title = doc.get('title') |
|
|
doc_time = doc.get('time') |
|
|
|
|
|
result_base = { |
|
|
'url': url, |
|
|
'file_path': file_path, |
|
|
'title': title, |
|
|
'time': doc_time, |
|
|
'success': False, |
|
|
'error': None, |
|
|
'content_length': 0, |
|
|
'word_count': 0, |
|
|
'publication_date': None, |
|
|
'extraction_timestamp': time.time(), |
|
|
'write_success': False |
|
|
} |
|
|
|
|
|
try: |
|
|
|
|
|
publication_date = self._extract_publication_date_from_html(url) |
|
|
result_base["publication_date"] = publication_date |
|
|
|
|
|
|
|
|
content_result = self._content_extractor(url, max_tokens_per_url, crawler_config) |
|
|
|
|
|
if not content_result.success: |
|
|
result_base['error'] = content_result.error |
|
|
return result_base |
|
|
|
|
|
content = content_result.data |
|
|
if not content: |
|
|
result_base['error'] = "Extracted content is empty" |
|
|
return result_base |
|
|
|
|
|
|
|
|
write_result = self.file_write( |
|
|
file_path=file_path, |
|
|
content=content, |
|
|
create_dirs=True |
|
|
) |
|
|
|
|
|
if not write_result.success: |
|
|
result_base['error'] = f"File write failed: {write_result.error}" |
|
|
return result_base |
|
|
|
|
|
|
|
|
result = { |
|
|
**result_base, |
|
|
'success': True, |
|
|
'content_length': len(content), |
|
|
'word_count': len(content.split()), |
|
|
'publication_date': publication_date, |
|
|
'write_success': True |
|
|
} |
|
|
|
|
|
if include_metadata: |
|
|
result['metadata'] = { |
|
|
'truncated': len(content.split()) >= max_tokens_per_url, |
|
|
'has_publication_date': publication_date is not None, |
|
|
'date_extraction_method': 'html_parsing' if publication_date else None, |
|
|
'file_size': len(content.encode('utf-8')) |
|
|
} |
|
|
|
|
|
return result |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Error processing document {url}: {e}") |
|
|
|
|
|
try: |
|
|
publication_date = self._extract_publication_date_from_html(url) |
|
|
except Exception:
|
|
publication_date = None |
|
|
|
|
|
return { |
|
|
**result_base, |
|
|
'error': str(e), |
|
|
'publication_date': publication_date |
|
|
} |
|
|
|
|
|
|
|
|
results = [] |
|
|
with ThreadPoolExecutor(max_workers=min(max_workers, len(documents))) as executor: |
|
|
|
|
|
future_to_doc = {executor.submit(process_single_document, doc): doc for doc in documents} |
|
|
|
|
|
|
|
|
for future in as_completed(future_to_doc): |
|
|
doc = future_to_doc[future] |
|
|
try: |
|
|
result = future.result() |
|
|
results.append(result) |
|
|
except Exception as e: |
|
|
url = doc['url'] |
|
|
logger.error(f"Error processing extraction for '{url}': {e}") |
|
|
|
|
|
|
|
|
try: |
|
|
publication_date = self._extract_publication_date_from_html(url) |
|
|
except Exception:
|
|
publication_date = None |
|
|
|
|
|
results.append({ |
|
|
'url': url, |
|
|
'file_path': doc['file_path'], |
|
|
'title': doc.get('title'), |
|
|
'time': doc.get('time'), |
|
|
'success': False, |
|
|
'error': str(e), |
|
|
'publication_date': publication_date, |
|
|
'extraction_timestamp': time.time(), |
|
|
'write_success': False |
|
|
}) |
|
|
|
|
|
|
|
|
url_order = {doc['url']: i for i, doc in enumerate(documents)} |
|
|
results.sort(key=lambda x: url_order.get(x['url'], float('inf'))) |
|
|
|
|
|
successful_extractions = len([r for r in results if r.get('success', False)]) |
|
|
successful_writes = len([r for r in results if r.get('write_success', False)]) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=results, |
|
|
metadata={ |
|
|
'total_documents': len(documents), |
|
|
'successful_extractions': successful_extractions, |
|
|
'successful_writes': successful_writes, |
|
|
'failed_processing': len(documents) - successful_extractions, |
|
|
'concurrent_workers': min(max_workers, len(documents)) |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"URL crawler batch processing failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def file_read_dq(self, file_path: str, encoding: str = 'utf-8') -> MCPToolResult: |
|
|
"""Read file content""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
content = full_path.read_text(encoding=encoding) |
|
|
if len(content) > 40000: |
|
|
content = ( |
|
|
"Due to the content being too long, only the first 30,000 and last 10,000 characters are returned.\n" |
|
|
"Below is the returned portion of the file content:\n\n" |
|
|
f"First 30,000 characters:\n\n{content[:30000]}\n\n" |
|
|
f"Last 10,000 characters:\n\n{content[-10000:]}" |
|
|
) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=content, |
|
|
metadata={ |
|
|
'file_size': len(content), |
|
|
'line_count': len(content.splitlines()), |
|
|
'encoding': encoding |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File read failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def load_json(self, file_path: str, encoding: str = 'utf-8') -> MCPToolResult: |
|
|
""" |
|
|
Read a JSON Lines (JSONL) file: one JSON object per line
|
|
""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
res = [] |
|
|
|
|
|
with open(full_path, "r", encoding=encoding, errors='ignore') as f: |
|
|
for idx, line in enumerate(f): |
|
|
try: |
|
|
ele = json.loads(line.strip()) |
|
|
res.append(ele) |
|
|
except Exception as e: |
|
|
logger.warning(f"Failed to process file: {e}") |
|
|
continue |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=res, |
|
|
metadata={ |
|
|
'line_count': len(res), |
|
|
'encoding': encoding |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File read failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def document_qa( |
|
|
self, |
|
|
tasks: List[Dict], |
|
|
model: str = "pangu_auto", |
|
|
temperature: Optional[float] = None, |
|
|
max_tokens: Optional[int] = None, |
|
|
max_workers: int = 5 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Answer questions based on content stored in local files. |
|
|
Each task contains a file path and a question to be answered using that file's content. |
|
|
|
|
|
Args: |
|
|
tasks: List of task dictionaries containing: |
|
|
- file_path: Relative path to the file (relative to workspace root) to read |
|
|
- question: Question to ask about this file |
|
|
model: AI model to use for generating answers |
|
|
temperature: Creativity level for the AI response (0-1) |
|
|
max_tokens: Maximum tokens for the AI response |
|
|
max_workers: Maximum number of concurrent model API requests |
|
|
""" |
|
|
try: |
|
|
|
|
|
from config.config import get_model_config, get_storage_config |
|
|
model_config = get_model_config() |
|
|
storage_config = get_storage_config() |
|
|
|
|
|
|
|
|
if temperature is None: |
|
|
temperature = model_config.get('temperature', 0.3) |
|
|
if max_tokens is None: |
|
|
max_tokens = model_config.get('max_tokens', 8192) |
|
|
|
|
|
logger.debug(f"Starting document QA processing: {tasks}") |
|
|
|
|
|
def process_single_task(task: Dict) -> Dict: |
|
|
file_path = task['file_path'] |
|
|
question = task['question'] |
|
|
|
|
|
|
|
|
read_result = self.file_read_dq(file_path) |
|
|
if not read_result.success: |
|
|
return { |
|
|
'file_path': file_path, |
|
|
'question': question, |
|
|
'success': False, |
|
|
'error': f"File read error: {read_result.error}", |
|
|
'answer': None |
|
|
} |
|
|
|
|
|
content = read_result.data |
|
|
|
|
|
|
|
|
system_prompt = ( |
|
|
"You are an expert document analyst. Answer the user's question " |
|
|
"based ONLY on the provided context. If the answer cannot be found " |
|
|
"in the context, say 'I don't know'. Answer in the same language as the user's question. " |
|
|
"If the document section you reference includes citation sources, such as URLs, you must also include the original citation sources from the document in your response.\n\n" |
|
|
"CONTEXT:\n{context}" |
|
|
).format(context=content) |
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
|
|
|
model_url = model_config.get('url') or os.getenv('MODEL_REQUEST_URL', '') |
|
|
model_token = model_config.get('token') or os.getenv('MODEL_REQUEST_TOKEN', '') |
|
|
headers = {'Content-Type': 'application/json', 'csb-token': model_token} |
|
|
|
|
|
response = requests.post( |
|
|
url=model_url, |
|
|
headers=headers, |
|
|
json={ |
|
|
"model": model_config.get('model', 'pangu_auto'), |
|
|
"chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<s>[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{'<s>[unused9]系统:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}", |
|
|
"messages": [ |
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": question+" /no_think"} |
|
|
], |
|
|
"spaces_between_special_tokens": False, |
|
|
"temperature": temperature, |
|
|
"max_tokens": max_tokens |
|
|
}, |
|
|
timeout=model_config.get("timeout", 180) |
|
|
) |
|
|
response = response.json() |
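# Keep only the text after the final "[unused17]" marker (if present); anything before
# it is assumed to be the model's internal reasoning segment for this chat template.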
|
|
answer = response["choices"][0]["message"]["content"].split("[unused17]")[-1] |
|
|
|
|
|
return { |
|
|
'file_path': file_path, |
|
|
'question': question, |
|
|
'success': True, |
|
|
'answer': answer, |
|
|
'metadata': { |
|
|
'file_size': len(content), |
|
|
'line_count': len(content.splitlines()) |
|
|
} |
|
|
} |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Model API call failed for file '{file_path}': {e}") |
|
|
return { |
|
|
'file_path': file_path, |
|
|
'question': question, |
|
|
'success': False, |
|
|
'error': f"Model API error: {str(e)}" |
|
|
} |
|
|
|
|
|
|
|
|
results = [] |
|
|
with ThreadPoolExecutor(max_workers=min(max_workers, len(tasks))) as executor: |
|
|
future_to_task = {executor.submit(process_single_task, task): task for task in tasks} |
|
|
|
|
|
for future in as_completed(future_to_task): |
|
|
try: |
|
|
result = future.result() |
|
|
results.append(result) |
|
|
except Exception as e: |
|
|
task = future_to_task[future] |
|
|
logger.error(f"Task processing failed for file '{task['file_path']}': {e}") |
|
|
results.append({ |
|
|
'file_path': task['file_path'], |
|
|
'question': task['question'], |
|
|
'success': False, |
|
|
'error': f"Task processing exception: {str(e)}" |
|
|
}) |
|
|
|
|
|
|
|
|
task_order = {task['file_path']: i for i, task in enumerate(tasks)} |
|
|
results.sort(key=lambda x: task_order.get(x['file_path'], float('inf'))) |
|
|
|
|
|
|
|
|
successful_tasks = len([r for r in results if r.get('success', False)]) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=results, |
|
|
metadata={ |
|
|
'total_tasks': len(tasks), |
|
|
'successful_tasks': successful_tasks, |
|
|
'failed_tasks': len(tasks) - successful_tasks, |
|
|
'concurrent_workers': min(max_workers, len(tasks)) |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Context-based QA batch processing failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def search_result_classifier( |
|
|
self, |
|
|
|
|
|
outline: str, |
|
|
key_files: List[Dict], |
|
|
model: str = "pangu_auto", |
|
|
temperature: Optional[float] = None, |
|
|
max_tokens: Optional[int] = None |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Classify and organize search result files according to a structured outline for comprehensive long-form content generation. |
|
|
|
|
|
Args: |
|
|
outline: Structured outline defining the sections and subsections for organizing the long-form content |
|
|
key_files: List of key files to classify |
|
|
model: AI model to use for classification and organization |
|
|
temperature: Creativity level for the AI classification (0-1) |
|
|
max_tokens: Maximum tokens for the AI response |
|
|
""" |
|
|
try: |
|
|
|
|
|
from config.config import get_model_config, get_storage_config |
|
|
model_config = get_model_config() |
|
|
storage_config = get_storage_config() |
|
|
|
|
|
|
|
|
if temperature is None: |
|
|
temperature = model_config.get('temperature', 0.3) |
|
|
if max_tokens is None: |
|
|
max_tokens = model_config.get('max_tokens', 2000) |
|
|
|
|
|
logger.debug(f"Starting search result classification: outline={outline}, key_files={key_files}") |
|
|
|
|
|
key_files_dict = {} |
|
|
|
|
|
analysis_path = storage_config.get('document_analysis_path', './doc_analysis') |
|
|
file_analysis_list = self.load_json(f"{analysis_path}/file_analysis.jsonl").data |
|
|
|
|
|
for file_info in file_analysis_list: |
|
|
if file_info.get('file_path'): |
|
|
key_files_dict[file_info.get('file_path')] = file_info |
|
|
|
|
|
prompt_files = "" |
|
|
if key_files: |
|
|
prompt_files += f"Key Files with Multi-Dimensional Analysis:\n" |
|
|
for i, file_info in enumerate(key_files, 1): |
|
|
if file_info.get('file_path') in key_files_dict: |
|
|
file_info = key_files_dict[file_info.get('file_path')] |
|
|
prompt_files += f"\n{i}. File: {file_info.get('file_path', 'Unknown')}\n" |
|
|
prompt_files += f" Document Time: {file_info.get('doc_time', 'Not specified')}\n" |
|
|
prompt_files += f" Source Authority: {file_info.get('source_authority', 'Not specified')}\n" |
|
|
prompt_files += f" Core Content: {file_info.get('core_content', 'Not specified')}\n" |
|
|
prompt_files += f" Task Relevance: {file_info.get('task_relevance', 'Not specified')}\n" |
|
|
prompt_files += f" Information Richness: {file_info.get('information_richness', 'Not specified')}\n" |
|
|
prompt_files += "\n" |
|
|
|
|
|
prompt_files += "\n" |
|
|
|
|
|
system_prompt = ( |
|
|
"You are an expert content organizer specializing in multi-dimensional file classification. " |
|
|
"Your task is to classify research files according to a given outline by according the five key dimensions.\n\n" |
|
|
|
|
|
"FIVE KEY DIMENSIONS:\n" |
|
|
"1. DOCUMENT TIME: Consider the temporal relevance and recency of the source\n" |
|
|
"2. SOURCE AUTHORITY: Consider the credibility and expertise of the source\n" |
|
|
"3. CORE CONTENT: Focus on the main themes and key insights\n" |
|
|
"4. TASK RELEVANCE: Assess alignment with the specific research goals\n" |
|
|
"5. INFORMATION RICHNESS: Determine the information richness of the document\n" |
|
|
|
|
|
"CLASSIFICATION INSTRUCTIONS:\n" |
|
|
"1. Read the outline and understand its structure\n" |
|
|
"2. Analyze each file across the five dimensions provided\n" |
|
|
"3. Accept files with moderate source authority, moderate information richness and reasonable task relevance\n" |
|
|
"4. Use core content insights to find connections to outline sections, ensure the relevance of the assignment.\n" |
|
|
"5. Files should be assigned to multiple sections when they contain information spanning different topics\n" |
|
|
"6. Prioritize comprehensive coverage: Ensure every paragraph/chapter gets file assignments when any relevant content exists\n" |
|
|
"7.Note that if the first chapter is an abstract or introduction, a corresponding file must be assigned and there cannot be an empty file.\n" |
|
|
|
|
|
"CRITICAL REQUIREMENT - OUTLINE STRUCTURE PRESERVATION:\n" |
|
|
"- The number of files assigned to each chapter cannot exceed 11, so if the number of files exceeds 11, you need the most relevant files.\n" |
|
|
"- When the outline is divided into paragraphs/sections, you must preserve the original content and structure exactly.\n " |
|
|
"- Do NOT modify, rephrase, or alter any paragraph titles, headings, or structural elements from the original outline. \n" |
|
|
"- Keep all outline content completely intact in your output, including formatting and wording.\n" |
|
|
"- You should split each paragraph according to the granularity of the first-level heading. You are not allowed to separate any second-level headings or smaller headings under the first-level heading into individual paragraphs.\n" |
|
|
"- Formatting requirements for split paragraphs: 1. You are not allowed to modify the format of the paragraphs in the original outline, including all bold symbols, etc.; 2. If the given outline includes the title of the article, you must include the article title in the first paragraph, maintaining its original bold symbols.\n" |
|
|
|
|
|
"Strictly follow the following format for output:\n" |
|
|
"paragraph 1: ...\n" |
|
|
"file_path_list: file_path1, file_path2, ...\n" |
|
|
"\n" |
|
|
"paragraph 2: ...\n" |
|
|
"file_path_list: file_path3, file_path4, ...\n" |
|
|
"..." |
|
|
) |
|
|
|
|
|
|
|
|
user_prompt = f""" |
|
|
OUTLINE TO ORGANIZE CONTENT: |
|
|
{outline} |
|
|
|
|
|
{prompt_files} |
|
|
""" |
|
|
|
|
|
|
|
|
|
|
|
config = get_config() |
|
|
model_config = config.get_custom_llm_config() |
|
|
|
|
|
model_url = model_config.get('url') or os.getenv('MODEL_REQUEST_URL', '') |
|
|
model_token = model_config.get('token') or os.getenv('MODEL_REQUEST_TOKEN', '') |
|
|
headers = {'Content-Type': 'application/json', 'csb-token': model_token} |
|
|
|
|
|
try: |
|
|
|
|
|
max_retries = 5 |
|
|
response = None |
|
|
|
|
|
for attempt in range(max_retries): |
|
|
try: |
|
|
response = requests.post( |
|
|
url=model_url, |
|
|
headers=headers, |
|
|
json={ |
|
|
"model": model_config.get('model', 'pangu_auto'), |
|
|
"chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<s>[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{'<s>[unused9]系统:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}", |
|
|
"messages": [ |
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"} |
|
|
], |
|
|
"spaces_between_special_tokens": False, |
|
|
"max_tokens": max_tokens, |
|
|
"temperature": temperature, |
|
|
}, |
|
|
timeout=model_config.get("timeout", 180) |
|
|
) |
|
|
response = response.json() |
|
|
logger.debug(f"API response received") |
|
|
|
|
|
break |
|
|
except Exception as e: |
|
|
logger.warning(f"LLM API call attempt {attempt + 1} failed: {e}") |
|
|
if attempt == max_retries - 1: |
|
|
raise e |
|
|
time.sleep(5) |
|
|
|
|
|
if response is None: |
|
|
raise Exception("Failed to get response after all retries") |
|
|
|
|
|
|
|
|
ai_response = response["choices"][0]["message"]["content"].strip() |
|
|
|
|
|
session_context = self.get_session_context() |
|
|
session_id = session_context.get("session_id") |
|
|
|
|
|
|
|
|
conversation_history = [ |
|
|
|
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"}, |
|
|
{"role": "assistant", "content": ai_response} |
|
|
] |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=ai_response.split('think>')[-1].strip(), |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"AI model call failed: {e}") |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"AI classification failed: {str(e)}" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Search result classifier failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
|
|
|
|
|
|
@staticmethod |
|
|
def _correct_title_format(content: str, overall_outline: str) -> str: |
|
|
""" |
|
|
Correct title formats in content to match those in overall_outline. |
|
|
|
|
|
Args: |
|
|
content: The generated chapter content |
|
|
overall_outline: The overall outline containing correct title formats |
|
|
|
|
|
Returns: |
|
|
Content with corrected title formats |
|
|
""" |
|
|
|
|
|
outline_titles = {} |
|
|
|
|
|
for line in overall_outline.split('\n'): |
|
|
line = line.strip() |
|
|
if line: |
|
|
|
|
|
core_content = line |
|
|
|
|
|
|
|
|
core_content = re.sub(r'^[\*\-\s]+', '', core_content) |
|
|
|
|
|
core_content = re.sub(r'[\*\s]+$', '', core_content) |
|
|
core_content = core_content.strip() |
|
|
|
|
|
if core_content: |
|
|
|
|
|
outline_titles[core_content.lower()] = line |
|
|
|
|
|
|
|
|
content_lines = content.split('\n') |
|
|
corrected_lines = [] |
|
|
|
|
|
for line in content_lines: |
|
|
original_line = line |
|
|
line_stripped = line.strip() |
|
|
|
|
|
|
|
|
if line_stripped and re.match(r'^#+\s*[\*]*.*', line_stripped): |
|
|
|
|
|
core_content = line_stripped |
|
|
|
|
|
|
|
|
core_content = re.sub(r'^#+\s*', '', core_content) |
|
|
|
|
|
core_content = re.sub(r'^\*\*', '', core_content) |
|
|
core_content = re.sub(r'\*\*$', '', core_content) |
|
|
core_content = core_content.strip() |
|
|
|
|
|
|
|
|
found_match = False |
|
|
core_content_lower = core_content.lower() |
|
|
|
|
|
for outline_core, outline_format in outline_titles.items(): |
|
|
if outline_core == core_content_lower: |
|
|
|
|
|
corrected_lines.append(outline_format) |
|
|
found_match = True |
|
|
break |
|
|
|
|
|
if not found_match: |
|
|
|
|
|
corrected_lines.append(original_line) |
|
|
else: |
|
|
corrected_lines.append(original_line) |
|
|
|
|
|
return '\n'.join(corrected_lines) |
|
|
|
|
|
def section_writer( |
|
|
self, |
|
|
written_chapters_summary: str, |
|
|
task_content: str, |
|
|
user_query: str, |
|
|
current_chapter_outline: str, |
|
|
overall_outline: str, |
|
|
target_file_path: str, |
|
|
key_files: List[Dict], |
|
|
model: str = "pangu_auto", |
|
|
temperature: Optional[float] = None, |
|
|
max_tokens: Optional[int] = None |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Write the current chapter content based on given web information and chapter structure; also consider user questions, completed chapters, and overall outline to ensure content relevance while avoiding duplication or contradictions. |
|
|
|
|
|
Args: |
|
|
user_query: The user query, ensure the drafted content is highly relevant to the user's inquiry. |
|
|
current_chapter_outline: This field represents the current chapter structure to be drafted. When composing the chapter content, do not modify content and bold formatting symbols of the existing structure's titles!!! |
|
|
overall_outline: This field represents the overall outline of the article. When drafting the chapter content, you should consider the overall outline to ensure the chapter content is consistent with the overall outline. |
|
|
target_file_path: The path to save the chapter content |
|
|
key_files: These files are the source materials required for drafting the current chapter. |
|
|
model: AI model to use for writing the chapter content |
|
|
temperature: Creativity level for the AI response (0-1) |
|
|
max_tokens: Maximum tokens for the AI response |
|
|
""" |
|
|
try: |
|
|
|
|
|
from config.config import get_model_config, get_storage_config |
|
|
model_config = get_model_config() |
|
|
storage_config = get_storage_config() |
|
|
|
|
|
|
|
|
if temperature is None: |
|
|
temperature = model_config.get('temperature', 0.3) |
|
|
if max_tokens is None: |
|
|
max_tokens = model_config.get('max_tokens', 8192) |
|
|
|
|
|
|
|
|
key_files_dict = {} |
|
|
|
|
|
analysis_path = storage_config.get('document_analysis_path', './doc_analysis') |
|
|
file_analysis_list = self.load_json(f"{analysis_path}/file_analysis.jsonl").data |
|
|
logger.debug("File analysis loaded successfully") |
|
|
|
|
|
for i, file_info in enumerate(file_analysis_list, 1): |
|
|
file_info['index'] = i |
|
|
if file_info.get('file_path'): |
|
|
key_files_dict[file_info.get('file_path')] = file_info |
|
|
|
|
|
prompt_files = "" |
|
|
if key_files: |
|
|
prompt_files += f"Web Information Source(s) As Follows::\n" |
|
|
for i, file_info in enumerate(key_files, 1): |
|
|
if file_info.get('file_path') in key_files_dict: |
|
|
file_info = key_files_dict[file_info.get('file_path')] |
|
|
index = file_info.get('index') |
|
|
|
|
|
file_path = file_info.get('file_path') |
|
|
|
|
|
def get_file_head_content(file_path, max_length=10000): |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
if not full_path.exists(): |
|
|
return f"[Error: File does not exist: {file_path}]" |
|
|
with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
|
content = f.read(max_length) |
|
|
return content |
|
|
except Exception as e: |
|
|
return f"[Error reading file {file_path}: {str(e)}]" |
|
|
|
|
|
file_content = get_file_head_content(file_path) |
|
|
doc_time = file_info.get('doc_time', 'Not specified') |
|
|
source_authority = file_info.get('source_authority', 'Not specified') |
|
|
task_relevance = file_info.get('task_relevance', 'Not specified') |
|
|
|
|
|
prompt_files += f"\n[webpaeg{index} begin]网页时间: {doc_time}|||网页权威性:{source_authority}|||网页相关性:{task_relevance}|||网页内容:{file_content}[webpaeg{index} end]" |
|
|
|
|
|
|
|
|
system_prompt = """You are a writing master. Next, you will receive web page information, user questions, and the structure of the current chapter. You need to integrate the user's questions with the provided web content and write the chapter based on its given structure. Additionally, an overall outline and summaries of previously completed chapters will be provided for reference to avoid repetition or contradictions and ensure logical consistency within the broader framework. Specific requirements will be detailed below. |
|
|
|
|
|
When drafting the current chapter content, strictly comply with the following requirements: |
|
|
- In the web page information I gave you, each result is in the format of [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the sentence when appropriate. Please cite the context in the corresponding part of the answer in the format of the reference number [citation:X]. If a sentence comes from multiple contexts, please list all relevant reference numbers, such as [citation:3][citation:5]. Remember not to collect the references at the end and return the reference numbers, but list them in the corresponding part of the answer. |
|
|
- You can only use the provided web page information for writing, don't make up any content, ensure the accuracy of the facts. Note that when there are contradictions between the facts described in the above search results, you should use your internal knowledge to reasonably identify the correct information. If identification is impossible, you may select the most factual result based on the authority of the web pages and a voting mechanism (e.g., the description consistent with the majority of web pages). If judgment remains impossible using these methods, you may appropriately list possible differing statements, but you must not conflate different claims—prioritize ensuring factual accuracy! |
|
|
- You are only permitted to write content strictly within the provided chapter framework. You are forbidden from creating additional subheadings or bullet points within the framework! However, there is a special exception: You may appropriately use tables for narration when necessary. Furthermore, you are not allowed to use concise or summarizing language for narration! We must strictly ensure the information density of the writing and avoid excessive compression. |
|
|
- You cannot make any changes to the structure of the chapter you are currently writing, such as the title content and the bold symbols in the title. **Important Note:** When writing Chapter 1, if you find the chapter lacks an article title, you must create one based on the user query. However, this rule only applies to Chapter 1 - do not add any titles to any other chapters in the work.
|
|
- Be careful to ensure that the narrative content is highly relevant and does not contain any common sense errors, note that although you are asked to ensure the richness of information when writing, you must ensure that the content you write is highly relevant and that the context is logically coherent and readable. |
|
|
- Proceeding to explain the roles of other specified fields: |
|
|
* user_query: The user query, ensure the drafted content is highly relevant to the user's inquiry. |
|
|
* written_chapters: Reference written_chapters to avoid large amounts of repetitive or conflicting content |
|
|
* overall_outline: The purpose of giving an overall outline is to let you understand the summary of the article and avoid content inconsistent with other parts during your writing. In short, focus on writing the current chapter. |
|
|
* task_content: The task_content may provide the requirements for writing the current chapter as well as prompts for what to avoid. You can refer to this content when drafting. |
|
|
|
|
|
Other points to note:
|
|
- If the first chapter is an **Abstract** or **Introduction**, do not include subheadings (level-2 or finer bullet points)—begin the content directly under the level-1 heading. |
|
|
- CONTENT LENGTH: Each section should contain approximately 2500 words to ensure comprehensive coverage. |
|
|
- **CRITICAL TITLE PRESERVATION RULE:** You MUST preserve the exact format, structure, and content of chapter titles as provided in the current_chapter_outline. This includes: |
|
|
* DO NOT change any markdown formatting symbols (# ## ### ** etc.) |
|
|
* DO NOT add, remove, or rearrange any part of the title structure |
|
|
* Copy the title lines EXACTLY as they appear in current_chapter_outline |
|
|
* Only write content under the provided title structure - never modify the titles themselves |
|
|
* When the title symbols in the current chapter outline are inconsistent with those in the overall outline, use the overall outline's title symbols as the standard and maintain symbol consistency throughout the writing process |
|
|
- Note that in Chapter 1, omit any mention of research objectives, methodology, or procedural details. |
|
|
- Be sure to ensure that the language of your output is consistent with the language of the user's question. For example, if the user's question is in Chinese, your reply should also be in Chinese. |
|
|
|
|
|
Strictly follow the following format for output: |
|
|
<chapter_content>xxx</chapter_content> |
|
|
""" |
|
|
|
|
|
user_prompt = f"""TASK CONTENT: {task_content} |
|
|
WEB PAGE INFORMATION: {prompt_files} |
|
|
OVERALL OUTLINE: {overall_outline} |
|
|
CURRENT CHAPTER OUTLINE: {current_chapter_outline} |
|
|
PREVIOUSLY WRITTEN CHAPTERS SUMMARY: {written_chapters_summary} |
|
|
USER QUERY: {user_query}""" |
|
|
|
|
|
|
|
|
|
|
|
config = get_config() |
|
|
model_config = config.get_custom_llm_config() |
|
|
|
|
|
model_url = model_config.get('url') or os.getenv('MODEL_REQUEST_URL', '') |
|
|
model_token = model_config.get('token') or os.getenv('MODEL_REQUEST_TOKEN', '') |
|
|
headers = {'Content-Type': 'application/json', 'csb-token': model_token} |
|
|
try: |
|
|
max_retries = 5 |
|
|
response = None |
|
|
for attempt in range(max_retries): |
|
|
try: |
|
|
response = requests.post( |
|
|
url=model_url, |
|
|
headers=headers, |
|
|
json={ |
|
|
"model": model_config.get('model', 'pangu_auto'), |
|
|
"chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<s>[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{'<s>[unused9]系统:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}", |
|
|
"messages": [ |
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"} |
|
|
], |
|
|
"spaces_between_special_tokens": False, |
|
|
"max_tokens": max_tokens, |
|
|
"temperature": temperature, |
|
|
}, |
|
|
timeout=model_config.get("timeout", 180) |
|
|
) |
|
|
response = response.json() |
|
|
logger.debug(f"API response received") |
|
|
|
|
|
break |
|
|
except Exception as e: |
|
|
logger.warning(f"LLM API call attempt {attempt + 1} failed: {e}") |
|
|
if attempt == max_retries - 1: |
|
|
raise e |
|
|
time.sleep(5) |
|
|
|
|
|
if response is None: |
|
|
raise Exception("Failed to get response after all retries") |
|
|
|
|
|
|
|
|
ai_response = response["choices"][0]["message"]["content"].strip() |
|
|
|
|
|
|
|
|
content = "" |
|
|
if "<chapter_content>" in ai_response: |
|
|
content = ai_response.split("<chapter_content>")[1].split("</chapter_content>")[0].strip() |
|
|
else: |
|
|
content = ai_response |
|
|
|
|
|
logger.debug(f"Content before correction: {content[:200]}...") |
|
|
logger.debug(f"Overall outline: {overall_outline[:200]}...") |
|
|
content = self._correct_title_format(content, overall_outline) |
|
|
logger.debug(f"Content after correction: {content[:200]}...") |
|
|
|
|
|
summary_prompt = "Please give a brief summary of the output chapter content. Be sure to ensure that the language of the summary is consistent with the language of the output chapter content. For example, if the chapter content is in Chinese, your summary should also be in Chinese." |
|
|
|
|
|
summary_response = None |
|
|
max_retries = 5 |
|
|
for attempt in range(max_retries): |
|
|
try: |
|
|
summary_response = requests.post( |
|
|
url=model_url, |
|
|
headers=headers, |
|
|
json={ |
|
|
"model": model_config.get('model', 'pangu_auto'), |
|
|
"chat_template":"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<s>[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{'<s>[unused9]系统:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}", |
|
|
"messages": [ |
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"}, |
|
|
{"role": "assistant", "content": ai_response}, |
|
|
{"role": "user", "content": summary_prompt + " /no_think"} |
|
|
], |
|
|
"max_tokens": max_tokens, |
|
|
"spaces_between_special_tokens": False, |
|
|
"temperature": temperature, |
|
|
}, |
|
|
timeout=model_config.get("timeout", 180) |
|
|
) |
|
|
summary_response = summary_response.json() |
|
|
logger.debug(f"Summary API response received") |
|
|
|
|
|
break |
|
|
except Exception as e: |
|
|
logger.warning(f"Summary LLM API call attempt {attempt + 1} failed: {e}") |
|
|
if attempt == max_retries - 1: |
|
|
raise e |
|
|
time.sleep(5) |
|
|
|
|
|
if summary_response is None: |
|
|
raise Exception("Failed to get summary response after all retries") |
|
|
|
|
|
|
|
|
summary_ai_response = summary_response["choices"][0]["message"]["content"].strip() |
|
|
summary = summary_ai_response |
|
|
|
|
|
session_context = self.get_session_context() |
|
|
session_id = session_context.get("session_id") |
|
|
|
|
|
|
|
|
conversation_history = [ |
|
|
|
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"}, |
|
|
{"role": "assistant", "content": ai_response}, |
|
|
{"role": "user", "content": summary_prompt + " /no_think"}, |
|
|
{"role": "assistant", "content": summary_ai_response} |
|
|
] |
|
|
|
|
|
|
|
|
write_result = self.file_write(file_path=target_file_path, |
|
|
content=content, |
|
|
create_dirs=True) |
|
|
if not write_result.success: |
|
|
raise Exception(f"File write failed: {write_result.error}") |
|
|
|
|
|
return MCPToolResult(


success=True,


data={"chapter_summary": summary},
|
|
metadata={ |
|
|
'content_length': len(content), |
|
|
'summary_length': len(summary) |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"AI model call failed: {e}") |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"section writer failed: {str(e)}" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"section writer failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def merge_reports(self, section_contents, output_file):
"""Merge per-section part_<n>.md files into output_file, ordered by their numeric index."""
|
|
report_files = [] |
|
|
for section_content in section_contents: |
|
|
|
|
|
if isinstance(section_content, dict): |
|
|
file_path = section_content.get('file_path') |
|
|
else: |
|
|
|
|
|
file_path = section_content |
|
|
|
|
|
if file_path: |
|
|
full_path = self._safe_join(file_path) |
|
|
report_files.append(full_path) |
|
|
|
|
|
|
|
|
def extract_index(file_path): |
|
|
"""从文件名中提取数字索引""" |
|
|
filename = os.path.basename(file_path) |
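# Section files are expected to be named "part_<index>.md"; non-matching files are skipped.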
|
|
match = re.search(r'part_(\d+)\.md', filename) |
|
|
if match: |
|
|
return int(match.group(1)) |
|
|
return None |
|
|
|
|
|
|
|
|
indexed_files = [] |
|
|
for file_path in report_files: |
|
|
idx = extract_index(file_path) |
|
|
if idx is not None: |
|
|
indexed_files.append((idx, file_path)) |
|
|
|
|
|
|
|
|
indexed_files.sort(key=lambda x: x[0]) |
|
|
|
|
|
if not indexed_files: |
|
|
logger.warning("No report_*.md files found matching criteria") |
|
|
return |
|
|
|
|
|
|
|
|
try: |
|
|
with open(output_file, 'w', encoding='utf-8') as outfile: |
|
|
for idx, file_path in indexed_files: |
|
|
filename = os.path.basename(file_path) |
|
|
try: |
|
|
with open(file_path, 'r', encoding='utf-8') as infile: |
|
|
|
|
|
outfile.write(infile.read()) |
|
|
outfile.write("\n\n") |
|
|
|
|
|
|
|
|
logger.info(f"Merged: {filename}") |
|
|
except Exception as e: |
|
|
logger.warning(f"Unable to read file {filename}: {str(e)}") |
|
|
|
|
|
logger.info(f"\nMerge completed! The result is saved at: {output_file}") |
|
|
logger.info(f"A total of {len(indexed_files)} files have been merged.") |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Error: Failed to write to output file - {str(e)}") |
|
|
|
|
|
if os.path.exists(output_file): |
|
|
os.remove(output_file) |
|
|
raise |
|
|
|
|
|
def concat_section_files( |
|
|
self, |
|
|
section_files: List[Dict], |
|
|
final_file_path: str |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Concatenate the content of the saved section files into a single file |
|
|
""" |
|
|
try: |
|
|
logger.debug(f"Starting file concatenation: section_files={section_files}, final_file={final_file_path}") |
|
|
|
|
|
|
|
|
final_file_path = self._safe_join(final_file_path) |
|
|
|
|
|
output_dir = os.path.dirname(final_file_path) |
|
|
if output_dir: |
|
|
os.makedirs(output_dir, exist_ok=True) |
|
|
|
|
|
|
|
|
self.merge_reports(section_files, final_file_path) |
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data={"merged_files": len(section_files), "output_path": str(final_file_path)}, |
|
|
metadata={ |
|
|
'final_file_path': str(final_file_path), |
|
|
'section_count': len(section_files) |
|
|
} |
|
|
) |
|
|
except Exception as e: |
|
|
logger.error(f"Concatenate section files failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
def document_extract( |
|
|
self, |
|
|
|
|
|
tasks: List[Dict], |
|
|
model: str = "pangu_auto", |
|
|
temperature: Optional[float] = None, |
|
|
max_tokens: Optional[int] = None, |
|
|
max_workers: int = 5 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Multi-dimensional analysis of locally stored files using AI models. |
|
|
Evaluates each file across five key dimensions: document time, source authority,


core content extraction, information richness, and query relevance.
|
|
|
|
|
Args: |
|
|
tasks: List of task dictionaries containing: |
|
|
- file_path: Relative path to the file (relative to workspace root) to read |
|
|
- task: task for relevance assessment |
|
|
model: AI model to use for multi-dimensional analysis |
|
|
temperature: Creativity level for the AI response (0-1) |
|
|
max_tokens: Maximum tokens for the AI response |
|
|
max_workers: Maximum number of concurrent model API requests |
|
|
""" |
|
|
try: |
|
|
|
|
|
from config.config import get_model_config, get_storage_config |
|
|
model_config = get_model_config() |
|
|
storage_config = get_storage_config() |
|
|
|
|
|
|
|
|
if temperature is None: |
|
|
temperature = model_config.get('temperature', 0.3) |
|
|
if max_tokens is None: |
|
|
max_tokens = model_config.get('max_tokens', 8192) |
|
|
|
|
|
logger.debug(f"Starting document extraction: tasks={tasks}") |
|
|
|
|
|
def process_single_task(task: Dict) -> Dict: |
|
|
file_path = task['file_path'] |
|
|
task_content = task['task'] |
|
|
|
|
|
|
|
|
read_result = self.file_read(file_path) |
|
|
if not read_result.success: |
|
|
return { |
|
|
'file_path': file_path, |
|
|
'task': task_content, |
|
|
'success': False, |
|
|
'error': f"File read error: {read_result.error}", |
|
|
'answer': None |
|
|
} |
|
|
|
|
|
content = read_result.data |
|
|
system_prompt = ( |
|
|
"You are a text expert. Next, you will be given a document and task content. You need to analyze this document carefully and then provide multiple dimensional information for this document.\n\n" |
|
|
|
|
|
"The following are some dimensional information extracted:\n" |
|
|
"1. Web page time: According to the content of the document, extract the web page time of the document content. If it cannot be judged, it is expressed as \"unable to determine the web page time\"; otherwise, the time of the web page is output, accurate to the month, in the format of \"YYYY year MM month\", such as \"2023 June\";\n" |
|
|
"2. Authority: According to the information of the document, judge the source of the web page to confirm the credibility of the web page.\n" |
|
|
"3. Relevance: According to the current task (task_content) and the given document, judge whether the current document is related to the current task.\n" |
|
|
"4. Core content: Based on this document, you make a core content summary to ensure the richness of information, with a word count of about 200 words.\n" |
|
|
"Information richness: Estimate the total word count of substantive content in the document. Less than 200 words indicates scarcity; over 800 words suggests high richness; between these thresholds denotes moderate richness. Be careful not to just give the word count results, but also give a corresponding text description of how informative the content is.\n" |
|
|
|
|
|
"Note:\n1. Ensure the document's language aligns with the extracted dimensions (e.g., Chinese content requires Chinese extraction).\n2. For **source_authority** and **task_relevance**, first provide a brief description before concluding. \n" |
|
|
"- **Authority**: Briefly assess the source's credibility (e.g., expertise, reputation). *Conclusion*: [High/Medium/Low]. \n" |
|
|
"- **Relevance**: Summarize content alignment with the topic. *Conclusion*: [High/Medium/Low].\n" |
|
|
|
|
|
"The final output format must be a valid JSON object:\n" |
|
|
"{\n" |
|
|
" \"doc_time\": \"xxx\",\n" |
|
|
" \"source_authority\": \"xxx\",\n" |
|
|
" \"task_relevance\": \"xxx\",\n" |
|
|
" \"core_content\": \"xxx\",\n" |
|
|
" \"information_richness\": \"xxx\"\n" |
|
|
"}\n\n" |
|
|
"Important: Return ONLY the JSON object, no additional text or formatting." |
|
|
) |
|
|
|
|
|
|
|
|
user_prompt = ( |
|
|
f"DOCUMENT CONTENT:\n{content}\n" |
|
|
|
|
|
f"TASK FOR RELEVANCE ASSESSMENT: {task_content}" |
|
|
) |
|
|
|
|
|
|
|
|
config = get_config() |
|
|
model_config = config.get_custom_llm_config() |
|
|
|
|
|
model_url = model_config.get('url') or os.getenv('MODEL_REQUEST_URL', '') |
|
|
model_token = model_config.get('token') or os.getenv('MODEL_REQUEST_TOKEN', '') |
|
|
headers = {'Content-Type': 'application/json', 'csb-token': model_token} |
|
|
|
|
|
try: |
|
|
|
|
|
max_retries = 5 |
|
|
response = None |
|
|
|
|
|
for attempt in range(max_retries): |
|
|
try: |
|
|
response = requests.post( |
|
|
url=model_url, |
|
|
headers=headers, |
|
|
json={ |
|
|
"model": model_config.get('model', 'pangu_auto'), |
|
|
"chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<s>[unused9]系统:[unused10]' }}{% endif %}{% if message['role'] == 'system' %}{{'<s>[unused9]系统:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'assistant' %}{{'[unused9]助手:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'tool' %}{{'[unused9]工具:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'function' %}{{'[unused9]方法:' + message['content'] + '[unused10]'}}{% endif %}{% if message['role'] == 'user' %}{{'[unused9]用户:' + message['content'] + '[unused10]'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '[unused9]助手:' }}{% endif %}", |
|
|
"messages": [ |
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"} |
|
|
], |
|
|
"max_tokens": max_tokens, |
|
|
"spaces_between_special_tokens": False, |
|
|
"temperature": temperature, |
|
|
}, |
|
|
timeout=model_config.get("timeout", 180) |
|
|
) |
|
|
response = response.json() |
|
|
logger.info(f"LLM API response: {response}") |
|
|
|
|
|
break |
|
|
except Exception as e: |
|
|
logger.warning(f"LLM API call attempt {attempt + 1} failed: {e}") |
|
|
if attempt == max_retries - 1: |
|
|
raise e |
|
|
time.sleep(4) |
|
|
|
|
|
if response is None: |
|
|
raise Exception("Failed to get response after all retries") |
|
|
|
|
|
|
|
|
answer = response["choices"][0]["message"]["content"] |
|
|
|
|
|
session_context = self.get_session_context() |
|
|
session_id = session_context.get("session_id") |
|
|
|
|
|
conversation_history = [ |
|
|
|
|
|
{"role": "system", "content": system_prompt}, |
|
|
{"role": "user", "content": user_prompt + " /no_think"}, |
|
|
{"role": "assistant", "content": answer} |
|
|
] |
|
|
|
|
|
return { |
|
|
'file_path': file_path, |
|
|
'task': task_content, |
|
|
'success': True, |
|
|
'answer': answer, |
|
|
'metadata': { |
|
|
'file_size': len(content), |
|
|
'line_count': len(content.splitlines()) |
|
|
} |
|
|
} |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Model API call failed for file '{file_path}': {e}") |
|
|
return { |
|
|
'file_path': file_path, |
|
|
'task': task_content, |
|
|
'success': False, |
|
|
'error': f"Model API error: {str(e)}" |
|
|
} |
|
|
|
|
|
|
|
|
results = [] |
|
|
with ThreadPoolExecutor(max_workers=min(max_workers, len(tasks))) as executor: |
|
|
future_to_task = {executor.submit(process_single_task, task): task for task in tasks} |
|
|
|
|
|
for future in as_completed(future_to_task): |
|
|
try: |
|
|
result = future.result() |
|
|
results.append(result) |
|
|
except Exception as e: |
|
|
task = future_to_task[future] |
|
|
logger.error(f"Task processing failed for file '{task['file_path']}': {e}") |
|
|
results.append({ |
|
|
'file_path': task['file_path'], |
|
|
'task': task['task'], |
|
|
'success': False, |
|
|
'error': f"Task processing exception: {str(e)}" |
|
|
}) |
|
|
|
|
|
|
|
|
task_order = {task['file_path']: i for i, task in enumerate(tasks)} |
|
|
results.sort(key=lambda x: task_order.get(x['file_path'], float('inf'))) |
|
|
|
|
|
def parse_answer_to_structured_data(answer_text: str, file_path: str) -> Dict[str, str]: |
|
|
"""Parse the AI JSON response into structured data""" |
|
|
|
|
|
structured_data = { |
|
|
"file_path": file_path, |
|
|
"doc_time": "Unknown", |
|
|
"source_authority": "Unknown", |
|
|
"task_relevance": "Unknown", |
|
|
"information_richness": "Unknown", |
|
|
"core_content": "Unknown" |
|
|
} |
|
|
|
|
|
if not answer_text: |
|
|
return structured_data |
|
|
|
|
|
try: |
|
|
|
|
|
answer_text = answer_text.strip() |
|
|
|
|
|
|
|
|
if answer_text.startswith('```'): |
|
|
lines = answer_text.split('\n') |
|
|
|
|
|
start_idx = 0 |
|
|
end_idx = len(lines) |
|
|
for i, line in enumerate(lines): |
|
|
if line.strip().startswith('{'): |
|
|
start_idx = i |
|
|
break |
|
|
for i in range(len(lines) - 1, -1, -1): |
|
|
if lines[i].strip().endswith('}'): |
|
|
end_idx = i + 1 |
|
|
break |
|
|
answer_text = '\n'.join(lines[start_idx:end_idx]) |
|
|
|
|
|
|
|
|
parsed_data = json.loads(answer_text) |
|
|
|
|
|
|
|
|
if isinstance(parsed_data, dict): |
|
|
structured_data.update({ |
|
|
"file_path": file_path, |
|
|
"doc_time": parsed_data.get("doc_time", "Unknown"), |
|
|
"source_authority": parsed_data.get("source_authority", "Unknown"), |
|
|
"task_relevance": parsed_data.get("task_relevance", "Unknown"), |
|
|
"core_content": parsed_data.get("core_content", "Unknown"), |
|
|
"information_richness": parsed_data.get("information_richness", "Unknown") |
|
|
}) |
|
|
|
|
|
return structured_data |
|
|
|
|
|
except json.JSONDecodeError as e: |
|
|
|
|
|
structured_data["core_content"] = f"JSON parsing error: {str(e)}. Raw response: {answer_text[:200]}..."
|
|
return structured_data |
|
|
except Exception as e: |
|
|
|
|
|
structured_data["core_content"] = f"Parsing error: {str(e)}" |
|
|
return structured_data |
|
|
|
|
|
|
|
|
structured_results = [] |
|
|
for result in results: |
|
|
if result.get('success', False) and result.get('answer'): |
|
|
structured_data = parse_answer_to_structured_data( |
|
|
result['answer'], |
|
|
result['file_path'] |
|
|
) |
|
|
structured_results.append(structured_data) |
|
|
else: |
|
|
|
|
|
structured_results.append({ |
|
|
"file_path": result.get('file_path', 'Unknown'), |
|
|
"doc_time": "Processing failed", |
|
|
"source_authority": "Processing failed", |
|
|
"task_relevance": "Processing failed", |
|
|
"information_richness": "Unknown", |
|
|
"core_content": f"Error: {result.get('error', 'Unknown error')}" |
|
|
}) |
|
|
|
|
|
|
|
|
|
|
|
analysis_path = storage_config.get('document_analysis_path', './doc_analysis') |
|
|
full_save_path = self.workspace_path / analysis_path / "file_analysis.jsonl" |
|
|
full_save_path.parent.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with open(full_save_path, mode="a", encoding='utf-8') as f: |
|
|
for ii in structured_results: |
|
|
f.write(json.dumps(ii, ensure_ascii=False) + "\n") |
|
|
|
|
|
|
|
|
successful_tasks = len([r for r in results if r.get('success', False)]) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=results, |
|
|
metadata={ |
|
|
'total_tasks': len(tasks), |
|
|
'successful_tasks': successful_tasks, |
|
|
'failed_tasks': len(tasks) - successful_tasks, |
|
|
'model': model, |
|
|
'concurrent_workers': min(max_workers, len(tasks)) |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Context-based document extraction failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
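
# Hedged usage sketch for the multi-dimensional analysis method above (exposed
# as the `document_extract` tool in MCP_TOOL_SCHEMAS below; the method name,
# file path and task text here are illustrative assumptions). `tools` is an
# MCPTools instance whose session context has already been set:
#
#   result = tools.document_extract(
#       tasks=[{"file_path": "search_results/page_01.txt",
#               "task": "Collect 2024 market-size figures for solar inverters"}],
#       model="pangu_auto",
#   )
#   if result.success:
#       print(result.metadata["successful_tasks"], "of", result.metadata["total_tasks"])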
|
|
|
|
|
|
|
|
|
|
|
def download_files( |
|
|
self, |
|
|
urls: List[str], |
|
|
target_directory: str = None, |
|
|
overwrite: bool = False, |
|
|
max_file_size_mb: int = 100 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Download human-readable research files such as PDFs, documents, and data files. |
|
|
|
|
|
Use this tool for downloading research papers, documentation, reports, data files (CSV, JSON, XML), |
|
|
academic publications, and other human-readable content that you can analyze. |
|
|
|
|
|
WARNING: Do NOT use this tool for downloading web pages (HTML/HTM files) or other non-readable formats. |
|
|
For web page content extraction, use the url_crawler tool instead. |
|
|
|
|
|
Args: |
|
|
urls: List of URLs to download (PDFs, DOCs, research papers, data files, etc.) |
|
|
target_directory: Directory to save files (relative to session workspace) |
|
|
overwrite: Whether to overwrite existing files |
|
|
max_file_size_mb: Maximum file size in MB |
|
|
""" |
|
|
try: |
|
|
if target_directory: |
|
|
|
|
|
download_dir = self._safe_join(target_directory) |
|
|
else: |
|
|
download_dir = self.workspace_path / "downloads" |
|
|
|
|
|
download_dir.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
def download_single_file(url: str) -> Dict[str, Any]: |
|
|
"""Download a single file""" |
|
|
try: |
|
|
|
|
|
parsed_url = urlparse(url) |
|
|
filename = os.path.basename(parsed_url.path) or 'downloaded_file' |
|
|
|
|
|
|
|
|
if '.' not in filename: |
|
|
filename += '.html' |
|
|
|
|
|
if os.path.isabs(filename): |
|
|
raise Exception(f"Path '{filename}' is absolute. Only relative paths are allowed.") |
|
|
file_path = download_dir / filename |
|
|
if not os.path.realpath(file_path).startswith(self.full_workspace_path): |
|
|
raise Exception(f"Path '{filename}' is outside workspace directory.") |
|
|
|
|
|
|
|
|
if file_path.exists() and not overwrite: |
|
|
return { |
|
|
'url': url, |
|
|
'success': False, |
|
|
'error': 'File already exists', |
|
|
'file_path': str(file_path) |
|
|
} |
|
|
|
|
|
|
|
|
response = requests.get(url, stream=True, timeout=30) |
|
|
response.raise_for_status() |
|
|
|
|
|
|
|
|
content_length = response.headers.get('content-length') |
|
|
if content_length and int(content_length) > max_file_size_mb * 1024 * 1024: |
|
|
return { |
|
|
'url': url, |
|
|
'success': False, |
|
|
'error': f'File too large (>{max_file_size_mb}MB)', |
|
|
'file_path': None |
|
|
} |
|
|
|
|
|
|
|
|
with open(file_path, 'wb') as f: |
|
|
for chunk in response.iter_content(chunk_size=8192): |
|
|
f.write(chunk) |
|
|
|
|
|
return { |
|
|
'url': url, |
|
|
'success': True, |
|
|
'file_path': str(file_path), |
|
|
'file_size': file_path.stat().st_size |
|
|
} |
|
|
|
|
|
except Exception as e: |
|
|
return { |
|
|
'url': url, |
|
|
'success': False, |
|
|
'error': str(e), |
|
|
'file_path': None |
|
|
} |
|
|
|
|
|
|
|
|
results = [] |
|
|
max_concurrent_downloads = min(5, len(urls)) |
|
|
with ThreadPoolExecutor(max_workers=max_concurrent_downloads) as executor: |
|
|
|
|
|
future_to_url = {executor.submit(download_single_file, url): url for url in urls} |
|
|
|
|
|
|
|
|
for future in as_completed(future_to_url): |
|
|
try: |
|
|
result = future.result() |
|
|
results.append(result) |
|
|
except Exception as e: |
|
|
url = future_to_url[future] |
|
|
logger.error(f"Download task failed for '{url}': {e}") |
|
|
results.append({ |
|
|
'url': url, |
|
|
'success': False, |
|
|
'error': f"Download task exception: {str(e)}", |
|
|
'file_path': None |
|
|
}) |
|
|
|
|
|
|
|
|
url_order = {urls[i]: i for i in range(len(urls))} |
|
|
results.sort(key=lambda x: url_order.get(x['url'], float('inf'))) |
|
|
|
|
|
|
|
|
successful_downloads = len([r for r in results if r.get('success', False)]) |
|
|
failed_downloads = len(results) - successful_downloads |
|
|
|
|
|
status_msg = f"File download task completed. Processed {len(urls)} URLs with {successful_downloads} successful downloads and {failed_downloads} failures. Files saved to {download_dir.relative_to(self.workspace_path)}. Use file reading tools to examine the downloaded files." |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=status_msg, |
|
|
metadata={ |
|
|
'download_directory': str(download_dir), |
|
|
'total_urls': len(urls), |
|
|
'successful_downloads': successful_downloads, |
|
|
'failed_downloads': failed_downloads |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"Download files failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
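
# Hedged usage sketch (URL and directory are hypothetical): files are fetched
# concurrently, size-checked against max_file_size_mb, and saved under the
# session workspace; the returned data is a human-readable status summary.
#
#   result = tools.download_files(
#       urls=["https://example.com/whitepaper.pdf"],
#       target_directory="downloads",
#       overwrite=False,
#   )
#   print(result.data)
#   print(result.metadata["successful_downloads"], "downloads succeeded")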
|
|
|
|
|
|
|
|
|
|
|
def list_workspace( |
|
|
self, |
|
|
path: str = None, |
|
|
recursive: bool = False, |
|
|
include_hidden: bool = False, |
|
|
max_depth: int = 3 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
List files and directories in workspace with tree structure visualization |
|
|
|
|
|
Args: |
|
|
path: Specific path to list (relative to session workspace) |
|
|
recursive: Whether to list recursively |
|
|
include_hidden: Whether to include hidden files |
|
|
max_depth: Maximum recursion depth |
|
|
""" |
|
|
try: |
|
|
if path: |
|
|
|
|
|
target_path = self._safe_join(path) |
|
|
else: |
|
|
target_path = self.workspace_path |
|
|
|
|
|
if not target_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"Path does not exist: {target_path}" |
|
|
) |
|
|
|
|
|
if not target_path.is_dir(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"Path is not a directory: {target_path}" |
|
|
) |
|
|
|
|
|
items = [] |
|
|
tree_structure = [] |
|
|
|
|
|
def _list_items(current_path: Path, current_depth: int = 0): |
|
|
if current_depth > max_depth: |
|
|
return |
|
|
|
|
|
try: |
|
|
|
|
|
all_items = list(current_path.iterdir()) |
|
|
if not include_hidden: |
|
|
all_items = [item for item in all_items if not item.name.startswith('.')] |
|
|
|
|
|
|
|
|
all_items.sort(key=lambda x: (not x.is_dir(), x.name.lower())) |
|
|
|
|
|
for item in all_items: |
|
|
item_info = { |
|
|
'name': item.name, |
|
|
'path': str(item.relative_to(self.workspace_path)), |
|
|
'type': 'directory' if item.is_dir() else 'file', |
|
|
'size': item.stat().st_size if item.is_file() else None, |
|
|
'modified': item.stat().st_mtime, |
|
|
'depth': current_depth |
|
|
} |
|
|
|
|
|
items.append(item_info) |
|
|
|
|
|
|
|
|
if recursive and item.is_dir(): |
|
|
_list_items(item, current_depth + 1) |
|
|
|
|
|
except PermissionError: |
|
|
pass |
|
|
|
|
|
def _generate_tree_structure(current_path: Path, prefix: str = "", is_last: bool = True, |
|
|
current_depth: int = 0): |
|
|
"""Generate ASCII tree structure recursively""" |
|
|
if current_depth > max_depth: |
|
|
return |
|
|
|
|
|
try: |
|
|
|
|
|
all_items = list(current_path.iterdir()) |
|
|
if not include_hidden: |
|
|
all_items = [item for item in all_items if not item.name.startswith('.')] |
|
|
|
|
|
|
|
|
all_items.sort(key=lambda x: (not x.is_dir(), x.name.lower())) |
|
|
|
|
|
for i, item in enumerate(all_items): |
|
|
is_last_item = i == len(all_items) - 1 |
|
|
|
|
|
|
|
|
if is_last_item: |
|
|
current_symbol = "└── " |
|
|
extension = " " |
|
|
else: |
|
|
current_symbol = "├── " |
|
|
extension = "│ " |
|
|
|
|
|
|
|
|
if item.is_dir(): |
|
|
name_with_indicator = f"📁 {item.name}/" |
|
|
else: |
|
|
|
|
|
try: |
|
|
size = item.stat().st_size |
|
|
if size < 1024: |
|
|
size_str = f"{size}B" |
|
|
elif size < 1024 * 1024: |
|
|
size_str = f"{size / 1024:.1f}KB" |
|
|
else: |
|
|
size_str = f"{size / (1024 * 1024):.1f}MB" |
|
|
name_with_indicator = f"📄 {item.name} ({size_str})" |
|
|
except OSError:
|
|
name_with_indicator = f"📄 {item.name}" |
|
|
|
|
|
tree_line = prefix + current_symbol + name_with_indicator |
|
|
tree_structure.append(tree_line) |
|
|
|
|
|
|
|
|
if recursive and item.is_dir(): |
|
|
_generate_tree_structure( |
|
|
item, |
|
|
prefix + extension, |
|
|
is_last_item, |
|
|
current_depth + 1 |
|
|
) |
|
|
|
|
|
except PermissionError: |
|
|
tree_structure.append(prefix + "└── [Permission Denied]") |
|
|
|
|
|
|
|
|
_list_items(target_path) |
|
|
|
|
|
|
|
|
root_name = target_path.name if target_path.name else "workspace" |
|
|
tree_structure.append(f"📁 {root_name}/") |
|
|
|
|
|
if recursive: |
|
|
_generate_tree_structure(target_path) |
|
|
else: |
|
|
|
|
|
try: |
|
|
all_items = list(target_path.iterdir()) |
|
|
if not include_hidden: |
|
|
all_items = [item for item in all_items if not item.name.startswith('.')] |
|
|
|
|
|
all_items.sort(key=lambda x: (not x.is_dir(), x.name.lower())) |
|
|
|
|
|
for i, item in enumerate(all_items): |
|
|
is_last_item = i == len(all_items) - 1 |
|
|
symbol = "└── " if is_last_item else "├── " |
|
|
|
|
|
if item.is_dir(): |
|
|
name_with_indicator = f"📁 {item.name}/" |
|
|
else: |
|
|
try: |
|
|
size = item.stat().st_size |
|
|
if size < 1024: |
|
|
size_str = f"{size}B" |
|
|
elif size < 1024 * 1024: |
|
|
size_str = f"{size / 1024:.1f}KB" |
|
|
else: |
|
|
size_str = f"{size / (1024 * 1024):.1f}MB" |
|
|
name_with_indicator = f"📄 {item.name} ({size_str})" |
|
|
except OSError:
|
|
name_with_indicator = f"📄 {item.name}" |
|
|
|
|
|
tree_structure.append(symbol + name_with_indicator) |
|
|
|
|
|
except PermissionError: |
|
|
tree_structure.append("└── [Permission Denied]") |
|
|
|
|
|
|
|
|
tree_string = "\n".join(tree_structure) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data={ |
|
|
'items': items, |
|
|
'tree_structure': tree_string, |
|
|
'tree_lines': tree_structure |
|
|
}, |
|
|
metadata={ |
|
|
'target_path': str(target_path.relative_to(self.workspace_path)) if path else '.', |
|
|
'total_items': len(items), |
|
|
'recursive': recursive, |
|
|
'max_depth': max_depth, |
|
|
'include_hidden': include_hidden |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"List workspace failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
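
# Hedged usage sketch (the "downloads" directory is hypothetical): list two
# levels deep and print the ASCII tree assembled above.
#
#   result = tools.list_workspace(path="downloads", recursive=True, max_depth=2)
#   if result.success:
#       print(result.data["tree_structure"])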
|
|
|
|
|
|
|
|
def str_replace_based_edit_tool( |
|
|
self, |
|
|
action: str, |
|
|
file_path: str, |
|
|
content: str = None, |
|
|
old_str: str = None, |
|
|
new_str: str = None, |
|
|
line_number: int = None, |
|
|
max_char_len: int = 10000, |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Comprehensive file editing tool |
|
|
|
|
|
Args: |
|
|
action: 'create', 'view', 'str_replace', 'insert', 'append', 'delete' |
|
|
file_path: Path to the file |
|
|
content: Content for create action |
|
|
old_str: String to replace (for str_replace) |
|
|
new_str: Replacement string (for str_replace) |
|
|
line_number: Line number for insert action |
|
|
""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if action == 'create': |
|
|
if full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File already exists: {file_path}" |
|
|
) |
|
|
|
|
|
full_path.parent.mkdir(parents=True, exist_ok=True) |
|
|
full_path.write_text(content or '', encoding='utf-8') |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"File created: {file_path}", |
|
|
metadata={'file_size': full_path.stat().st_size} |
|
|
) |
|
|
|
|
|
elif action == 'view': |
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
content = full_path.read_text(encoding='utf-8') |
|
|
if len(content) > max_char_len: |
|
|
content = ("Due to the content being too long, only the first 10,000 characters are returned. " |
|
|
"It is recommended to use other tools such as `document_qa` to extract the required content from the file. " |
|
|
"Below is the returned portion of the file content: \n\n") + content[:max_char_len] |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=content, |
|
|
metadata={ |
|
|
'file_size': len(content), |
|
|
'line_count': len(content.splitlines()) |
|
|
} |
|
|
) |
|
|
|
|
|
elif action == 'str_replace': |
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
if not old_str or new_str is None: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error="Both old_str and new_str are required for str_replace" |
|
|
) |
|
|
|
|
|
original_content = full_path.read_text(encoding='utf-8') |
|
|
|
|
|
if old_str not in original_content: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"String not found: {old_str[:50]}..." |
|
|
) |
|
|
|
|
|
new_content = original_content.replace(old_str, new_str) |
|
|
full_path.write_text(new_content, encoding='utf-8') |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"Replaced {original_content.count(old_str)} occurrence(s)", |
|
|
metadata={ |
|
|
'old_size': len(original_content), |
|
|
'new_size': len(new_content) |
|
|
} |
|
|
) |
|
|
|
|
|
elif action == 'insert': |
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
if line_number is None or content is None: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error="Both line_number and content are required for insert" |
|
|
) |
|
|
|
|
|
lines = full_path.read_text(encoding='utf-8').splitlines() |
|
|
|
|
|
if line_number < 0 or line_number > len(lines): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"Invalid line number: {line_number}" |
|
|
) |
|
|
|
|
|
lines.insert(line_number, content) |
|
|
full_path.write_text('\n'.join(lines), encoding='utf-8') |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"Inserted content at line {line_number}", |
|
|
metadata={'new_line_count': len(lines)} |
|
|
) |
|
|
|
|
|
elif action == 'append': |
|
|
if not full_path.exists(): |
|
|
full_path.touch() |
|
|
|
|
|
with open(full_path, 'a', encoding='utf-8') as f: |
|
|
f.write(content or '') |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"Appended content to {file_path}", |
|
|
metadata={'file_size': full_path.stat().st_size} |
|
|
) |
|
|
|
|
|
elif action == 'delete': |
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
full_path.unlink() |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"Deleted file: {file_path}" |
|
|
) |
|
|
|
|
|
else: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"Unknown action: {action}" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File edit failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
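
# Hedged usage sketch covering two of the supported actions (file name and
# strings are hypothetical): create a file, then rewrite part of it in place.
#
#   tools.str_replace_based_edit_tool(
#       action="create", file_path="notes/draft.md", content="# Draft\n")
#   tools.str_replace_based_edit_tool(
#       action="str_replace", file_path="notes/draft.md",
#       old_str="# Draft", new_str="# Final draft")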
|
|
|
|
|
|
|
|
|
|
|
def file_read(self, file_path: str, encoding: str = 'utf-8', max_char_len: int = 10000) -> MCPToolResult: |
|
|
"""Read file content""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
content = full_path.read_text(encoding=encoding) |
|
|
if len(content) > max_char_len: |
|
|
content = "Due to the content being too long, only the first 10,000 characters are returned. It is recommended to use other tools such as `document_qa` to extract the required content from the file. Below is the returned portion of the file content: \n\n" + content[:max_char_len] |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=content, |
|
|
metadata={ |
|
|
'file_size': len(content), |
|
|
'line_count': len(content.splitlines()), |
|
|
'encoding': encoding |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File read failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
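
# Hedged usage sketch (path is hypothetical): content longer than max_char_len
# is truncated and prefixed with a notice, so very large files are better
# served by file_read_lines or the document_qa tool.
#
#   result = tools.file_read("downloads/report.txt", max_char_len=5000)
#   if result.success:
#       print(result.metadata["line_count"], "lines returned")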
|
|
|
|
|
|
|
|
|
|
|
def file_stats(self, file_path: str) -> MCPToolResult: |
|
|
""" |
|
|
Get comprehensive file statistics without reading full content. |
|
|
Perfect for deciding whether to read full file or use targeted extraction. |
|
|
|
|
|
Args: |
|
|
file_path: Path to the file (relative to workspace) |
|
|
""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
if not full_path.is_file(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"Path is not a file: {file_path}" |
|
|
) |
|
|
|
|
|
|
|
|
stat_info = full_path.stat() |
|
|
file_size = stat_info.st_size |
|
|
|
|
|
|
|
|
encoding = 'utf-8' |
|
|
line_count = 0 |
|
|
word_count = 0 |
|
|
char_count = 0 |
|
|
first_lines = [] |
|
|
last_lines = [] |
|
|
|
|
|
try: |
|
|
with open(full_path, 'r', encoding=encoding, errors='ignore') as f: |
|
|
|
|
|
for i, line in enumerate(f): |
|
|
line_count += 1 |
|
|
if i < 5: |
|
|
first_lines.append(line.rstrip()) |
|
|
|
|
|
char_count += len(line) |
|
|
word_count += len(line.split()) |
|
|
|
|
|
|
|
|
if line_count > 10000: |
|
|
|
|
|
remaining_size = file_size - f.tell() |
|
|
if remaining_size > 0: |
|
|
avg_line_size = f.tell() / line_count |
|
|
estimated_remaining_lines = int(remaining_size / avg_line_size) |
|
|
line_count += estimated_remaining_lines |
|
|
|
|
|
|
|
|
avg_chars_per_line = char_count / min(line_count, 10000) |
|
|
avg_words_per_line = word_count / min(line_count, 10000) |
|
|
char_count += int(remaining_size) |
|
|
word_count += int(estimated_remaining_lines * avg_words_per_line) |
|
|
break |
|
|
|
|
|
|
|
|
if file_size < 1024 * 1024: |
|
|
with open(full_path, 'r', encoding=encoding, errors='ignore') as f: |
|
|
lines = f.readlines() |
|
|
last_lines = [line.rstrip() for line in lines[-5:]] |
|
|
if line_count <= 10000: |
|
|
line_count = len(lines) |
|
|
char_count = sum(len(line) for line in lines) |
|
|
word_count = sum(len(line.split()) for line in lines) |
|
|
|
|
|
except Exception as e: |
|
|
|
|
|
encoding = 'binary' |
|
|
char_count = file_size |
|
|
|
|
|
|
|
|
file_extension = full_path.suffix.lower() |
|
|
file_type = self._detect_file_type(full_path, file_extension) |
|
|
|
|
|
|
|
|
reading_recommendation = self._get_reading_recommendation( |
|
|
file_size, line_count, word_count, file_type |
|
|
) |
|
|
|
|
|
stats = { |
|
|
'file_path': file_path, |
|
|
'file_size_bytes': file_size, |
|
|
'file_size_human': self._format_file_size(file_size), |
|
|
'line_count': line_count, |
|
|
'word_count': word_count, |
|
|
'character_count': char_count, |
|
|
'encoding': encoding, |
|
|
'file_type': file_type, |
|
|
'file_extension': file_extension, |
|
|
'modified_time': stat_info.st_mtime, |
|
|
'is_large_file': file_size > 1024 * 1024, |
|
|
'is_very_large_file': file_size > 10 * 1024 * 1024, |
|
|
'first_lines_preview': first_lines, |
|
|
'last_lines_preview': last_lines, |
|
|
'reading_recommendation': reading_recommendation |
|
|
} |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=stats, |
|
|
metadata={ |
|
|
'analysis_method': 'efficient_sampling' if line_count > 10000 else 'full_analysis' |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File stats failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
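
# Hedged usage sketch (path is hypothetical): inspect a file before deciding
# how to read it; reading_recommendation reflects the heuristics implemented
# in _get_reading_recommendation below.
#
#   result = tools.file_stats("downloads/report.txt")
#   if result.success:
#       stats = result.data
#       print(stats["file_size_human"], stats["reading_recommendation"]["strategy"])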
|
|
|
|
|
@staticmethod |
|
|
def _detect_file_type(file_path: Path, extension: str) -> str: |
|
|
"""Detect file type based on extension and content""" |
|
|
|
|
|
|
|
|
type_map = { |
|
|
'.py': 'python_code', |
|
|
'.js': 'javascript_code', |
|
|
'.ts': 'typescript_code', |
|
|
'.java': 'java_code', |
|
|
'.cpp': 'cpp_code', |
|
|
'.c': 'c_code', |
|
|
'.html': 'html_markup', |
|
|
'.css': 'css_stylesheet', |
|
|
'.json': 'json_data', |
|
|
'.xml': 'xml_data', |
|
|
'.yaml': 'yaml_config', |
|
|
'.yml': 'yaml_config', |
|
|
'.md': 'markdown_document', |
|
|
'.txt': 'plain_text', |
|
|
'.csv': 'csv_data', |
|
|
'.sql': 'sql_code', |
|
|
'.sh': 'shell_script', |
|
|
'.dockerfile': 'docker_config', |
|
|
'.env': 'environment_config' |
|
|
} |
|
|
|
|
|
if extension in type_map: |
|
|
return type_map[extension] |
|
|
|
|
|
|
|
|
try: |
|
|
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
|
first_line = f.readline().strip() |
|
|
|
|
|
if first_line.startswith('#!'): |
|
|
return 'executable_script' |
|
|
elif first_line.startswith('<?xml'): |
|
|
return 'xml_data' |
|
|
elif first_line.startswith('{') or first_line.startswith('['): |
|
|
return 'json_data' |
|
|
elif 'DOCTYPE html' in first_line or '<html' in first_line: |
|
|
return 'html_markup' |
|
|
except OSError:
|
|
pass |
|
|
|
|
|
return 'unknown_text' |
|
|
|
|
|
def _format_file_size(self, size_bytes: int) -> str: |
|
|
"""Format file size in human readable format""" |
|
|
if size_bytes < 1024: |
|
|
return f"{size_bytes} B" |
|
|
elif size_bytes < 1024 * 1024: |
|
|
return f"{size_bytes / 1024:.1f} KB" |
|
|
elif size_bytes < 1024 * 1024 * 1024: |
|
|
return f"{size_bytes / (1024 * 1024):.1f} MB" |
|
|
else: |
|
|
return f"{size_bytes / (1024 * 1024 * 1024):.1f} GB" |
|
|
|
|
|
def _get_reading_recommendation(self, file_size: int, line_count: int, |
|
|
word_count: int, file_type: str) -> Dict[str, Any]: |
|
|
"""Provide intelligent recommendations for how to read the file""" |
|
|
|
|
|
recommendations = { |
|
|
'strategy': 'full_read', |
|
|
'reason': 'File is small enough for full reading', |
|
|
'alternatives': [] |
|
|
} |
|
|
|
|
|
|
|
|
if file_size > 1024 * 1024: |
|
|
recommendations['strategy'] = 'selective_read' |
|
|
recommendations['reason'] = 'File is large, consider targeted approaches' |
|
|
recommendations['alternatives'] = [ |
|
|
'Use file_grep_with_context to search for specific content', |
|
|
'Use content_preview to get overview before full read', |
|
|
'Use file_read_lines to read specific sections', |
|
|
'Note: content indexing is disabled, so index-based lookups are unavailable'
|
|
] |
|
|
|
|
|
elif line_count > 1000: |
|
|
recommendations['strategy'] = 'preview_first' |
|
|
recommendations['reason'] = 'Many lines, preview recommended before full read' |
|
|
recommendations['alternatives'] = [ |
|
|
'Use content_preview for quick overview', |
|
|
'Use file_grep_with_context for specific searches' |
|
|
] |
|
|
|
|
|
|
|
|
if file_type in ['json_data', 'xml_data']: |
|
|
recommendations['alternatives'].append('Consider parsing structure instead of full text read') |
|
|
elif file_type.endswith('_code'): |
|
|
recommendations['alternatives'].append('Use grep to find specific functions/classes') |
|
|
elif file_type == 'csv_data': |
|
|
recommendations['alternatives'].append('Consider reading headers first with file_read_lines') |
|
|
|
|
|
return recommendations |
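
# Illustrative behaviour of the heuristic above (all values hypothetical): a
# 2 MB JSON file crosses the 1 MB threshold, so the strategy switches to
# 'selective_read' and a structure-parsing hint is appended to alternatives.
#
#   rec = tools._get_reading_recommendation(
#       file_size=2 * 1024 * 1024, line_count=50_000,
#       word_count=300_000, file_type='json_data')
#   assert rec['strategy'] == 'selective_read'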
|
|
|
|
|
|
|
|
def file_write( |
|
|
self, |
|
|
file_path: str, |
|
|
content: str, |
|
|
encoding: str = 'utf-8', |
|
|
create_dirs: bool = True |
|
|
) -> MCPToolResult: |
|
|
"""Write content to file""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if create_dirs: |
|
|
full_path.parent.mkdir(parents=True, exist_ok=True) |
|
|
|
|
|
full_path.write_text(content, encoding=encoding) |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=f"Written {len(content)} characters to {file_path}", |
|
|
metadata={ |
|
|
'file_size': full_path.stat().st_size, |
|
|
'encoding': encoding |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File write failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
|
|
|
|
|
|
|
|
|
|
|
def file_find_by_name( |
|
|
self, |
|
|
name_pattern: str, |
|
|
recursive: bool = True, |
|
|
case_sensitive: bool = False, |
|
|
max_results: int = 100 |
|
|
) -> MCPToolResult: |
|
|
"""Find files by name pattern""" |
|
|
try: |
|
|
import fnmatch |
|
|
|
|
|
if not case_sensitive: |
|
|
name_pattern = name_pattern.lower() |
|
|
|
|
|
matches = [] |
|
|
search_path = self.workspace_path |
|
|
|
|
|
def _match_name(file_path: Path) -> bool: |
|
|
name = file_path.name |
|
|
if not case_sensitive: |
|
|
name = name.lower() |
|
|
|
|
|
return fnmatch.fnmatch(name, name_pattern) |
|
|
|
|
|
|
|
|
if recursive: |
|
|
for file_path in search_path.rglob("*"): |
|
|
if _match_name(file_path): |
|
|
matches.append({ |
|
|
'name': file_path.name, |
|
|
'path': str(file_path.relative_to(self.workspace_path)), |
|
|
'type': 'directory' if file_path.is_dir() else 'file', |
|
|
'size': file_path.stat().st_size if file_path.is_file() else None |
|
|
}) |
|
|
|
|
|
if len(matches) >= max_results: |
|
|
break |
|
|
else: |
|
|
for file_path in search_path.iterdir(): |
|
|
if _match_name(file_path): |
|
|
matches.append({ |
|
|
'name': file_path.name, |
|
|
'path': str(file_path.relative_to(self.workspace_path)), |
|
|
'type': 'directory' if file_path.is_dir() else 'file', |
|
|
'size': file_path.stat().st_size if file_path.is_file() else None |
|
|
}) |
|
|
|
|
|
if len(matches) >= max_results: |
|
|
break |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data=matches, |
|
|
metadata={ |
|
|
'pattern': name_pattern, |
|
|
'total_matches': len(matches), |
|
|
'truncated': len(matches) >= max_results |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File find failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
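
# Hedged usage sketch: fnmatch-style wildcards, case-insensitive by default
# (the pattern below is illustrative).
#
#   result = tools.file_find_by_name("*.jsonl", recursive=True)
#   for match in result.data:
#       print(match["type"], match["path"], match["size"])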
|
|
|
|
|
def file_read_lines( |
|
|
self, |
|
|
file_path: str, |
|
|
start_line: int = 1, |
|
|
end_line: int = None, |
|
|
max_lines: int = 1000 |
|
|
) -> MCPToolResult: |
|
|
""" |
|
|
Read specific line ranges from a file without loading the entire file. |
|
|
Perfect for reading specific sections after grep or for large files. |
|
|
|
|
|
Args: |
|
|
file_path: Path to the file |
|
|
start_line: Starting line number (1-based) |
|
|
end_line: Ending line number (1-based, None for end of file) |
|
|
max_lines: Maximum number of lines to read (safety limit) |
|
|
""" |
|
|
try: |
|
|
full_path = self._safe_join(file_path) |
|
|
|
|
|
if not full_path.exists(): |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error=f"File does not exist: {file_path}" |
|
|
) |
|
|
|
|
|
if start_line < 1: |
|
|
return MCPToolResult( |
|
|
success=False, |
|
|
error="start_line must be >= 1" |
|
|
) |
|
|
|
|
|
lines_read = [] |
|
|
current_line = 0 |
|
|
|
|
|
with open(full_path, 'r', encoding='utf-8', errors='ignore') as f: |
|
|
for line in f: |
|
|
current_line += 1 |
|
|
|
|
|
|
|
|
if current_line < start_line: |
|
|
continue |
|
|
|
|
|
|
|
|
if end_line and current_line > end_line: |
|
|
break |
|
|
|
|
|
|
|
|
if len(lines_read) >= max_lines: |
|
|
break |
|
|
|
|
|
lines_read.append({ |
|
|
'line_number': current_line, |
|
|
'content': line.rstrip('\n\r') |
|
|
}) |
|
|
|
|
|
|
|
|
actual_end_line = lines_read[-1]['line_number'] if lines_read else start_line - 1 |
|
|
|
|
|
return MCPToolResult( |
|
|
success=True, |
|
|
data={ |
|
|
'file_path': file_path, |
|
|
'start_line': start_line, |
|
|
'end_line': actual_end_line, |
|
|
'lines': lines_read, |
|
|
'line_count': len(lines_read) |
|
|
}, |
|
|
metadata={ |
|
|
'total_lines_read': len(lines_read), |
|
|
'truncated_due_to_max_lines': len(lines_read) >= max_lines |
|
|
} |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
logger.error(f"File read lines failed: {e}") |
|
|
return MCPToolResult(success=False, error=str(e)) |
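
# Hedged usage sketch (path and line range are hypothetical): pull a specific
# window out of a large file instead of loading it whole.
#
#   result = tools.file_read_lines("downloads/report.txt", start_line=100, end_line=120)
#   for line in result.data["lines"]:
#       print(line["line_number"], line["content"])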
|
|
|
|
|
|
|
|
|
|
|
MCP_TOOL_SCHEMAS = { |
|
|
"think": { |
|
|
"name": "think", |
|
|
"description": "Use the tool to think about something. It will not obtain new information or make any changes to the repository, but just log the thought. Use it when complex reasoning or brainstorming is needed.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"thought": { |
|
|
"type": "string", |
|
|
"description": "Your thoughts." |
|
|
} |
|
|
}, |
|
|
"required": ["thought"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"reflect": { |
|
|
"name": "reflect", |
|
|
"description": "When multiple attempts yield no progress, use this tool to reflect on previous reasoning and planning, considering possible overlooked clues and exploring more possibilities. It will not obtain new information or make any changes to the repository.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"reflect": { |
|
|
"type": "string", |
|
|
"description": "The specific content of your reflection" |
|
|
} |
|
|
}, |
|
|
"required": ["reflect"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"batch_web_search": { |
|
|
"name": "batch_web_search", |
|
|
"description": "Search multiple queries using configurable search API with concurrent processing (no more than 8 search queries)", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"queries": { |
|
|
"type": "array", |
|
|
"items": {"type": "string"}, |
|
|
"description": "List of search queries" |
|
|
}, |
|
|
"max_results_per_query": { |
|
|
"type": "integer", |
|
|
"default": 4, |
|
|
"description": "Maximum search results per query (limited to 10)" |
|
|
}, |
|
|
"max_workers": { |
|
|
"type": "integer", |
|
|
"default": 5, |
|
|
"description": "Maximum number of concurrent search requests" |
|
|
} |
|
|
}, |
|
|
"required": ["queries"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"url_crawler": { |
|
|
"name": "url_crawler", |
|
|
"description": "Extract content from web pages using configurable URL crawler API. Input is a list of documents with metadata including URL and local file path for saving extracted content.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"documents": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"url": { |
|
|
"type": "string", |
|
|
"description": "Web page URL to extract content from" |
|
|
}, |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Local path to save extracted full text content" |
|
|
}, |
|
|
"title": { |
|
|
"type": "string", |
|
|
"description": "Title of the web page" |
|
|
}, |
|
|
"time": { |
|
|
"type": "string", |
|
|
"description": "Publication time of the web page" |
|
|
} |
|
|
}, |
|
|
"required": ["url", "file_path"] |
|
|
}, |
|
|
"description": "List of documents with metadata including URL and save path" |
|
|
}, |
|
|
"max_tokens_per_url": { |
|
|
"type": "integer", |
|
|
"default": 4000, |
|
|
"description": "Maximum tokens per URL result" |
|
|
}, |
|
|
"include_metadata": { |
|
|
"type": "boolean", |
|
|
"default": True, |
|
|
"description": "Whether to include extraction metadata" |
|
|
}, |
|
|
"max_workers": { |
|
|
"type": "integer", |
|
|
"default": 10, |
|
|
"description": "Maximum number of concurrent extraction requests" |
|
|
} |
|
|
}, |
|
|
"required": ["documents"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"document_qa": { |
|
|
"name": "document_qa", |
|
|
"description": "Answer questions based on content stored in local files. Each file has a corresponding question. Reads files and uses an AI model to answer each question using the respective file content as context.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"tasks": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file (relative to workspace root)" |
|
|
}, |
|
|
"question": { |
|
|
"type": "string", |
|
|
"description": "Question to ask about this file" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path", "question"] |
|
|
}, |
|
|
"description": "List of tasks, each containing a file path and a question" |
|
|
}, |
|
|
"max_tokens": { |
|
|
"type": "integer", |
|
|
"default": 2000, |
|
|
"description": "Maximum tokens for the AI response" |
|
|
}, |
|
|
"max_workers": { |
|
|
"type": "integer", |
|
|
"default": 5, |
|
|
"description": "Maximum number of concurrent model API requests" |
|
|
} |
|
|
}, |
|
|
"required": ["tasks"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"download_files": { |
|
|
"name": "download_files", |
|
|
"description": "Download files from URLs to the workspace", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"urls": { |
|
|
"type": "array", |
|
|
"items": {"type": "string"}, |
|
|
"description": "List of URLs to download" |
|
|
}, |
|
|
"target_directory": { |
|
|
"type": "string", |
|
|
"description": "Directory to save files" |
|
|
}, |
|
|
"overwrite": { |
|
|
"type": "boolean", |
|
|
"default": False, |
|
|
"description": "Whether to overwrite existing files" |
|
|
}, |
|
|
"max_file_size_mb": { |
|
|
"type": "integer", |
|
|
"default": 100, |
|
|
"description": "Maximum file size in MB" |
|
|
} |
|
|
}, |
|
|
"required": ["urls"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"list_workspace": { |
|
|
"name": "list_workspace", |
|
|
"description": "List files and directories in the workspace", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"path": { |
|
|
"type": "string", |
|
|
"description": "Specify the directory path to list, using a relative path" |
|
|
}, |
|
|
"recursive": { |
|
|
"type": "boolean", |
|
|
"default": False, |
|
|
"description": "Whether to list recursively" |
|
|
}, |
|
|
"include_hidden": { |
|
|
"type": "boolean", |
|
|
"default": False, |
|
|
"description": "Whether to include hidden files" |
|
|
}, |
|
|
"max_depth": { |
|
|
"type": "integer", |
|
|
"default": 3, |
|
|
"description": "Maximum recursion depth" |
|
|
} |
|
|
}, |
|
|
"required": [] |
|
|
} |
|
|
}, |
|
|
|
|
|
"str_replace_based_edit_tool": { |
|
|
"name": "str_replace_based_edit_tool", |
|
|
"description": "Create, view, and edit files with various operations", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"action": { |
|
|
"type": "string", |
|
|
"enum": ["create", "view", "str_replace", "insert", "append", "delete"], |
|
|
"description": "Action to perform" |
|
|
}, |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Path to the file" |
|
|
}, |
|
|
"content": { |
|
|
"type": "string", |
|
|
"description": "Content for create/insert/append actions" |
|
|
}, |
|
|
"old_str": { |
|
|
"type": "string", |
|
|
"description": "String to replace (for str_replace)" |
|
|
}, |
|
|
"new_str": { |
|
|
"type": "string", |
|
|
"description": "Replacement string (for str_replace)" |
|
|
}, |
|
|
"line_number": { |
|
|
"type": "integer", |
|
|
"description": "Line number for insert action" |
|
|
} |
|
|
}, |
|
|
"required": ["action", "file_path"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"file_read": { |
|
|
"name": "file_read", |
|
|
"description": "Read file content", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file (relative to workspace root)" |
|
|
}, |
|
|
"encoding": { |
|
|
"type": "string", |
|
|
"default": "utf-8", |
|
|
"description": "File encoding" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"load_json": { |
|
|
"name": "load_json", |
|
|
"description": "Read json format file", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file (relative to workspace root)" |
|
|
}, |
|
|
"encoding": { |
|
|
"type": "string", |
|
|
"default": "utf-8", |
|
|
"description": "File encoding" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"file_write": { |
|
|
"name": "file_write", |
|
|
"description": "Write content to file", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file (relative to workspace root)" |
|
|
}, |
|
|
"content": { |
|
|
"type": "string", |
|
|
"description": "Content to write" |
|
|
}, |
|
|
"encoding": { |
|
|
"type": "string", |
|
|
"default": "utf-8", |
|
|
"description": "File encoding" |
|
|
}, |
|
|
"create_dirs": { |
|
|
"type": "boolean", |
|
|
"default": True, |
|
|
"description": "Create parent directories" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path", "content"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"file_find_by_name": { |
|
|
"name": "file_find_by_name", |
|
|
"description": "Find files by name pattern", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"name_pattern": { |
|
|
"type": "string", |
|
|
"description": "Name pattern to search for" |
|
|
}, |
|
|
"recursive": { |
|
|
"type": "boolean", |
|
|
"default": True, |
|
|
"description": "Search recursively" |
|
|
}, |
|
|
"case_sensitive": { |
|
|
"type": "boolean", |
|
|
"default": False, |
|
|
"description": "Case sensitive search" |
|
|
}, |
|
|
"max_results": { |
|
|
"type": "integer", |
|
|
"default": 100, |
|
|
"description": "Maximum number of results" |
|
|
} |
|
|
}, |
|
|
"required": ["name_pattern"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"concat_section_files": { |
|
|
"name": "concat_section_files", |
|
|
"description": "Concatenate the content of the saved section files into a single file", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"final_file_path": { |
|
|
"type": "string", |
|
|
"description": "The final file path to save the concatenated content, save the file in the workspace **under the relative path `./report/`**, and specify the final_file_path as `./report/final_report.md`" |
|
|
}, |
|
|
"section_files": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the saved section file" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
}, |
|
|
"description": "List of section files to concatenate" |
|
|
} |
|
|
}, |
|
|
"required": ["section_files", "final_file_path"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"search_result_classifier": { |
|
|
"name": "search_result_classifier", |
|
|
"description": "Intelligently classify and organize search result files according to a structured outline for comprehensive long-form content generation. Analyzes files across fouer key dimensions (document time, source authority, core content, and task relevance) and assigns relevant files to appropriate outline sections. Files may be assigned to multiple sections when their content spans different topics.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"outline": { |
|
|
"type": "string", |
|
|
"description": "The outline here must be consistent with the content and structure of the outline generated above" |
|
|
}, |
|
|
"key_files": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file containing research content" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
}, |
|
|
"description": "List of research files to be classified according to the outline" |
|
|
}, |
|
|
"model": { |
|
|
"type": "string", |
|
|
"default": "pangu_auto", |
|
|
"description": "AI model to use for classification and organization" |
|
|
}, |
|
|
"temperature": { |
|
|
"type": "number", |
|
|
"default": 0.3, |
|
|
"description": "Creativity level for the AI classification (0-1)" |
|
|
}, |
|
|
"max_tokens": { |
|
|
"type": "integer", |
|
|
"default": 2000, |
|
|
"description": "Maximum tokens for the AI response" |
|
|
} |
|
|
}, |
|
|
"required": ["key_files", "outline"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"document_extract": { |
|
|
"name": "document_extract", |
|
|
"description": "Multi-dimensional analysis of locally stored files using AI models. Evaluates each file across four key dimensions: web page time extraction, source authority assessment, task relevance evaluation, and core content summarization (~300 words). Provides structured document analysis for research and content evaluation purposes.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"tasks": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file (relative to workspace root)" |
|
|
}, |
|
|
"task": { |
|
|
"type": "string", |
|
|
"description": "The content of the currently executed subtask" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path", "task"] |
|
|
}, |
|
|
"description": "List of tasks, each containing a file path and the current task" |
|
|
}, |
|
|
"model": { |
|
|
"type": "string", |
|
|
"default": "pangu_auto", |
|
|
"description": "AI model to use for generating answers" |
|
|
}, |
|
|
"temperature": { |
|
|
"type": "number", |
|
|
"default": 0.3, |
|
|
"description": "Creativity level for the AI response (0-1)" |
|
|
}, |
|
|
"max_tokens": { |
|
|
"type": "integer", |
|
|
"default": 2000, |
|
|
"description": "Maximum tokens for the AI response" |
|
|
}, |
|
|
"max_workers": { |
|
|
"type": "integer", |
|
|
"default": 5, |
|
|
"description": "Maximum number of concurrent model API requests" |
|
|
} |
|
|
}, |
|
|
"required": ["tasks"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"section_writer": { |
|
|
"name": "section_writer", |
|
|
"description": "Write the current chapter content based on given web information and chapter structure; also consider user questions, completed chapters, and overall outline to ensure content relevance while avoiding duplication or contradictions.", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"written_chapters_summary": { |
|
|
"type": "string", |
|
|
"description": "The summary of the written chapters, including the content of the chapters and the reflections on the chapters. Note that this field should be concatenated with the summaries of all previously written chapters with '\\n', and do not modify the original summary. For example, if the current chapter is the third chapter, the value of this field is 'chapter 1 summary \\n chapter 2 summary'. If not, the value is set to 'No previous chapters written yet.'" |
|
|
}, |
|
|
"task_content": { |
|
|
"type": "string", |
|
|
"description": "Detailed description of some requirements for writing the current chapter and avoidance prompts. If there are reflections from the `think` tool on previously written chapters, they can be added to this field." |
|
|
}, |
|
|
"user_query": { |
|
|
"type": "string", |
|
|
"description": "The user query, ensure the drafted content is highly relevant to the user's inquiry." |
|
|
}, |
|
|
"current_chapter_outline": { |
|
|
"type": "string", |
|
|
"description": "This field represents the current chapter structure to be drafted. When composing the chapter content, do not modify content and bold formatting symbols of the existing structure's titles!!!" |
|
|
}, |
|
|
"overall_outline": { |
|
|
"type": "string", |
|
|
"description": "This field represents the overall outline of the article. When drafting the chapter content, you should consider the overall outline to ensure the chapter content is consistent with the overall outline." |
|
|
}, |
|
|
"target_file_path": { |
|
|
"type": "string", |
|
|
"description": "The path to save the chapter content" |
|
|
}, |
|
|
"key_files": { |
|
|
"type": "array", |
|
|
"items": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Relative path to the file containing research content" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
}, |
|
|
"description": "These files are the source materials required for drafting the current chapter." |
|
|
}, |
|
|
"model": { |
|
|
"type": "string", |
|
|
"default": "pangu_auto", |
|
|
"description": "AI model to use for classification and organization" |
|
|
}, |
|
|
"temperature": { |
|
|
"type": "number", |
|
|
"default": 0.3, |
|
|
"description": "Creativity level for the AI classification (0-1)" |
|
|
}, |
|
|
"max_tokens": { |
|
|
"type": "integer", |
|
|
"default": 5000, |
|
|
"description": "Maximum tokens for the AI response" |
|
|
}, |
|
|
}, |
|
|
"required": ["user_query", "current_chapter_outline", "overall_outline", "target_file_path", "key_files"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"file_stats": { |
|
|
"name": "file_stats", |
|
|
"description": "Get comprehensive file statistics without reading full content - perfect for deciding reading strategy", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Path to the file (relative to workspace)" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
} |
|
|
}, |
|
|
|
|
|
"file_read_lines": { |
|
|
"name": "file_read_lines", |
|
|
"description": "Read specific line ranges from a file without loading entire file - perfect for large files", |
|
|
"inputSchema": { |
|
|
"type": "object", |
|
|
"properties": { |
|
|
"file_path": { |
|
|
"type": "string", |
|
|
"description": "Path to the file" |
|
|
}, |
|
|
"start_line": { |
|
|
"type": "integer", |
|
|
"default": 1, |
|
|
"description": "Starting line number (1-based)" |
|
|
}, |
|
|
"end_line": { |
|
|
"type": "integer", |
|
|
"description": "Ending line number (1-based, None for end of file)" |
|
|
}, |
|
|
"max_lines": { |
|
|
"type": "integer", |
|
|
"default": 1000, |
|
|
"description": "Maximum number of lines to read (safety limit)" |
|
|
} |
|
|
}, |
|
|
"required": ["file_path"] |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
} |
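
# Hedged sketch (not executed here): a host process could validate incoming
# tool arguments against these inputSchema definitions with the optional
# `jsonschema` package before dispatching to MCPTools; the tool name and
# payload below are illustrative.
#
#   from jsonschema import validate
#   schema = MCP_TOOL_SCHEMAS["file_read_lines"]["inputSchema"]
#   validate(instance={"file_path": "notes.txt", "start_line": 10}, schema=schema)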
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_tool_schemas() -> Dict[str, Any]: |
|
|
"""Get all tool schemas for MCP registration""" |
|
|
return MCP_TOOL_SCHEMAS |
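

# Minimal, hedged demo: listing the registered tool schemas needs no external
# configuration, while constructing MCPTools requires the project's config
# module to be importable, so that part is left as a commented example with a
# hypothetical workspace path.
if __name__ == "__main__":
    for tool_name in sorted(get_tool_schemas()):
        print(tool_name)

    # tools = MCPTools(workspace_path="/tmp/mcp_workspace")
    # tools.set_session_context("demo-session", "/tmp/mcp_workspace/demo-session")
    # print(tools.list_workspace(recursive=True).to_dict())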
|
|
|