| |
| """ |
| Smart PDF Downloader MCP Tool |
| |
| A standardized MCP tool using FastMCP for intelligent file downloading and document conversion. |
| Supports natural language instructions for downloading files from URLs, moving local files, |
| and automatic conversion to Markdown format with image extraction. |
| |
| Features: |
| - Natural language instruction parsing |
| - URL and local path extraction |
| - Automatic document conversion (PDF, DOCX, PPTX, HTML, etc.) |
| - Image extraction and preservation |
| - Multi-format support with fallback options |
| """ |
|
|
| import os |
| import re |
| import aiohttp |
| import aiofiles |
| import shutil |
| import sys |
| import io |
| from typing import List, Dict, Optional, Any |
| from urllib.parse import urlparse, unquote |
| from datetime import datetime |
|
|
| from mcp.server import FastMCP |
|
|
| |
# Optional dependency: docling provides rich, layout-aware document -> Markdown
# conversion. The tool degrades gracefully to PyPDF2 (or no conversion) without it.
try:
    from docling.document_converter import DocumentConverter
    from docling.datamodel.base_models import InputFormat
    from docling.datamodel.pipeline_options import PdfPipelineOptions
    from docling.document_converter import PdfFormatOption

    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False
    print(
        "Warning: docling package not available. Document conversion will be disabled."
    )


# Optional dependency: PyPDF2 is the lightweight fallback for PDF text extraction.
try:
    import PyPDF2

    PYPDF2_AVAILABLE = True
except ImportError:
    PYPDF2_AVAILABLE = False
    print(
        "Warning: PyPDF2 package not available. Fallback PDF extraction will be disabled."
    )


# Force UTF-8 on stdout/stderr so the emoji status markers used throughout this
# tool print correctly on platforms whose console encoding is not UTF-8.
# The comparison is case-insensitive ("UTF-8" vs "utf-8") and tolerates a
# missing encoding (None) — previously an uppercase "UTF-8" console was
# needlessly rewrapped.
if (sys.stdout.encoding or "").lower() != "utf-8":
    try:
        if hasattr(sys.stdout, "reconfigure"):
            # Python 3.7+: reconfigure the stream in place without detaching it.
            sys.stdout.reconfigure(encoding="utf-8")
            sys.stderr.reconfigure(encoding="utf-8")
        else:
            sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding="utf-8")
            sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding="utf-8")
    except Exception as e:
        # Best-effort only: never abort startup over console encoding.
        print(f"Warning: Could not set UTF-8 encoding: {e}")


# FastMCP server instance; the tools below register themselves via @mcp.tool().
mcp = FastMCP("smart-pdf-downloader")
|
|
|
|
| |
def format_success_message(action: str, details: Dict[str, Any]) -> str:
    """Build a ✅-prefixed success message with one indented line per detail pair."""
    detail_block = "\n".join(f" {key}: {value}" for key, value in details.items())
    return f"✅ {action}\n" + detail_block
|
|
|
|
def format_error_message(action: str, error: str) -> str:
    """Build a ❌-prefixed failure message with the error detail on its own line."""
    header = f"❌ {action}"
    detail = f" Error: {error}"
    return header + "\n" + detail
|
|
|
|
def format_warning_message(action: str, warning: str) -> str:
    """Build a ⚠️-prefixed warning message with the warning detail on its own line."""
    parts = [f"⚠️ {action}", f" Warning: {warning}"]
    return "\n".join(parts)
|
|
|
|
async def perform_document_conversion(
    file_path: str, extract_images: bool = True
) -> Optional[str]:
    """
    Run the shared post-download/post-move document conversion step.

    Currently only the PyPDF2 text-extraction path is wired in: if the file's
    content identifies it as a PDF (magic-number check), it is converted to a
    sibling Markdown file via SimplePdfConverter. Conversion is best-effort
    and never raises.

    Args:
        file_path: Path of the file that was just downloaded/moved.
        extract_images: Accepted for interface compatibility; the PyPDF2 path
            extracts text only, so this flag is currently unused.

    Returns:
        A human-readable conversion summary (starting with a newline, ready to
        append to a status message), or None if nothing was converted.
    """
    if not file_path:
        return None

    conversion_msg = ""

    # Detect PDFs by content, not extension: read the "%PDF" magic number.
    # If the file cannot be read at all, fall back to the extension heuristic.
    is_pdf_file = False
    if PYPDF2_AVAILABLE:
        try:
            with open(file_path, "rb") as f:
                is_pdf_file = f.read(8).startswith(b"%PDF")
        except Exception:
            is_pdf_file = file_path.lower().endswith(".pdf")

    # is_pdf_file can only be True when PyPDF2 is importable (set above), so no
    # second availability check is needed here.
    if is_pdf_file:
        try:
            simple_converter = SimplePdfConverter()
            conversion_result = simple_converter.convert_pdf_to_markdown(file_path)
            if conversion_result["success"]:
                conversion_msg = "\n [INFO] PDF converted to Markdown (PyPDF2)"
                conversion_msg += (
                    f"\n Markdown file: {conversion_result['output_file']}"
                )
                conversion_msg += (
                    f"\n Conversion time: {conversion_result['duration']:.2f} seconds"
                )
                conversion_msg += (
                    f"\n Pages extracted: {conversion_result['pages_extracted']}"
                )
            else:
                conversion_msg = f"\n [WARNING] PDF conversion failed: {conversion_result['error']}"
        except Exception as conv_error:
            # Conversion must never break the download/move that preceded it.
            conversion_msg = f"\n [WARNING] PDF conversion error: {str(conv_error)}"

    return conversion_msg if conversion_msg else None
|
|
|
|
def format_file_operation_result(
    operation: str,
    source: str,
    destination: str,
    result: Dict[str, Any],
    conversion_msg: Optional[str] = None,
) -> str:
    """
    Format the outcome of a file operation into a status message.

    Args:
        operation: Operation kind, "download" or "move".
        source: Source URL or file path.
        destination: Destination path.
        result: Result dict from download_file()/move_local_file().
        conversion_msg: Optional document-conversion summary to append.

    Returns:
        A multi-line status message.
    """
    if not result["success"]:
        return f"[ERROR] Failed to {operation}: {source}\n Error: {result.get('error', 'Unknown error')}"

    # Correct English past tense: the previous naive f"{operation}d" produced
    # "downloadd" for operation="download".
    past_tense = {"download": "downloaded", "move": "moved"}.get(
        operation, operation + "ed"
    )

    size_mb = result["size"] / (1024 * 1024)
    msg = f"[SUCCESS] Successfully {past_tense}: {source}\n"

    if operation == "download":
        msg += f" File: {destination}\n"
        msg += f" Size: {size_mb:.2f} MB\n"
        msg += f" Time: {result['duration']:.2f} seconds\n"
        speed_mb = result.get("speed", 0) / (1024 * 1024)
        msg += f" Speed: {speed_mb:.2f} MB/s"
    else:
        msg += f" To: {destination}\n"
        msg += f" Size: {size_mb:.2f} MB\n"
        msg += f" Time: {result['duration']:.2f} seconds"

    if conversion_msg:
        msg += conversion_msg

    return msg
|
|
|
|
class LocalPathExtractor:
    """Heuristic extraction of local filesystem paths from free-form text."""

    @staticmethod
    def is_local_path(path: str) -> bool:
        """Return True if *path* plausibly names a local file (never a URL)."""
        candidate = path.strip("\"'")

        # Anything with an http(s)/ftp scheme is a URL, never a local path.
        for scheme_re in (r"^https?://", r"^ftp://"):
            if re.match(scheme_re, candidate, re.IGNORECASE):
                return False

        markers = [os.path.sep, "/", "\\", "~", ".", ".."]
        looks_pathy = any(marker in candidate for marker in markers)
        has_ext = bool(os.path.splitext(candidate)[1])

        if not (looks_pathy or has_ext):
            return False

        # Accept if the expanded path exists on disk, or if it at least
        # contains a path-separator-like marker.
        return os.path.exists(os.path.expanduser(candidate)) or looks_pathy

    @staticmethod
    def extract_local_paths(text: str) -> List[str]:
        """Collect unique, ~-expanded local paths mentioned anywhere in *text*."""
        patterns = [
            r'"([^"]+)"',
            r"'([^']+)'",
            r"(?:^|\s)((?:[~./\\]|[A-Za-z]:)?(?:[^/\\\s]+[/\\])*[^/\\\s]+\.[A-Za-z0-9]+)(?:\s|$)",
            r"(?:^|\s)((?:~|\.{1,2})?/[^\s]+)(?:\s|$)",
            r"(?:^|\s)([A-Za-z]:[/\\][^\s]+)(?:\s|$)",
            r"(?:^|\s)(\.{1,2}[/\\][^\s]+)(?:\s|$)",
        ]

        # Gather every candidate first, then validate and de-duplicate in order.
        candidates = []
        for pattern in patterns:
            candidates.extend(re.findall(pattern, text, re.MULTILINE))

        found: List[str] = []
        for raw in candidates:
            raw = raw.strip()
            if not raw or not LocalPathExtractor.is_local_path(raw):
                continue
            expanded = os.path.expanduser(raw)
            if expanded not in found:
                found.append(expanded)

        return found
|
|
|
|
class URLExtractor:
    """Extract downloadable URLs from free-form text and infer local filenames."""

    # Ordered patterns: full http(s) URLs, ftp URLs, then bare domains with a
    # path component (e.g. "www.example.com/report.xlsx").
    URL_PATTERNS = [
        r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+(?:/(?:[-\w._~!$&\'()*+,;=:@]|%[\da-fA-F]{2})*)*(?:\?(?:[-\w._~!$&\'()*+,;=:@/?]|%[\da-fA-F]{2})*)?(?:#(?:[-\w._~!$&\'()*+,;=:@/?]|%[\da-fA-F]{2})*)?",
        r"ftp://(?:[-\w.]|(?:%[\da-fA-F]{2}))+(?:/(?:[-\w._~!$&\'()*+,;=:@]|%[\da-fA-F]{2})*)*",
        r"(?<!\S)(?:www\.)?[-\w]+(?:\.[-\w]+)+/(?:[-\w._~!$&\'()*+,;=:@/]|%[\da-fA-F]{2})+",
    ]

    @staticmethod
    def convert_arxiv_url(url: str) -> str:
        """Rewrite an arXiv abstract URL to its direct PDF download URL.

        e.g. https://arxiv.org/abs/2301.12345v2 -> https://arxiv.org/pdf/2301.12345.pdf
        Non-arXiv URLs are returned unchanged.
        """
        arxiv_pattern = r"arxiv\.org/abs/(\d+\.\d+)(?:v\d+)?"
        match = re.search(arxiv_pattern, url, re.IGNORECASE)
        if match:
            paper_id = match.group(1)
            return f"https://arxiv.org/pdf/{paper_id}.pdf"
        return url

    @classmethod
    def extract_urls(cls, text: str) -> List[str]:
        """Return unique URLs found in *text*, in order of first appearance."""
        urls = []

        # "@https://..." mentions (common in chat clients) are collected first.
        at_url_pattern = r"@(https?://[^\s]+)"
        at_matches = re.findall(at_url_pattern, text, re.IGNORECASE)
        for match in at_matches:
            url = cls.convert_arxiv_url(match.rstrip("/"))
            urls.append(url)

        for pattern in cls.URL_PATTERNS:
            matches = re.findall(pattern, text, re.IGNORECASE)
            for match in matches:
                # Scheme-less matches (bare domains, with or without "www.")
                # get an https:// scheme prepended. (The previous www/no-www
                # branches were identical, so they are collapsed here.)
                if not match.startswith(("http://", "https://", "ftp://")):
                    match = "https://" + match

                url = cls.convert_arxiv_url(match.rstrip("/"))
                urls.append(url)

        # De-duplicate while preserving first-seen order.
        seen = set()
        unique_urls = []
        for url in urls:
            if url not in seen:
                seen.add(url)
                unique_urls.append(url)

        return unique_urls

    @staticmethod
    def infer_filename_from_url(url: str) -> str:
        """Infer a sensible local filename for *url*.

        Special-cases arXiv PDF endpoints (which often omit the .pdf
        extension) and falls back to domain/timestamp-based names, guessing an
        extension from path hints when the URL path supplies none.
        """
        parsed = urlparse(url)
        path = unquote(parsed.path)

        filename = os.path.basename(path)

        # arXiv /pdf/ endpoints: ensure the paper id gets a .pdf extension.
        if "arxiv.org" in parsed.netloc and "/pdf/" in path:
            if filename:
                if not filename.lower().endswith((".pdf", ".doc", ".docx", ".txt")):
                    # e.g. /pdf/2301.12345 -> 2301.12345.pdf (previously this
                    # produced the literal placeholder "(unknown).pdf").
                    filename = f"{filename}.pdf"
            else:
                path_parts = [p for p in path.split("/") if p]
                if path_parts and path_parts[-1]:
                    filename = f"{path_parts[-1]}.pdf"
                else:
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    filename = f"arxiv_paper_{timestamp}.pdf"

        # No usable basename (or no extension): synthesize one.
        elif not filename or "." not in filename:
            domain = parsed.netloc.replace("www.", "").replace(".", "_")
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

            if not path or path == "/":
                filename = f"{domain}_{timestamp}.html"
            else:
                path_parts = [p for p in path.split("/") if p]
                if path_parts:
                    filename = f"{path_parts[-1]}_{timestamp}"
                else:
                    filename = f"{domain}_{timestamp}"

                # Guess an extension from hints in the URL path; default to
                # .html for web pages.
                if "." not in filename:
                    if "/pdf/" in path.lower() or path.lower().endswith("pdf"):
                        filename += ".pdf"
                    elif any(
                        ext in path.lower() for ext in ["/doc/", "/word/", ".docx"]
                    ):
                        filename += ".docx"
                    elif any(
                        ext in path.lower()
                        for ext in ["/ppt/", "/powerpoint/", ".pptx"]
                    ):
                        filename += ".pptx"
                    elif any(ext in path.lower() for ext in ["/csv/", ".csv"]):
                        filename += ".csv"
                    elif any(ext in path.lower() for ext in ["/zip/", ".zip"]):
                        filename += ".zip"
                    else:
                        filename += ".html"

        return filename
|
|
|
|
class PathExtractor:
    """Extract the destination path hinted at in a natural-language instruction."""

    @staticmethod
    def extract_target_path(text: str) -> Optional[str]:
        """Return the first plausible destination path in *text*, or None.

        English and Chinese phrasings are recognized; placeholder words like
        "here"/"这里" are rejected.
        """
        patterns = [
            r'(?:save|download|store|put|place|write|copy|move)\s+(?:to|into|in|at)\s+["\']?([^\s"\']+)["\']?',
            r'(?:to|into|in|at)\s+(?:folder|directory|dir|path|location)\s*["\']?([^\s"\']+)["\']?',
            r'(?:destination|target|output)\s*(?:is|:)?\s*["\']?([^\s"\']+)["\']?',
            r'(?:保存|下载|存储|放到|写入|复制|移动)(?:到|至|去)\s*["\']?([^\s"\']+)["\']?',
            r'(?:到|在|至)\s*["\']?([^\s"\']+)["\']?\s*(?:文件夹|目录|路径|位置)',
        ]

        filter_words = {
            "here",
            "there",
            "current",
            "local",
            "this",
            "that",
            "这里",
            "那里",
            "当前",
            "本地",
            "这个",
            "那个",
        }

        for pattern in patterns:
            found = re.search(pattern, text, re.IGNORECASE)
            if not found:
                continue
            # Trim trailing punctuation (both ASCII and CJK) from the capture.
            candidate = found.group(1).strip("。,,.、")
            if candidate and candidate.lower() not in filter_words:
                return candidate

        return None
|
|
|
|
class SimplePdfConverter:
    """Minimal PDF-to-Markdown converter based on PyPDF2 text extraction.

    Lightweight fallback used when the richer docling pipeline is not
    installed: extracts plain text page by page and writes a Markdown file
    with one section per page.
    """

    def convert_pdf_to_markdown(
        self, input_file: str, output_file: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Convert a PDF to Markdown using PyPDF2.

        Args:
            input_file: Path of the input PDF file.
            output_file: Path of the output Markdown file; defaults to the
                input path with a .md extension.

        Returns:
            A result dict: on success it carries output path, sizes, duration,
            the Markdown text and the page count; on failure it carries an
            "error" message. This method never raises.
        """
        if not PYPDF2_AVAILABLE:
            return {"success": False, "error": "PyPDF2 package is not available"}

        try:
            if not os.path.exists(input_file):
                return {
                    "success": False,
                    "error": f"Input file not found: {input_file}",
                }

            # Default output: same basename as the input, .md extension.
            if not output_file:
                base_name = os.path.splitext(input_file)[0]
                output_file = f"{base_name}.md"

            output_dir = os.path.dirname(output_file)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            start_time = datetime.now()

            with open(input_file, "rb") as file:
                pdf_reader = PyPDF2.PdfReader(file)
                text_content = []

                # One Markdown section per non-empty page. extract_text() can
                # return None/empty for image-only pages, so guard it.
                for page_num, page in enumerate(pdf_reader.pages, 1):
                    text = page.extract_text() or ""
                    if text.strip():
                        text_content.append(f"## Page {page_num}\n\n{text.strip()}\n\n")

                # Capture the page count while the file is still open; the
                # reader must not be consulted after the file is closed.
                page_total = len(pdf_reader.pages)

            markdown_content = f"# Extracted from {os.path.basename(input_file)}\n\n"
            markdown_content += f"*Total pages: {page_total}*\n\n"
            markdown_content += "---\n\n"
            markdown_content += "".join(text_content)

            with open(output_file, "w", encoding="utf-8") as f:
                f.write(markdown_content)

            duration = (datetime.now() - start_time).total_seconds()

            input_size = os.path.getsize(input_file)
            output_size = os.path.getsize(output_file)

            return {
                "success": True,
                "input_file": input_file,
                "output_file": output_file,
                "input_size": input_size,
                "output_size": output_size,
                "duration": duration,
                "markdown_content": markdown_content,
                "pages_extracted": page_total,
            }

        except Exception as e:
            return {
                "success": False,
                "input_file": input_file,
                "error": f"Conversion failed: {str(e)}",
            }
|
|
|
|
class DoclingConverter:
    """Document converter built on docling, producing Markdown with optional image extraction.

    Handles PDF/DOCX/PPTX/HTML/MD/TXT inputs, either local paths or http(s)
    URLs. Extracted images are written to an "images/" subdirectory next to
    the Markdown output and referenced with relative links.
    """

    def __init__(self):
        # Callers are expected to check DOCLING_AVAILABLE before instantiating.
        if not DOCLING_AVAILABLE:
            raise ImportError(
                "docling package is not available. Please install it first."
            )

        # Disable OCR and table-structure analysis: plain conversion is faster
        # and sufficient for this tool's Markdown output.
        pdf_pipeline_options = PdfPipelineOptions()
        pdf_pipeline_options.do_ocr = False
        pdf_pipeline_options.do_table_structure = False

        # Some docling versions reject these format options; fall back to a
        # default-configured converter in that case.
        try:
            self.converter = DocumentConverter(
                format_options={
                    InputFormat.PDF: PdfFormatOption(
                        pipeline_options=pdf_pipeline_options
                    )
                }
            )
        except Exception:
            self.converter = DocumentConverter()

    def is_supported_format(self, file_path: str) -> bool:
        """Return True if the file extension is one this converter accepts."""
        if not DOCLING_AVAILABLE:
            return False

        supported_extensions = {".pdf", ".docx", ".pptx", ".html", ".md", ".txt"}
        file_extension = os.path.splitext(file_path)[1].lower()
        return file_extension in supported_extensions

    def is_url(self, path: str) -> bool:
        """Return True if *path* is an http(s) URL."""
        try:
            result = urlparse(path)
            return result.scheme in ("http", "https")
        except Exception:
            return False

    def extract_images(self, doc, output_dir: str) -> Dict[str, str]:
        """
        Extract the document's images into output_dir/images.

        Args:
            doc: docling document object.
            output_dir: Directory the Markdown file will be written to.

        Returns:
            Mapping from image id to the image path relative to output_dir.
        """
        images_dir = os.path.join(output_dir, "images")
        os.makedirs(images_dir, exist_ok=True)
        image_map = {}

        try:
            # NOTE(review): relies on the doc exposing an "images" attribute;
            # docling versions without it simply yield no images here.
            images = getattr(doc, "images", [])

            for idx, img in enumerate(images):
                try:
                    # Normalize the image format; default to PNG when unknown.
                    ext = getattr(img, "format", None) or "png"
                    if ext.lower() not in ["png", "jpg", "jpeg", "gif", "bmp", "webp"]:
                        ext = "png"

                    filename = f"image_{idx+1}.{ext}"
                    filepath = os.path.join(images_dir, filename)

                    # Write the raw image bytes if the object carries any.
                    img_data = getattr(img, "data", None)
                    if img_data:
                        with open(filepath, "wb") as f:
                            f.write(img_data)

                    # Store a path relative to the Markdown file's directory so
                    # links survive moving the output folder as a whole.
                    rel_path = os.path.relpath(filepath, output_dir)
                    img_id = getattr(img, "id", str(idx + 1))
                    image_map[img_id] = rel_path

                except Exception as img_error:
                    # One bad image must not abort the rest of the extraction.
                    print(f"Warning: Failed to extract image {idx+1}: {img_error}")
                    continue

        except Exception as e:
            print(f"Warning: Failed to extract images: {e}")

        return image_map

    def process_markdown_with_images(
        self, markdown_content: str, image_map: Dict[str, str]
    ) -> str:
        """
        Rewrite docling image placeholders to local image paths.

        Args:
            markdown_content: Markdown as exported by docling.
            image_map: Mapping from image id to local relative path.

        Returns:
            Markdown with `![Image](docling://image/<id>)` links replaced where
            the id is known; unknown ids are left untouched.
        """

        def replace_img(match):
            img_id = match.group(1)
            if img_id in image_map:
                return f"![Image]({image_map[img_id]})"
            else:
                return match.group(0)

        processed_content = re.sub(
            r"!\[Image\]\(docling://image/([^)]+)\)", replace_img, markdown_content
        )

        return processed_content

    def convert_to_markdown(
        self,
        input_file: str,
        output_file: Optional[str] = None,
        extract_images: bool = True,
    ) -> Dict[str, Any]:
        """
        Convert a document to Markdown, optionally extracting images.

        Args:
            input_file: Input file path or URL.
            output_file: Output Markdown path; defaults to the input basename
                with a .md extension.
            extract_images: Whether to extract embedded images (default True).

        Returns:
            A result dict with output path, sizes, duration, Markdown text and
            image info on success, or an "error" message on failure. This
            method never raises.
        """
        if not DOCLING_AVAILABLE:
            return {"success": False, "error": "docling package is not available"}

        try:
            # Local files must exist and have a supported extension; URLs are
            # validated by extension only (no network probe here).
            if not self.is_url(input_file):
                if not os.path.exists(input_file):
                    return {
                        "success": False,
                        "error": f"Input file not found: {input_file}",
                    }

                if not self.is_supported_format(input_file):
                    return {
                        "success": False,
                        "error": f"Unsupported file format: {os.path.splitext(input_file)[1]}",
                    }
            else:
                if not input_file.lower().endswith(
                    (".pdf", ".docx", ".pptx", ".html", ".md", ".txt")
                ):
                    return {
                        "success": False,
                        "error": f"Unsupported URL format: {input_file}",
                    }

            # Default output filename: derived from the URL or the input path.
            if not output_file:
                if self.is_url(input_file):
                    filename = URLExtractor.infer_filename_from_url(input_file)
                    base_name = os.path.splitext(filename)[0]
                else:
                    base_name = os.path.splitext(input_file)[0]
                output_file = f"{base_name}.md"

            output_dir = os.path.dirname(output_file) or "."
            os.makedirs(output_dir, exist_ok=True)

            # Run the docling conversion (the expensive step).
            start_time = datetime.now()
            result = self.converter.convert(input_file)
            doc = result.document

            image_map = {}
            images_extracted = 0
            if extract_images:
                image_map = self.extract_images(doc, output_dir)
                images_extracted = len(image_map)

            markdown_content = doc.export_to_markdown()

            # Point docling image placeholders at the files extracted above.
            if extract_images and image_map:
                markdown_content = self.process_markdown_with_images(
                    markdown_content, image_map
                )

            with open(output_file, "w", encoding="utf-8") as f:
                f.write(markdown_content)

            duration = (datetime.now() - start_time).total_seconds()

            # Remote inputs have no known on-disk size; report 0.
            if self.is_url(input_file):
                input_size = 0
            else:
                input_size = os.path.getsize(input_file)
            output_size = os.path.getsize(output_file)

            return {
                "success": True,
                "input_file": input_file,
                "output_file": output_file,
                "input_size": input_size,
                "output_size": output_size,
                "duration": duration,
                "markdown_content": markdown_content,
                "images_extracted": images_extracted,
                "image_map": image_map,
            }

        except Exception as e:
            return {
                "success": False,
                "input_file": input_file,
                "error": f"Conversion failed: {str(e)}",
            }
|
|
|
|
async def check_url_accessible(url: str) -> Dict[str, Any]:
    """Probe *url* with a HEAD request and report accessibility metadata.

    Any network failure is reported as inaccessible (status 0) rather than
    raised, so callers can branch on the result dict alone.
    """
    unreachable = {
        "accessible": False,
        "status": 0,
        "content_type": "",
        "content_length": 0,
    }
    try:
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.head(url, allow_redirects=True) as response:
                headers = response.headers
                return {
                    "accessible": response.status < 400,
                    "status": response.status,
                    "content_type": headers.get("Content-Type", ""),
                    "content_length": headers.get("Content-Length", 0),
                }
    except Exception:
        return unreachable
|
|
|
|
async def download_file(url: str, destination: str) -> Dict[str, Any]:
    """Stream *url* to *destination* on disk and report transfer statistics.

    Network and filesystem failures are captured and returned as an error
    dict; this coroutine never raises.
    """
    started = datetime.now()
    chunk_bytes = 8192

    try:
        timeout = aiohttp.ClientTimeout(total=300)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url) as response:
                response.raise_for_status()

                content_type = response.headers.get(
                    "Content-Type", "application/octet-stream"
                )

                # Make sure the destination directory exists before writing.
                parent_dir = os.path.dirname(destination)
                if parent_dir:
                    os.makedirs(parent_dir, exist_ok=True)

                # Stream the body to disk in fixed-size chunks.
                total_bytes = 0
                async with aiofiles.open(destination, "wb") as file:
                    async for chunk in response.content.iter_chunked(chunk_bytes):
                        await file.write(chunk)
                        total_bytes += len(chunk)

                elapsed = (datetime.now() - started).total_seconds()

                return {
                    "success": True,
                    "url": url,
                    "destination": destination,
                    "size": total_bytes,
                    "content_type": content_type,
                    "duration": elapsed,
                    "speed": total_bytes / elapsed if elapsed > 0 else 0,
                }

    except aiohttp.ClientError as e:
        return {
            "success": False,
            "url": url,
            "destination": destination,
            "error": f"Network error: {str(e)}",
        }
    except Exception as e:
        return {
            "success": False,
            "url": url,
            "destination": destination,
            "error": f"Download error: {str(e)}",
        }
|
|
|
|
async def move_local_file(source_path: str, destination: str) -> Dict[str, Any]:
    """Move a file on the local filesystem, reporting size and duration.

    Failures (missing source, filesystem errors) are returned as an error
    dict; this coroutine never raises.
    """
    started = datetime.now()

    try:
        # Guard clause: a missing source is a reportable failure, not an exception.
        if not os.path.exists(source_path):
            return {
                "success": False,
                "source": source_path,
                "destination": destination,
                "error": f"Source file not found: {source_path}",
            }

        # Record the size before the move; the source is gone afterwards.
        byte_count = os.path.getsize(source_path)

        # Create the destination directory tree on demand.
        target_dir = os.path.dirname(destination)
        if target_dir:
            os.makedirs(target_dir, exist_ok=True)

        shutil.move(source_path, destination)

        return {
            "success": True,
            "source": source_path,
            "destination": destination,
            "size": byte_count,
            "duration": (datetime.now() - started).total_seconds(),
            "operation": "move",
        }

    except Exception as e:
        return {
            "success": False,
            "source": source_path,
            "destination": destination,
            "error": f"Move error: {str(e)}",
        }
|
|
|
|
def _resolve_destination(target_path: Optional[str], filename: str) -> str:
    """Resolve the final on-disk destination for *filename*.

    A target containing a file extension is treated as the complete file path;
    otherwise it is a directory and *filename* is appended. No target means
    the current working directory. "~" is expanded and relative targets are
    normalized (both idempotent, so repeated calls agree).
    """
    if not target_path:
        return filename

    if target_path.startswith("~"):
        target_path = os.path.expanduser(target_path)

    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)

    if os.path.splitext(target_path)[1]:
        return target_path
    return os.path.join(target_path, filename)


async def _download_one_url(url: str, target_path: Optional[str]) -> str:
    """Download one URL (with post-download conversion); return a status message."""
    try:
        filename = URLExtractor.infer_filename_from_url(url)
        destination = _resolve_destination(target_path, filename)

        # Never overwrite an existing file.
        if os.path.exists(destination):
            return f"[WARNING] Skipped {url}: File already exists at {destination}"

        # Probe with HEAD first so dead links fail fast without a download.
        check_result = await check_url_accessible(url)
        if not check_result["accessible"]:
            return f"[ERROR] Failed to access {url}: HTTP {check_result['status'] or 'Connection failed'}"

        result = await download_file(url, destination)

        # Best-effort conversion of the downloaded document to Markdown.
        conversion_msg = None
        if result["success"]:
            conversion_msg = await perform_document_conversion(
                destination, extract_images=True
            )

        return format_file_operation_result(
            "download", url, destination, result, conversion_msg
        )
    except Exception as e:
        return f"[ERROR] Failed to download: {url}\n Error: {str(e)}"


async def _move_one_path(local_path: str, target_path: Optional[str]) -> str:
    """Move one local file (with post-move conversion); return a status message."""
    try:
        filename = os.path.basename(local_path)
        destination = _resolve_destination(target_path, filename)

        # Never overwrite an existing file.
        if os.path.exists(destination):
            return f"[WARNING] Skipped {local_path}: File already exists at {destination}"

        result = await move_local_file(local_path, destination)

        # Best-effort conversion of the moved document to Markdown.
        conversion_msg = None
        if result["success"]:
            conversion_msg = await perform_document_conversion(
                destination, extract_images=True
            )

        return format_file_operation_result(
            "move", local_path, destination, result, conversion_msg
        )
    except Exception as e:
        return f"[ERROR] Failed to move: {local_path}\n Error: {str(e)}"


@mcp.tool()
async def download_files(instruction: str) -> str:
    """
    Download files from URLs or move local files mentioned in natural language instructions.

    Args:
        instruction: Natural language instruction containing URLs/local paths and optional destination paths

    Returns:
        Status message about the download/move operations

    Examples:
        - "Download https://example.com/file.pdf to documents folder"
        - "Move /home/user/file.pdf to documents folder"
        - "Please get https://raw.githubusercontent.com/user/repo/main/data.csv and save it to ~/downloads"
        - "移动 ~/Desktop/report.docx 到 /tmp/documents/"
        - "Download www.example.com/report.xlsx"
    """
    urls = URLExtractor.extract_urls(instruction)
    local_paths = LocalPathExtractor.extract_local_paths(instruction)

    if not urls and not local_paths:
        return format_error_message(
            "Failed to parse instruction",
            "No downloadable URLs or movable local files found",
        )

    # A single destination hint (if any) applies to every file mentioned.
    # Destination resolution was previously duplicated verbatim in both loops;
    # it now lives in _resolve_destination.
    target_path = PathExtractor.extract_target_path(instruction)

    results = [await _download_one_url(url, target_path) for url in urls]
    results += [await _move_one_path(path, target_path) for path in local_paths]

    return "\n\n".join(results)
|
|
|
|
@mcp.tool()
async def parse_download_urls(text: str) -> str:
    """
    Extract URLs, local paths and target paths from text without downloading or moving.

    Args:
        text: Text containing URLs, local paths and optional destination paths

    Returns:
        Parsed URLs, local paths and target path information
    """
    urls = URLExtractor.extract_urls(text)
    local_paths = LocalPathExtractor.extract_local_paths(text)
    target_path = PathExtractor.extract_target_path(text)

    content = "📋 Parsed file operation information:\n\n"

    if urls:
        content += f"🔗 URLs found ({len(urls)}):\n"
        for i, url in enumerate(urls, 1):
            # Show the filename the downloader would actually use for each URL
            # (previously the computed name was discarded and a literal
            # placeholder was printed instead).
            filename = URLExtractor.infer_filename_from_url(url)
            content += f" {i}. {url}\n 📄 Filename: {filename}\n"
    else:
        content += "🔗 No URLs found\n"

    if local_paths:
        content += f"\n📁 Local files found ({len(local_paths)}):\n"
        for i, path in enumerate(local_paths, 1):
            exists = os.path.exists(path)
            content += f" {i}. {path}\n"
            content += f" ✅ Exists: {'Yes' if exists else 'No'}\n"
            if exists:
                size_mb = os.path.getsize(path) / (1024 * 1024)
                content += f" 📊 Size: {size_mb:.2f} MB\n"
    else:
        content += "\n📁 No local files found\n"

    if target_path:
        content += f"\n🎯 Target path: {target_path}"
        if target_path.startswith("~"):
            content += f"\n (Expanded: {os.path.expanduser(target_path)})"
    else:
        content += "\n🎯 Target path: Not specified (will use current directory)"

    return content
|
|
|
|
@mcp.tool()
async def download_file_to(
    url: str, destination: Optional[str] = None, filename: Optional[str] = None
) -> str:
    """
    Download a specific file with detailed options.

    Args:
        url: URL to download from
        destination: Target directory or full file path (optional)
        filename: Specific filename to use (optional, ignored if destination is a full file path)

    Returns:
        Status message about the download operation
    """
    # Fall back to a filename inferred from the URL itself.
    if not filename:
        filename = URLExtractor.infer_filename_from_url(url)

    # Work out the final path: an explicit file path (has an extension) wins,
    # a directory gets the filename appended, no destination means cwd.
    if not destination:
        target_path = filename
    else:
        if destination.startswith("~"):
            destination = os.path.expanduser(destination)

        if os.path.splitext(destination)[1]:
            target_path = destination
        else:
            target_path = os.path.join(destination, filename)

    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)

    # Refuse to overwrite an existing file.
    if os.path.exists(target_path):
        return format_error_message(
            "Download aborted", f"File already exists at {target_path}"
        )

    # Probe the URL before committing to a download.
    check_result = await check_url_accessible(url)
    if not check_result["accessible"]:
        return format_error_message(
            "Cannot access URL",
            f"{url} (HTTP {check_result['status'] or 'Connection failed'})",
        )

    # Pre-download banner; the Content-Length header may be absent (0).
    expected_mb = (
        int(check_result["content_length"]) / (1024 * 1024)
        if check_result["content_length"]
        else 0
    )
    msg = "[INFO] Downloading file:\n"
    msg += f" URL: {url}\n"
    msg += f" Target: {target_path}\n"
    if expected_mb > 0:
        msg += f" Expected size: {expected_mb:.2f} MB\n"
    msg += "\n"

    result = await download_file(url, target_path)

    conversion_msg = None
    if result["success"]:
        # Best-effort conversion of the downloaded document to Markdown.
        conversion_msg = await perform_document_conversion(
            target_path, extract_images=True
        )

        actual_size_mb = result["size"] / (1024 * 1024)
        speed_mb = result["speed"] / (1024 * 1024)
        info_msg = "[SUCCESS] Download completed!\n"
        info_msg += f" Saved to: {target_path}\n"
        info_msg += f" Size: {actual_size_mb:.2f} MB\n"
        info_msg += f" Duration: {result['duration']:.2f} seconds\n"
        info_msg += f" Speed: {speed_mb:.2f} MB/s\n"
        info_msg += f" Type: {result['content_type']}"

        if conversion_msg:
            info_msg += conversion_msg

        return msg + info_msg
    else:
        return msg + f"[ERROR] Download failed!\n Error: {result['error']}"
|
|
|
|
@mcp.tool()
async def move_file_to(
    source: str, destination: Optional[str] = None, filename: Optional[str] = None
) -> str:
    """
    Move a local file to a new location with detailed options.

    Args:
        source: Source file path to move
        destination: Target directory or full file path (optional)
        filename: Specific filename to use (optional, ignored if destination is a full file path)

    Returns:
        Status message about the move operation
    """
    # Expand a leading ~ in the source path.
    if source.startswith("~"):
        source = os.path.expanduser(source)

    # The source must exist before anything else is computed.
    if not os.path.exists(source):
        return format_error_message("Move aborted", f"Source file not found: {source}")

    # Default the filename to the source's basename.
    if not filename:
        filename = os.path.basename(source)

    # Resolve the final target path: an explicit file path (has an extension)
    # wins, a directory gets the filename appended, no destination means cwd.
    if destination:
        if destination.startswith("~"):
            destination = os.path.expanduser(destination)

        if os.path.splitext(destination)[1]:
            target_path = destination
        else:
            target_path = os.path.join(destination, filename)
    else:
        target_path = filename

    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)

    # Never overwrite an existing file. Uses the same error formatting as the
    # other abort path above (previously this branch returned a bare string,
    # inconsistent with the rest of the function).
    if os.path.exists(target_path):
        return format_error_message(
            "Move aborted", f"Target file already exists: {target_path}"
        )

    # Pre-move banner with the source size.
    source_size_mb = os.path.getsize(source) / (1024 * 1024)
    msg = "[INFO] Moving file:\n"
    msg += f" Source: {source}\n"
    msg += f" Target: {target_path}\n"
    msg += f" Size: {source_size_mb:.2f} MB\n"
    msg += "\n"

    result = await move_local_file(source, target_path)

    conversion_msg = None
    if result["success"]:
        # Best-effort conversion of the moved document to Markdown.
        conversion_msg = await perform_document_conversion(
            target_path, extract_images=True
        )

        info_msg = "[SUCCESS] File moved successfully!\n"
        info_msg += f" From: {source}\n"
        info_msg += f" To: {target_path}\n"
        info_msg += f" Duration: {result['duration']:.2f} seconds"

        if conversion_msg:
            info_msg += conversion_msg

        return msg + info_msg
    else:
        return msg + f"[ERROR] Move failed!\n Error: {result['error']}"
|
|
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
|
|
if __name__ == "__main__":
    # Startup banner and capability report, printed to the local console
    # (not sent to the MCP client).
    print("📄 Smart PDF Downloader MCP Tool")
    print("📝 Starting server with FastMCP...")

    if DOCLING_AVAILABLE:
        print("✅ Document conversion to Markdown is ENABLED (docling available)")
    else:
        print("❌ Document conversion to Markdown is DISABLED (docling not available)")
        print(" Install docling to enable: pip install docling")

    print("\nAvailable tools:")
    print(
        " • download_files - Download files or move local files from natural language"
    )
    print(" • parse_download_urls - Extract URLs, local paths and destination paths")
    print(" • download_file_to - Download a specific file with options")
    print(" • move_file_to - Move a specific local file with options")
    # NOTE(review): convert_document_to_markdown is advertised below but no
    # such @mcp.tool is visible in this file — confirm it exists elsewhere or
    # drop the line.
    print(" • convert_document_to_markdown - Convert documents to Markdown format")

    if DOCLING_AVAILABLE:
        print("\nSupported formats: PDF, DOCX, PPTX, HTML, TXT, MD")
        print("Features: Image extraction, Layout preservation, Automatic conversion")

    print("")

    # Blocking call: serve MCP requests until the process is terminated.
    mcp.run()
|
|