Add files using upload-large-folder tool
Browse files
- projects/ui/DeepCode/tools/pdf_converter.py +640 -0
- projects/ui/DeepCode/tools/pdf_downloader.py +1379 -0
- projects/ui/DeepCode/tools/pdf_utils.py +52 -0
- projects/ui/DeepCode/ui/__init__.py +43 -0
- projects/ui/DeepCode/ui/app.py +13 -0
- projects/ui/DeepCode/ui/components.py +1450 -0
- projects/ui/DeepCode/ui/handlers.py +773 -0
- projects/ui/DeepCode/ui/layout.py +106 -0
- projects/ui/DeepCode/ui/streamlit_app.py +38 -0
- projects/ui/DeepCode/ui/styles.py +2590 -0
- projects/ui/DeepCode/utils/__init__.py +17 -0
- projects/ui/DeepCode/utils/cli_interface.py +459 -0
- projects/ui/DeepCode/utils/dialogue_logger.py +671 -0
- projects/ui/DeepCode/utils/file_processor.py +438 -0
- projects/ui/DeepCode/utils/llm_utils.py +231 -0
- projects/ui/DeepCode/utils/simple_llm_logger.py +198 -0
- projects/ui/DeepCode/workflows/__init__.py +31 -0
- projects/ui/DeepCode/workflows/agent_orchestration_engine.py +1572 -0
- projects/ui/DeepCode/workflows/code_implementation_workflow.py +993 -0
- projects/ui/DeepCode/workflows/code_implementation_workflow_index.py +997 -0
projects/ui/DeepCode/tools/pdf_converter.py
ADDED
|
@@ -0,0 +1,640 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
PDF Converter Utility
|
| 4 |
+
|
| 5 |
+
This module provides functionality for converting various document formats to PDF,
|
| 6 |
+
including Office documents (.doc, .docx, .ppt, .pptx, .xls, .xlsx) and text files (.txt, .md).
|
| 7 |
+
|
| 8 |
+
Requirements:
|
| 9 |
+
- LibreOffice for Office document conversion
|
| 10 |
+
- ReportLab for text-to-PDF conversion
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import logging
|
| 17 |
+
import subprocess
|
| 18 |
+
import tempfile
|
| 19 |
+
import shutil
|
| 20 |
+
import platform
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
from typing import Union, Optional, Dict, Any
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class PDFConverter:
    """
    PDF conversion utility class.

    Provides methods to convert Office documents (.doc, .docx, .ppt, .pptx,
    .xls, .xlsx) and text files (.txt, .md) to PDF format.

    Requirements:
        - LibreOffice (external program) for Office document conversion
        - ReportLab (Python package) for text/markdown-to-PDF conversion
    """

    # Extensions converted through LibreOffice.
    OFFICE_FORMATS = {".doc", ".docx", ".ppt", ".pptx", ".xls", ".xlsx"}
    # Extensions converted through the ReportLab text renderer.
    TEXT_FORMATS = {".txt", ".md"}

    # Class-level logger, named after this module.
    logger = logging.getLogger(__name__)

    # subprocess.CREATE_NO_WINDOW: hides the console window on Windows.
    _CREATE_NO_WINDOW = 0x08000000

    def __init__(self) -> None:
        """Initialize the PDF converter (stateless; nothing to set up)."""

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @classmethod
    def _run_kwargs(cls, **overrides: Any) -> Dict[str, Any]:
        """
        Build common keyword arguments for subprocess.run.

        Always captures output as UTF-8 text (decode errors ignored) and,
        on Windows, hides the console window. ``overrides`` (e.g. ``timeout``,
        ``check``, ``text``) are merged on top of the defaults.
        """
        kwargs: Dict[str, Any] = {
            "capture_output": True,
            "encoding": "utf-8",
            "errors": "ignore",
        }
        kwargs.update(overrides)
        if platform.system() == "Windows":
            kwargs["creationflags"] = cls._CREATE_NO_WINDOW
        return kwargs

    @classmethod
    def _find_libreoffice(cls) -> Optional[str]:
        """
        Probe for a working LibreOffice executable.

        Returns:
            The command name ("libreoffice" or "soffice") that responded to
            ``--version``, or None when LibreOffice is not available.
        """
        for cmd in ("libreoffice", "soffice"):
            try:
                result = subprocess.run(
                    [cmd, "--version"],
                    **cls._run_kwargs(check=True, timeout=10),
                )
            except (
                subprocess.CalledProcessError,
                FileNotFoundError,
                subprocess.TimeoutExpired,
            ):
                continue
            cls.logger.info(
                "LibreOffice detected with command '%s': %s",
                cmd,
                result.stdout.strip(),
            )
            return cmd
        return None

    # ------------------------------------------------------------------
    # Conversion methods
    # ------------------------------------------------------------------

    @staticmethod
    def convert_office_to_pdf(
        doc_path: Union[str, Path], output_dir: Optional[str] = None
    ) -> Path:
        """
        Convert an Office document (.doc, .docx, .ppt, .pptx, .xls, .xlsx)
        to PDF. Requires LibreOffice to be installed.

        Args:
            doc_path: Path to the Office document file.
            output_dir: Output directory for the PDF file; defaults to a
                "pdf_output" directory next to the source document.

        Returns:
            Path to the generated PDF file.

        Raises:
            FileNotFoundError: If the source document does not exist.
            RuntimeError: If LibreOffice is missing or conversion fails.
        """
        try:
            doc_path = Path(doc_path)
            if not doc_path.exists():
                raise FileNotFoundError(f"Office document does not exist: {doc_path}")

            base_output_dir = (
                Path(output_dir) if output_dir else doc_path.parent / "pdf_output"
            )
            base_output_dir.mkdir(parents=True, exist_ok=True)

            working_cmd = PDFConverter._find_libreoffice()
            if working_cmd is None:
                raise RuntimeError(
                    "LibreOffice is required for Office document conversion but was not found.\n"
                    "Please install LibreOffice:\n"
                    "- Windows: Download from https://www.libreoffice.org/download/download/\n"
                    "- macOS: brew install --cask libreoffice\n"
                    "- Ubuntu/Debian: sudo apt-get install libreoffice\n"
                    "- CentOS/RHEL: sudo yum install libreoffice\n"
                    "Alternatively, convert the document to PDF manually."
                )

            # Convert into a temp dir first so partial/failed output never
            # lands in the caller's output directory.
            with tempfile.TemporaryDirectory() as temp_dir:
                temp_path = Path(temp_dir)
                PDFConverter.logger.info(
                    "Converting %s to PDF using LibreOffice...", doc_path.name
                )

                # Try the detected command first, then the alternative name.
                fallback = "soffice" if working_cmd == "libreoffice" else "libreoffice"
                conversion_successful = False
                for cmd in (working_cmd, fallback):
                    try:
                        result = subprocess.run(
                            [
                                cmd,
                                "--headless",
                                "--convert-to",
                                "pdf",
                                "--outdir",
                                str(temp_path),
                                str(doc_path),
                            ],
                            **PDFConverter._run_kwargs(text=True, timeout=60),
                        )
                        if result.returncode == 0:
                            conversion_successful = True
                            PDFConverter.logger.info(
                                "Successfully converted %s to PDF", doc_path.name
                            )
                            break
                        PDFConverter.logger.warning(
                            "LibreOffice command '%s' failed: %s", cmd, result.stderr
                        )
                    except subprocess.TimeoutExpired:
                        PDFConverter.logger.warning(
                            "LibreOffice command '%s' timed out", cmd
                        )
                    except Exception as e:  # keep trying the fallback command
                        PDFConverter.logger.error(
                            "LibreOffice command '%s' failed with exception: %s",
                            cmd,
                            e,
                        )

                if not conversion_successful:
                    raise RuntimeError(
                        f"LibreOffice conversion failed for {doc_path.name}. "
                        f"Please check if the file is corrupted or try converting manually."
                    )

                pdf_files = list(temp_path.glob("*.pdf"))
                if not pdf_files:
                    raise RuntimeError(
                        f"PDF conversion failed for {doc_path.name} - no PDF file generated. "
                        f"Please check LibreOffice installation or try manual conversion."
                    )

                pdf_path = pdf_files[0]
                PDFConverter.logger.info(
                    "Generated PDF: %s (%d bytes)",
                    pdf_path.name,
                    pdf_path.stat().st_size,
                )

                # A tiny file almost certainly means LibreOffice produced an
                # empty/corrupted document.
                if pdf_path.stat().st_size < 100:
                    raise RuntimeError(
                        "Generated PDF appears to be empty or corrupted. "
                        "Original file may have issues or LibreOffice conversion failed."
                    )

                final_pdf_path = base_output_dir / f"{doc_path.stem}.pdf"
                shutil.copy2(pdf_path, final_pdf_path)
                return final_pdf_path

        except Exception as e:
            PDFConverter.logger.error("Error in convert_office_to_pdf: %s", e)
            raise

    @staticmethod
    def convert_text_to_pdf(
        text_path: Union[str, Path], output_dir: Optional[str] = None
    ) -> Path:
        """
        Convert a text file (.txt, .md) to PDF using ReportLab.

        Markdown files get simplified rendering (headers plus inline
        bold/italic/code/link/strikethrough markup); plain text is rendered
        line by line.

        Args:
            text_path: Path to the text file.
            output_dir: Output directory for the PDF file; defaults to a
                "pdf_output" directory next to the source file.

        Returns:
            Path to the generated PDF file.

        Raises:
            FileNotFoundError: If the source file does not exist.
            ValueError: If the extension is not .txt or .md.
            RuntimeError: If ReportLab is missing or rendering fails.
        """
        try:
            text_path = Path(text_path)
            if not text_path.exists():
                raise FileNotFoundError(f"Text file does not exist: {text_path}")

            if text_path.suffix.lower() not in PDFConverter.TEXT_FORMATS:
                raise ValueError(f"Unsupported text format: {text_path.suffix}")

            # Read the content, falling back through common legacy encodings.
            try:
                text_content = text_path.read_text(encoding="utf-8")
            except UnicodeDecodeError:
                for encoding in ("gbk", "latin-1", "cp1252"):
                    try:
                        text_content = text_path.read_text(encoding=encoding)
                        PDFConverter.logger.info(
                            "Successfully read file with %s encoding", encoding
                        )
                        break
                    except UnicodeDecodeError:
                        continue
                else:
                    raise RuntimeError(
                        f"Could not decode text file {text_path.name} with any supported encoding"
                    )

            base_output_dir = (
                Path(output_dir) if output_dir else text_path.parent / "pdf_output"
            )
            base_output_dir.mkdir(parents=True, exist_ok=True)
            pdf_path = base_output_dir / f"{text_path.stem}.pdf"

            PDFConverter.logger.info("Converting %s to PDF...", text_path.name)

            try:
                from reportlab.lib.pagesizes import A4
                from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
                from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
                from reportlab.lib.units import inch
                from reportlab.pdfbase import pdfmetrics

                doc = SimpleDocTemplate(
                    str(pdf_path),
                    pagesize=A4,
                    leftMargin=inch,
                    rightMargin=inch,
                    topMargin=inch,
                    bottomMargin=inch,
                )

                styles = getSampleStyleSheet()
                normal_style = styles["Normal"]
                heading_style = styles["Heading1"]

                # Best-effort registration of a CJK-capable system font; fall
                # back silently to the ReportLab defaults if none register.
                candidate_fonts = {
                    "Windows": ["SimSun", "SimHei", "Microsoft YaHei"],
                    "Darwin": ["STSong-Light", "STHeiti"],
                }.get(platform.system(), [])
                for font_name in candidate_fonts:
                    try:
                        from reportlab.pdfbase.cidfonts import UnicodeCIDFont

                        pdfmetrics.registerFont(UnicodeCIDFont(font_name))
                        normal_style.fontName = font_name
                        heading_style.fontName = font_name
                        break
                    except Exception:
                        continue

                story = []

                if text_path.suffix.lower() == ".md":
                    # Simplified markdown rendering: headers and inline markup.
                    for line in text_content.split("\n"):
                        line = line.strip()
                        if not line:
                            story.append(Spacer(1, 12))
                            continue

                        if line.startswith("#"):
                            # Header level = number of leading '#' characters.
                            level = len(line) - len(line.lstrip("#"))
                            header_text = line.lstrip("#").strip()
                            if header_text:
                                header_style = ParagraphStyle(
                                    name=f"Heading{level}",
                                    parent=heading_style,
                                    fontSize=max(16 - level, 10),
                                    spaceAfter=8,
                                    spaceBefore=16 if level <= 2 else 12,
                                )
                                story.append(Paragraph(header_text, header_style))
                        else:
                            processed_line = PDFConverter._process_inline_markdown(line)
                            story.append(Paragraph(processed_line, normal_style))
                            story.append(Spacer(1, 6))
                else:
                    # Plain text: one Paragraph per line.
                    PDFConverter.logger.info(
                        "Processing plain text file with %d characters...",
                        len(text_content),
                    )
                    line_count = 0
                    for line in text_content.split("\n"):
                        line = line.rstrip()
                        line_count += 1

                        if not line.strip():
                            story.append(Spacer(1, 6))
                            continue

                        # Escape XML special characters: Paragraph parses its
                        # input as markup, so raw &, <, > would break it.
                        safe_line = (
                            line.replace("&", "&amp;")
                            .replace("<", "&lt;")
                            .replace(">", "&gt;")
                        )
                        story.append(Paragraph(safe_line, normal_style))
                        story.append(Spacer(1, 3))

                    PDFConverter.logger.info("Added %d lines to PDF", line_count)

                if not story:
                    story.append(Paragraph("(Empty text file)", normal_style))

                doc.build(story)
                PDFConverter.logger.info(
                    "Successfully converted %s to PDF (%.1f KB)",
                    text_path.name,
                    pdf_path.stat().st_size / 1024,
                )

            except ImportError:
                raise RuntimeError(
                    "reportlab is required for text-to-PDF conversion. "
                    "Please install it using: pip install reportlab"
                )
            except Exception as e:
                raise RuntimeError(
                    f"Failed to convert text file {text_path.name} to PDF: {str(e)}"
                ) from e

            # Sanity-check the rendered file.
            if not pdf_path.exists() or pdf_path.stat().st_size < 100:
                raise RuntimeError(
                    f"PDF conversion failed for {text_path.name} - generated PDF is empty or corrupted."
                )

            return pdf_path

        except Exception as e:
            PDFConverter.logger.error("Error in convert_text_to_pdf: %s", e)
            raise

    @staticmethod
    def _process_inline_markdown(text: str) -> str:
        """
        Convert inline markdown formatting (bold, italic, code, links,
        strikethrough) to ReportLab paragraph markup.

        Args:
            text: Raw text with markdown formatting.

        Returns:
            Text with ReportLab XML markup. &, < and > are escaped first so
            user content cannot break the generated markup.
        """
        import re

        # Escape XML special characters before inserting our own tags.
        text = text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")

        # Bold: **text** or __text__
        text = re.sub(r"\*\*(.*?)\*\*", r"<b>\1</b>", text)
        text = re.sub(r"__(.*?)__", r"<b>\1</b>", text)

        # Italic: *text* or _text_ (but not in the middle of words)
        text = re.sub(r"(?<!\w)\*([^*\n]+?)\*(?!\w)", r"<i>\1</i>", text)
        text = re.sub(r"(?<!\w)_([^_\n]+?)_(?!\w)", r"<i>\1</i>", text)

        # Inline code: `code`
        text = re.sub(
            r"`([^`]+?)`",
            r'<font name="Courier" size="9" color="darkred">\1</font>',
            text,
        )

        # Links: [text](url) -> clickable, underlined blue text.
        def link_replacer(match):
            link_text = match.group(1)
            url = match.group(2)
            return f'<link href="{url}" color="blue"><u>{link_text}</u></link>'

        text = re.sub(r"\[([^\]]+?)\]\(([^)]+?)\)", link_replacer, text)

        # Strikethrough: ~~text~~
        text = re.sub(r"~~(.*?)~~", r"<strike>\1</strike>", text)

        return text

    def convert_to_pdf(
        self,
        file_path: Union[str, Path],
        output_dir: Optional[str] = None,
    ) -> Path:
        """
        Convert a document to PDF, dispatching on its file extension.

        Args:
            file_path: Path to the file to be converted.
            output_dir: Output directory path.

        Returns:
            Path to the generated PDF file.

        Raises:
            FileNotFoundError: If the source file does not exist.
            ValueError: If the extension is not supported.
        """
        file_path = Path(file_path)
        if not file_path.exists():
            raise FileNotFoundError(f"File does not exist: {file_path}")

        ext = file_path.suffix.lower()
        if ext in self.OFFICE_FORMATS:
            return self.convert_office_to_pdf(file_path, output_dir)
        if ext in self.TEXT_FORMATS:
            return self.convert_text_to_pdf(file_path, output_dir)
        raise ValueError(
            f"Unsupported file format: {ext}. "
            f"Supported formats: {', '.join(self.OFFICE_FORMATS | self.TEXT_FORMATS)}"
        )

    def check_dependencies(self) -> dict:
        """
        Check whether the required conversion dependencies are available.

        Returns:
            dict: Boolean availability flags keyed by "libreoffice" and
            "reportlab".
        """
        import importlib.util

        return {
            "libreoffice": self._find_libreoffice() is not None,
            "reportlab": importlib.util.find_spec("reportlab") is not None,
        }
+
|
| 567 |
+
def main():
    """
    Command-line entry point for the PDF converter.

    Supports a dependency-check mode (--check) and a conversion mode that
    takes a document path plus an optional output directory.

    Returns:
        int: Process exit code — 0 on success, 1 on conversion failure.
    """
    parser = argparse.ArgumentParser(description="Convert documents to PDF format")
    parser.add_argument("file_path", nargs="?", help="Path to the document to convert")
    parser.add_argument("--output", "-o", help="Output directory path")
    parser.add_argument(
        "--check",
        action="store_true",
        help="Check dependencies installation",
    )
    parser.add_argument(
        "--verbose", "-v", action="store_true", help="Enable verbose logging"
    )
    args = parser.parse_args()

    # Verbose mode surfaces the converter's info-level progress messages.
    logging.basicConfig(
        level=logging.INFO if args.verbose else logging.WARNING,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    converter = PDFConverter()

    # Dependency-check mode: report status and exit successfully.
    if args.check:
        print("🔍 Checking dependencies...")
        deps = converter.check_dependencies()

        libre_status = "✅ Available" if deps["libreoffice"] else "❌ Not found"
        lab_status = "✅ Available" if deps["reportlab"] else "❌ Not found"
        print(f"LibreOffice: {libre_status}")
        print(f"ReportLab: {lab_status}")

        if not deps["libreoffice"]:
            print("\n📋 To install LibreOffice:")
            print(" - Windows: Download from https://www.libreoffice.org/")
            print(" - macOS: brew install --cask libreoffice")
            print(" - Ubuntu/Debian: sudo apt-get install libreoffice")

        if not deps["reportlab"]:
            print("\n📋 To install ReportLab:")
            print(" pip install reportlab")

        return 0

    # Conversion mode requires a target file.
    if not args.file_path:
        parser.error("file_path is required when not using --check")

    try:
        output_pdf = converter.convert_to_pdf(
            file_path=args.file_path,
            output_dir=args.output,
        )
        print(f"✅ Successfully converted to PDF: {output_pdf}")
        print(f"📄 File size: {output_pdf.stat().st_size / 1024:.1f} KB")
    except Exception as e:
        print(f"❌ Error: {str(e)}")
        return 1

    return 0
| 637 |
+
|
| 638 |
+
|
| 639 |
+
if __name__ == "__main__":
    # Use sys.exit rather than the built-in exit(): exit() is a convenience
    # helper added by the `site` module and is not guaranteed to exist
    # (e.g. under `python -S` or in frozen applications); sys.exit also
    # propagates the integer exit code to the shell reliably.
    import sys

    sys.exit(main())
projects/ui/DeepCode/tools/pdf_downloader.py
ADDED
|
@@ -0,0 +1,1379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Smart PDF Downloader MCP Tool
|
| 4 |
+
|
| 5 |
+
A standardized MCP tool using FastMCP for intelligent file downloading and document conversion.
|
| 6 |
+
Supports natural language instructions for downloading files from URLs, moving local files,
|
| 7 |
+
and automatic conversion to Markdown format with image extraction.
|
| 8 |
+
|
| 9 |
+
Features:
|
| 10 |
+
- Natural language instruction parsing
|
| 11 |
+
- URL and local path extraction
|
| 12 |
+
- Automatic document conversion (PDF, DOCX, PPTX, HTML, etc.)
|
| 13 |
+
- Image extraction and preservation
|
| 14 |
+
- Multi-format support with fallback options
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import re
|
| 19 |
+
import aiohttp
|
| 20 |
+
import aiofiles
|
| 21 |
+
import shutil
|
| 22 |
+
import sys
|
| 23 |
+
import io
|
| 24 |
+
from typing import List, Dict, Optional, Any
|
| 25 |
+
from urllib.parse import urlparse, unquote
|
| 26 |
+
from datetime import datetime
|
| 27 |
+
|
| 28 |
+
from mcp.server import FastMCP
|
| 29 |
+
|
| 30 |
+
# Docling imports for document conversion
|
| 31 |
+
try:
|
| 32 |
+
from docling.document_converter import DocumentConverter
|
| 33 |
+
from docling.datamodel.base_models import InputFormat
|
| 34 |
+
from docling.datamodel.pipeline_options import PdfPipelineOptions
|
| 35 |
+
from docling.document_converter import PdfFormatOption
|
| 36 |
+
|
| 37 |
+
DOCLING_AVAILABLE = True
|
| 38 |
+
except ImportError:
|
| 39 |
+
DOCLING_AVAILABLE = False
|
| 40 |
+
print(
|
| 41 |
+
"Warning: docling package not available. Document conversion will be disabled."
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
# Fallback PDF text extraction
|
| 45 |
+
try:
|
| 46 |
+
import PyPDF2
|
| 47 |
+
|
| 48 |
+
PYPDF2_AVAILABLE = True
|
| 49 |
+
except ImportError:
|
| 50 |
+
PYPDF2_AVAILABLE = False
|
| 51 |
+
print(
|
| 52 |
+
"Warning: PyPDF2 package not available. Fallback PDF extraction will be disabled."
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
# Force stdout/stderr to UTF-8 so non-ASCII output (emoji banners, Chinese
# text) does not crash on consoles whose default encoding differs
# (e.g. Windows code pages).
if sys.stdout.encoding != "utf-8":
    try:
        if hasattr(sys.stdout, "reconfigure"):
            # Python 3.7+: reconfigure the existing text streams in place.
            sys.stdout.reconfigure(encoding="utf-8")
            sys.stderr.reconfigure(encoding="utf-8")
        else:
            # Older interpreters: detach the raw buffers and rewrap them
            # with UTF-8 text wrappers.
            sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding="utf-8")
            sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding="utf-8")
    except Exception as e:
        print(f"Warning: Could not set UTF-8 encoding: {e}")

# Create the FastMCP server instance; all tool functions register against it.
mcp = FastMCP("smart-pdf-downloader")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# 辅助函数
|
| 72 |
+
def format_success_message(action: str, details: Dict[str, Any]) -> str:
    """Render a success banner followed by one indented key/value line per detail."""
    detail_lines = (f" {key}: {value}" for key, value in details.items())
    return f"✅ {action}\n" + "\n".join(detail_lines)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def format_error_message(action: str, error: str) -> str:
    """Render a failure banner with the error text on an indented second line."""
    header = f"❌ {action}"
    return f"{header}\n Error: {error}"
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def format_warning_message(action: str, warning: str) -> str:
    """Render a warning banner with the warning text on an indented second line."""
    parts = [f"⚠️ {action}", f" Warning: {warning}"]
    return "\n".join(parts)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
async def perform_document_conversion(
    file_path: str, extract_images: bool = True
) -> Optional[str]:
    """
    Shared conversion step run after a file is downloaded or moved.

    Args:
        file_path: Path of the file to convert.
        extract_images: Whether to extract embedded images (only relevant to
            the currently disabled docling path below).

    Returns:
        A human-readable summary of the conversion attempt, or None when no
        conversion was attempted or produced output.
    """
    if not file_path:
        return None

    conversion_msg = ""

    # First try the lightweight PyPDF2 converter (for PDF files).
    # Sniff the file header so PDFs are detected regardless of extension.
    is_pdf_file = False
    if PYPDF2_AVAILABLE:
        try:
            with open(file_path, "rb") as f:
                header = f.read(8)
                # Every valid PDF begins with the "%PDF" magic bytes.
                is_pdf_file = header.startswith(b"%PDF")
        except Exception:
            # Could not read the file; fall back to the extension check.
            is_pdf_file = file_path.lower().endswith(".pdf")

    if is_pdf_file and PYPDF2_AVAILABLE:
        try:
            simple_converter = SimplePdfConverter()
            conversion_result = simple_converter.convert_pdf_to_markdown(file_path)
            if conversion_result["success"]:
                conversion_msg = "\n [INFO] PDF converted to Markdown (PyPDF2)"
                conversion_msg += (
                    f"\n Markdown file: {conversion_result['output_file']}"
                )
                conversion_msg += (
                    f"\n Conversion time: {conversion_result['duration']:.2f} seconds"
                )
                conversion_msg += (
                    f"\n Pages extracted: {conversion_result['pages_extracted']}"
                )

            else:
                conversion_msg = f"\n [WARNING] PDF conversion failed: {conversion_result['error']}"
        except Exception as conv_error:
            conversion_msg = f"\n [WARNING] PDF conversion error: {str(conv_error)}"

    # If the simple conversion is insufficient, docling (which supports image
    # extraction) could be used as a second pass; kept disabled for now.
    # if not conversion_success and DOCLING_AVAILABLE:
    #     try:
    #         converter = DoclingConverter()
    #         if converter.is_supported_format(file_path):
    #             conversion_result = converter.convert_to_markdown(
    #                 file_path, extract_images=extract_images
    #             )
    #             if conversion_result["success"]:
    #                 conversion_msg = (
    #                     "\n [INFO] Document converted to Markdown (docling)"
    #                 )
    #                 conversion_msg += (
    #                     f"\n Markdown file: {conversion_result['output_file']}"
    #                 )
    #                 conversion_msg += f"\n Conversion time: {conversion_result['duration']:.2f} seconds"
    #                 if conversion_result.get("images_extracted", 0) > 0:
    #                     conversion_msg += f"\n Images extracted: {conversion_result['images_extracted']}"
    #                     images_dir = os.path.join(
    #                         os.path.dirname(conversion_result["output_file"]), "images"
    #                     )
    #                     conversion_msg += f"\n Images saved to: {images_dir}"
    #             else:
    #                 conversion_msg = f"\n [WARNING] Docling conversion failed: {conversion_result['error']}"
    #     except Exception as conv_error:
    #         conversion_msg = (
    #             f"\n [WARNING] Docling conversion error: {str(conv_error)}"
    #         )

    return conversion_msg if conversion_msg else None
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def format_file_operation_result(
    operation: str,
    source: str,
    destination: str,
    result: Dict[str, Any],
    conversion_msg: Optional[str] = None,
) -> str:
    """
    Shared formatting for file-operation outcomes.

    Args:
        operation: Operation kind ("download" or "move").
        source: Source file path or URL.
        destination: Destination path.
        result: Operation result dictionary (expects "success", "size",
            "duration"; optionally "speed" and "error").
        conversion_msg: Optional conversion summary to append.

    Returns:
        Formatted result message.
    """
    # Map the verb to its past tense for the banner. The previous naive
    # f"{operation}d" produced "downloadd" for the download case.
    past_tense = {"download": "downloaded", "move": "moved"}.get(
        operation, f"{operation}d"
    )

    if result["success"]:
        size_mb = result["size"] / (1024 * 1024)
        msg = f"[SUCCESS] Successfully {past_tense}: {source}\n"

        if operation == "download":
            msg += f" File: {destination}\n"
            msg += f" Size: {size_mb:.2f} MB\n"
            msg += f" Time: {result['duration']:.2f} seconds\n"
            speed_mb = result.get("speed", 0) / (1024 * 1024)
            msg += f" Speed: {speed_mb:.2f} MB/s"
        else:  # move
            msg += f" To: {destination}\n"
            msg += f" Size: {size_mb:.2f} MB\n"
            msg += f" Time: {result['duration']:.2f} seconds"

        if conversion_msg:
            msg += conversion_msg

        return msg
    else:
        return f"[ERROR] Failed to {operation}: {source}\n Error: {result.get('error', 'Unknown error')}"
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class LocalPathExtractor:
    """Heuristics for recognizing and extracting local filesystem paths from text."""

    @staticmethod
    def is_local_path(path: str) -> bool:
        """Return True when *path* looks like a local filesystem path rather than a URL."""
        candidate = path.strip("\"'")

        # Anything with an http/https/ftp scheme is a URL, never a local path.
        if re.match(r"^https?://", candidate, re.IGNORECASE) or re.match(
            r"^ftp://", candidate, re.IGNORECASE
        ):
            return False

        # Characters that suggest a filesystem path.
        markers = [os.path.sep, "/", "\\", "~", ".", ".."]
        has_extension = bool(os.path.splitext(candidate)[1])

        if has_extension or any(marker in candidate for marker in markers):
            expanded = os.path.expanduser(candidate)
            if os.path.exists(expanded):
                return True
            # Even if it doesn't exist, keep it when it carries path markers.
            return any(marker in candidate for marker in markers)

        return False

    @staticmethod
    def extract_local_paths(text: str) -> List[str]:
        """Extract local file paths mentioned in *text*, deduplicated in first-seen order."""
        patterns = [
            r'"([^"]+)"',
            r"'([^']+)'",
            r"(?:^|\s)((?:[~./\\]|[A-Za-z]:)?(?:[^/\\\s]+[/\\])*[^/\\\s]+\.[A-Za-z0-9]+)(?:\s|$)",
            r"(?:^|\s)((?:~|\.{1,2})?/[^\s]+)(?:\s|$)",
            r"(?:^|\s)([A-Za-z]:[/\\][^\s]+)(?:\s|$)",
            r"(?:^|\s)(\.{1,2}[/\\][^\s]+)(?:\s|$)",
        ]

        candidates: List[str] = []
        for pattern in patterns:
            candidates.extend(re.findall(pattern, text, re.MULTILINE))

        found: List[str] = []
        for raw in candidates:
            raw = raw.strip()
            if raw and LocalPathExtractor.is_local_path(raw):
                expanded = os.path.expanduser(raw)
                if expanded not in found:
                    found.append(expanded)

        return found
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
class URLExtractor:
    """Extract and normalize download URLs from free-form text."""

    # Patterns for full http(s)/ftp URLs and bare www-style hosts with a path.
    URL_PATTERNS = [
        r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+(?:/(?:[-\w._~!$&\'()*+,;=:@]|%[\da-fA-F]{2})*)*(?:\?(?:[-\w._~!$&\'()*+,;=:@/?]|%[\da-fA-F]{2})*)?(?:#(?:[-\w._~!$&\'()*+,;=:@/?]|%[\da-fA-F]{2})*)?",
        r"ftp://(?:[-\w.]|(?:%[\da-fA-F]{2}))+(?:/(?:[-\w._~!$&\'()*+,;=:@]|%[\da-fA-F]{2})*)*",
        r"(?<!\S)(?:www\.)?[-\w]+(?:\.[-\w]+)+/(?:[-\w._~!$&\'()*+,;=:@/]|%[\da-fA-F]{2})+",
    ]

    @staticmethod
    def convert_arxiv_url(url: str) -> str:
        """Convert an arXiv abstract-page URL into its direct PDF download URL."""
        # Match the numeric arXiv paper id, ignoring an optional version suffix.
        arxiv_pattern = r"arxiv\.org/abs/(\d+\.\d+)(?:v\d+)?"
        match = re.search(arxiv_pattern, url, re.IGNORECASE)
        if match:
            paper_id = match.group(1)
            return f"https://arxiv.org/pdf/{paper_id}.pdf"
        return url

    @classmethod
    def extract_urls(cls, text: str) -> List[str]:
        """Extract URLs from *text*, normalizing arXiv links and deduplicating in order."""
        urls = []

        # Special case first: URLs prefixed with "@" (common chat-app syntax).
        at_url_pattern = r"@(https?://[^\s]+)"
        at_matches = re.findall(at_url_pattern, text, re.IGNORECASE)
        for match in at_matches:
            # Normalize arXiv abstract links to direct PDF links.
            url = cls.convert_arxiv_url(match.rstrip("/"))
            urls.append(url)

        # Then apply the general URL patterns.
        for pattern in cls.URL_PATTERNS:
            matches = re.findall(pattern, text, re.IGNORECASE)
            for match in matches:
                # Add a scheme to bare hosts so the URL is usable downstream.
                if not match.startswith(("http://", "https://", "ftp://")):
                    if match.startswith("www."):
                        match = "https://" + match
                    else:
                        # Default any other scheme-less match to https as well.
                        match = "https://" + match

                url = cls.convert_arxiv_url(match.rstrip("/"))
                urls.append(url)

        # Deduplicate while preserving first-seen order.
        seen = set()
        unique_urls = []
        for url in urls:
            if url not in seen:
                seen.add(url)
                unique_urls.append(url)

        return unique_urls

    @staticmethod
    def infer_filename_from_url(url: str) -> str:
        """Infer a sensible local filename for the resource behind *url*."""
        parsed = urlparse(url)
        path = unquote(parsed.path)

        filename = os.path.basename(path)

        # Special case: arXiv PDF links often lack a .pdf suffix on the id.
        if "arxiv.org" in parsed.netloc and "/pdf/" in path:
            if filename:
                if not filename.lower().endswith((".pdf", ".doc", ".docx", ".txt")):
                    # BUGFIX: previously assigned a corrupted placeholder
                    # literal instead of suffixing the paper id with .pdf.
                    filename = f"{filename}.pdf"
            else:
                path_parts = [p for p in path.split("/") if p]
                if path_parts and path_parts[-1]:
                    filename = f"{path_parts[-1]}.pdf"
                else:
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    filename = f"arxiv_paper_{timestamp}.pdf"

        # No usable basename or no extension: synthesize a name.
        elif not filename or "." not in filename:
            domain = parsed.netloc.replace("www.", "").replace(".", "_")
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

            if not path or path == "/":
                filename = f"{domain}_{timestamp}.html"
            else:
                # Use the last non-empty path segment when available.
                path_parts = [p for p in path.split("/") if p]
                if path_parts:
                    filename = f"{path_parts[-1]}_{timestamp}"
                else:
                    filename = f"{domain}_{timestamp}"

                # Still extensionless: infer the type from path keywords.
                if "." not in filename:
                    if "/pdf/" in path.lower() or path.lower().endswith("pdf"):
                        filename += ".pdf"
                    elif any(
                        ext in path.lower() for ext in ["/doc/", "/word/", ".docx"]
                    ):
                        filename += ".docx"
                    elif any(
                        ext in path.lower()
                        for ext in ["/ppt/", "/powerpoint/", ".pptx"]
                    ):
                        filename += ".pptx"
                    elif any(ext in path.lower() for ext in ["/csv/", ".csv"]):
                        filename += ".csv"
                    elif any(ext in path.lower() for ext in ["/zip/", ".zip"]):
                        filename += ".zip"
                    else:
                        filename += ".html"

        return filename
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
class PathExtractor:
    """Extract the destination path from a natural-language instruction."""

    @staticmethod
    def extract_target_path(text: str) -> Optional[str]:
        """Return the first plausible target path mentioned in *text*, or None."""
        # English and Chinese phrasings of "save/download ... to <path>".
        patterns = [
            r'(?:save|download|store|put|place|write|copy|move)\s+(?:to|into|in|at)\s+["\']?([^\s"\']+)["\']?',
            r'(?:to|into|in|at)\s+(?:folder|directory|dir|path|location)\s*["\']?([^\s"\']+)["\']?',
            r'(?:destination|target|output)\s*(?:is|:)?\s*["\']?([^\s"\']+)["\']?',
            r'(?:保存|下载|存储|放到|写入|复制|移动)(?:到|至|去)\s*["\']?([^\s"\']+)["\']?',
            r'(?:到|在|至)\s*["\']?([^\s"\']+)["\']?\s*(?:文件夹|目录|路径|位置)',
        ]

        # Deictic words that are not real paths.
        filter_words = {
            "here",
            "there",
            "current",
            "local",
            "this",
            "that",
            "这里",
            "那里",
            "当前",
            "本地",
            "这个",
            "那个",
        }

        for pattern in patterns:
            found = re.search(pattern, text, re.IGNORECASE)
            if not found:
                continue
            candidate = found.group(1).strip("。,,.、")
            if candidate and candidate.lower() not in filter_words:
                return candidate

        return None
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
class SimplePdfConverter:
    """Lightweight PDF-to-Markdown converter backed by PyPDF2 text extraction."""

    def convert_pdf_to_markdown(
        self, input_file: str, output_file: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Convert a PDF into a Markdown file using PyPDF2.

        Args:
            input_file: Path of the source PDF.
            output_file: Path of the Markdown output; defaults to the PDF
                path with its extension replaced by .md.

        Returns:
            Result dictionary: success flag, file paths and sizes, timing,
            the generated Markdown, and the page count — or an error message.
        """
        if not PYPDF2_AVAILABLE:
            return {"success": False, "error": "PyPDF2 package is not available"}

        try:
            if not os.path.exists(input_file):
                return {
                    "success": False,
                    "error": f"Input file not found: {input_file}",
                }

            # Default the output path to <input-stem>.md.
            if not output_file:
                output_file = os.path.splitext(input_file)[0] + ".md"

            # Make sure the output directory exists before writing.
            target_dir = os.path.dirname(output_file)
            if target_dir:
                os.makedirs(target_dir, exist_ok=True)

            started = datetime.now()

            with open(input_file, "rb") as handle:
                reader = PyPDF2.PdfReader(handle)

                # Collect one "## Page N" section per non-empty page.
                page_sections = []
                for page_no, page in enumerate(reader.pages, 1):
                    extracted = page.extract_text()
                    if extracted.strip():
                        page_sections.append(
                            f"## Page {page_no}\n\n{extracted.strip()}\n\n"
                        )

                # Assemble the Markdown document while the reader is live.
                markdown_content = (
                    f"# Extracted from {os.path.basename(input_file)}\n\n"
                )
                markdown_content += f"*Total pages: {len(reader.pages)}*\n\n"
                markdown_content += "---\n\n"
                markdown_content += "".join(page_sections)
                total_pages = len(reader.pages)

            with open(output_file, "w", encoding="utf-8") as out:
                out.write(markdown_content)

            elapsed = (datetime.now() - started).total_seconds()

            return {
                "success": True,
                "input_file": input_file,
                "output_file": output_file,
                "input_size": os.path.getsize(input_file),
                "output_size": os.path.getsize(output_file),
                "duration": elapsed,
                "markdown_content": markdown_content,
                "pages_extracted": total_pages,
            }

        except Exception as e:
            return {
                "success": False,
                "input_file": input_file,
                "error": f"Conversion failed: {str(e)}",
            }
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
class DoclingConverter:
    """Document-to-Markdown converter built on docling, with optional image extraction."""

    def __init__(self):
        """Build a DocumentConverter with conservative PDF pipeline options."""
        if not DOCLING_AVAILABLE:
            raise ImportError(
                "docling package is not available. Please install it first."
            )

        # Configure PDF processing: OCR and table-structure recognition are
        # disabled for now to avoid model-download/authentication problems.
        pdf_pipeline_options = PdfPipelineOptions()
        pdf_pipeline_options.do_ocr = False
        pdf_pipeline_options.do_table_structure = False

        # Create the converter; fall back to the default configuration if
        # this docling version rejects the explicit PDF options.
        try:
            self.converter = DocumentConverter(
                format_options={
                    InputFormat.PDF: PdfFormatOption(
                        pipeline_options=pdf_pipeline_options
                    )
                }
            )
        except Exception:
            self.converter = DocumentConverter()

    def is_supported_format(self, file_path: str) -> bool:
        """Return True when the file extension is one docling can convert."""
        if not DOCLING_AVAILABLE:
            return False

        supported_extensions = {".pdf", ".docx", ".pptx", ".html", ".md", ".txt"}
        file_extension = os.path.splitext(file_path)[1].lower()
        return file_extension in supported_extensions

    def is_url(self, path: str) -> bool:
        """Return True when *path* is an http(s) URL."""
        try:
            result = urlparse(path)
            return result.scheme in ("http", "https")
        except Exception:
            return False

    def extract_images(self, doc, output_dir: str) -> Dict[str, str]:
        """
        Extract embedded images from a docling document into <output_dir>/images.

        Args:
            doc: docling document object.
            output_dir: Directory the Markdown output lives in.

        Returns:
            Mapping of docling image id -> path relative to *output_dir*.
        """
        images_dir = os.path.join(output_dir, "images")
        os.makedirs(images_dir, exist_ok=True)
        image_map = {}  # docling image id -> relative local path

        try:
            # The attribute may be absent depending on the docling version.
            images = getattr(doc, "images", [])

            for idx, img in enumerate(images):
                try:
                    # Normalize the image format; default to png.
                    ext = getattr(img, "format", None) or "png"
                    if ext.lower() not in ["png", "jpg", "jpeg", "gif", "bmp", "webp"]:
                        ext = "png"

                    filename = f"image_{idx+1}.{ext}"
                    filepath = os.path.join(images_dir, filename)

                    # Persist the raw image bytes when available.
                    img_data = getattr(img, "data", None)
                    if img_data:
                        with open(filepath, "wb") as f:
                            f.write(img_data)

                    # Record the path relative to the Markdown output dir.
                    rel_path = os.path.relpath(filepath, output_dir)
                    img_id = getattr(img, "id", str(idx + 1))
                    image_map[img_id] = rel_path

                except Exception as img_error:
                    print(f"Warning: Failed to extract image {idx+1}: {img_error}")
                    continue

        except Exception as e:
            print(f"Warning: Failed to extract images: {e}")

        return image_map

    def process_markdown_with_images(
        self, markdown_content: str, image_map: Dict[str, str]
    ) -> str:
        """
        Replace docling image placeholders with links to the extracted files.

        Args:
            markdown_content: Raw Markdown emitted by docling.
            image_map: Mapping of image id -> local relative path.

        Returns:
            Markdown with placeholders rewritten to local image references.
        """

        def replace_img(match):
            img_id = match.group(1)
            if img_id in image_map:
                # BUGFIX: previously returned an empty string here, which
                # silently deleted every extracted image from the output
                # instead of linking to the saved file.
                return f"![Image]({image_map[img_id]})"
            else:
                # Unknown id: leave the placeholder untouched.
                return match.group(0)

        # Rewrite docling's "docling://image/<id>" placeholders.
        processed_content = re.sub(
            r"!\[Image\]\(docling://image/([^)]+)\)", replace_img, markdown_content
        )

        return processed_content

    def convert_to_markdown(
        self,
        input_file: str,
        output_file: Optional[str] = None,
        extract_images: bool = True,
    ) -> Dict[str, Any]:
        """
        Convert a document (local path or URL) to Markdown, optionally
        extracting embedded images alongside the output file.

        Args:
            input_file: Input file path or URL.
            output_file: Output Markdown path (auto-generated when omitted).
            extract_images: Whether to extract embedded images (default True).

        Returns:
            Result dictionary with success flag, paths, sizes, timing, the
            Markdown text and image-extraction details — or an error message.
        """
        if not DOCLING_AVAILABLE:
            return {"success": False, "error": "docling package is not available"}

        try:
            # Validate the input (existence + format) when it is a local file.
            if not self.is_url(input_file):
                if not os.path.exists(input_file):
                    return {
                        "success": False,
                        "error": f"Input file not found: {input_file}",
                    }

                if not self.is_supported_format(input_file):
                    return {
                        "success": False,
                        "error": f"Unsupported file format: {os.path.splitext(input_file)[1]}",
                    }
            else:
                # For URLs, only the extension can be checked up front.
                if not input_file.lower().endswith(
                    (".pdf", ".docx", ".pptx", ".html", ".md", ".txt")
                ):
                    return {
                        "success": False,
                        "error": f"Unsupported URL format: {input_file}",
                    }

            # Auto-generate the output path when none was supplied.
            if not output_file:
                if self.is_url(input_file):
                    filename = URLExtractor.infer_filename_from_url(input_file)
                    base_name = os.path.splitext(filename)[0]
                else:
                    base_name = os.path.splitext(input_file)[0]
                output_file = f"{base_name}.md"

            # Make sure the output directory exists.
            output_dir = os.path.dirname(output_file) or "."
            os.makedirs(output_dir, exist_ok=True)

            # Run the docling conversion.
            start_time = datetime.now()
            result = self.converter.convert(input_file)
            doc = result.document

            # Extract images (when enabled) before rendering Markdown.
            image_map = {}
            images_extracted = 0
            if extract_images:
                image_map = self.extract_images(doc, output_dir)
                images_extracted = len(image_map)

            markdown_content = doc.export_to_markdown()

            # Rewrite image placeholders to the extracted local files.
            if extract_images and image_map:
                markdown_content = self.process_markdown_with_images(
                    markdown_content, image_map
                )

            with open(output_file, "w", encoding="utf-8") as f:
                f.write(markdown_content)

            duration = (datetime.now() - start_time).total_seconds()

            if self.is_url(input_file):
                input_size = 0  # a remote URL's size is not known locally
            else:
                input_size = os.path.getsize(input_file)
            output_size = os.path.getsize(output_file)

            return {
                "success": True,
                "input_file": input_file,
                "output_file": output_file,
                "input_size": input_size,
                "output_size": output_size,
                "duration": duration,
                "markdown_content": markdown_content,
                "images_extracted": images_extracted,
                "image_map": image_map,
            }

        except Exception as e:
            return {
                "success": False,
                "input_file": input_file,
                "error": f"Conversion failed: {str(e)}",
            }
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
async def check_url_accessible(url: str) -> Dict[str, Any]:
    """Probe *url* with a HEAD request and report basic accessibility metadata."""
    try:
        # Short timeout: this is only a reachability probe, not a download.
        timeout = aiohttp.ClientTimeout(total=10)
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.head(url, allow_redirects=True) as response:
                return {
                    "accessible": response.status < 400,
                    "status": response.status,
                    "content_type": response.headers.get("Content-Type", ""),
                    "content_length": response.headers.get("Content-Length", 0),
                }
    except Exception:
        # Any network failure is reported as simply "not accessible".
        return {
            "accessible": False,
            "status": 0,
            "content_type": "",
            "content_length": 0,
        }
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
async def download_file(url: str, destination: str) -> Dict[str, Any]:
    """Stream a remote file to disk and report the outcome.

    Args:
        url: Source URL to fetch.
        destination: Local path to write to; parent directories are created
            on demand.

    Returns:
        On success a dict with success/url/destination/size/content_type/
        duration/speed; on failure a dict with success=False, url,
        destination and a human-readable "error" string.
    """
    began = datetime.now()
    block_size = 8192

    try:
        # Generous 5-minute overall timeout for large documents.
        session_timeout = aiohttp.ClientTimeout(total=300)
        async with aiohttp.ClientSession(timeout=session_timeout) as session:
            async with session.get(url) as response:
                # Turn 4xx/5xx into ClientError so the handler below formats it.
                response.raise_for_status()

                mime = response.headers.get(
                    "Content-Type", "application/octet-stream"
                )

                # Create the destination directory lazily.
                target_dir = os.path.dirname(destination)
                if target_dir:
                    os.makedirs(target_dir, exist_ok=True)

                # Stream in fixed-size chunks to keep memory usage flat.
                total_written = 0
                async with aiofiles.open(destination, "wb") as file:
                    async for piece in response.content.iter_chunked(block_size):
                        await file.write(piece)
                        total_written += len(piece)

                elapsed = (datetime.now() - began).total_seconds()

                return {
                    "success": True,
                    "url": url,
                    "destination": destination,
                    "size": total_written,
                    "content_type": mime,
                    "duration": elapsed,
                    "speed": total_written / elapsed if elapsed > 0 else 0,
                }

    except aiohttp.ClientError as e:
        return {
            "success": False,
            "url": url,
            "destination": destination,
            "error": f"Network error: {str(e)}",
        }
    except Exception as e:
        return {
            "success": False,
            "url": url,
            "destination": destination,
            "error": f"Download error: {str(e)}",
        }
|
| 830 |
+
|
| 831 |
+
|
| 832 |
+
async def move_local_file(source_path: str, destination: str) -> Dict[str, Any]:
    """Relocate a file on the local filesystem.

    Args:
        source_path: Existing file to move.
        destination: Target path; parent directories are created if missing.

    Returns:
        Result dict — on success: source/destination/size/duration/operation;
        on failure: success=False plus an "error" message (no exception is
        raised to the caller).
    """
    op_started = datetime.now()

    try:
        # Fail fast with a structured error instead of raising.
        if not os.path.exists(source_path):
            return {
                "success": False,
                "source": source_path,
                "destination": destination,
                "error": f"Source file not found: {source_path}",
            }

        size_bytes = os.path.getsize(source_path)

        # Create the destination directory tree on demand.
        dest_dir = os.path.dirname(destination)
        if dest_dir:
            os.makedirs(dest_dir, exist_ok=True)

        # shutil.move handles cross-device moves (copy + delete) transparently.
        shutil.move(source_path, destination)

        return {
            "success": True,
            "source": source_path,
            "destination": destination,
            "size": size_bytes,
            "duration": (datetime.now() - op_started).total_seconds(),
            "operation": "move",
        }

    except Exception as e:
        return {
            "success": False,
            "source": source_path,
            "destination": destination,
            "error": f"Move error: {str(e)}",
        }
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
def _resolve_destination(target_path: Optional[str], filename: str) -> str:
    """Combine an optional user-supplied target path with a filename.

    A target carrying a file extension is treated as a full file path;
    otherwise it is treated as a directory and *filename* is appended.
    "~" is expanded and relative paths are normalized.  With no target the
    file lands in the current directory under *filename*.

    This helper replaces logic that was duplicated between the URL and
    local-file loops below, and avoids mutating the shared target variable
    inside those loops.
    """
    if not target_path:
        return filename
    if target_path.startswith("~"):
        target_path = os.path.expanduser(target_path)
    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)
    if os.path.splitext(target_path)[1]:  # has an extension -> full file path
        return target_path
    return os.path.join(target_path, filename)  # directory -> append filename


@mcp.tool()
async def download_files(instruction: str) -> str:
    """
    Download files from URLs or move local files mentioned in natural language instructions.

    Args:
        instruction: Natural language instruction containing URLs/local paths and optional destination paths

    Returns:
        Status message about the download/move operations

    Examples:
        - "Download https://example.com/file.pdf to documents folder"
        - "Move /home/user/file.pdf to documents folder"
        - "Please get https://raw.githubusercontent.com/user/repo/main/data.csv and save it to ~/downloads"
        - "移动 ~/Desktop/report.docx 到 /tmp/documents/"
        - "Download www.example.com/report.xlsx"
    """
    urls = URLExtractor.extract_urls(instruction)
    local_paths = LocalPathExtractor.extract_local_paths(instruction)

    if not urls and not local_paths:
        return format_error_message(
            "Failed to parse instruction",
            "No downloadable URLs or movable local files found",
        )

    target_path = PathExtractor.extract_target_path(instruction)

    # Collected per-file status messages, joined at the end.
    results = []

    # --- URL downloads ---
    for url in urls:
        try:
            filename = URLExtractor.infer_filename_from_url(url)
            destination = _resolve_destination(target_path, filename)

            # Never overwrite an existing file.
            if os.path.exists(destination):
                results.append(
                    f"[WARNING] Skipped {url}: File already exists at {destination}"
                )
                continue

            # Probe the URL before committing to a full download.
            check_result = await check_url_accessible(url)
            if not check_result["accessible"]:
                results.append(
                    f"[ERROR] Failed to access {url}: HTTP {check_result['status'] or 'Connection failed'}"
                )
                continue

            result = await download_file(url, destination)

            # Attempt Markdown conversion only for successful downloads.
            conversion_msg = None
            if result["success"]:
                conversion_msg = await perform_document_conversion(
                    destination, extract_images=True
                )

            msg = format_file_operation_result(
                "download", url, destination, result, conversion_msg
            )

        except Exception as e:
            msg = f"[ERROR] Failed to download: {url}\n"
            msg += f" Error: {str(e)}"

        results.append(msg)

    # --- Local file moves ---
    for local_path in local_paths:
        try:
            filename = os.path.basename(local_path)
            destination = _resolve_destination(target_path, filename)

            # Never overwrite an existing file.
            if os.path.exists(destination):
                results.append(
                    f"[WARNING] Skipped {local_path}: File already exists at {destination}"
                )
                continue

            result = await move_local_file(local_path, destination)

            # Attempt Markdown conversion only for successful moves.
            conversion_msg = None
            if result["success"]:
                conversion_msg = await perform_document_conversion(
                    destination, extract_images=True
                )

            msg = format_file_operation_result(
                "move", local_path, destination, result, conversion_msg
            )

        except Exception as e:
            msg = f"[ERROR] Failed to move: {local_path}\n"
            msg += f" Error: {str(e)}"

        results.append(msg)

    return "\n\n".join(results)
|
| 1025 |
+
|
| 1026 |
+
|
| 1027 |
+
@mcp.tool()
async def parse_download_urls(text: str) -> str:
    """
    Extract URLs, local paths and target paths from text without downloading or moving.

    Args:
        text: Text containing URLs, local paths and optional destination paths

    Returns:
        Parsed URLs, local paths and target path information
    """
    urls = URLExtractor.extract_urls(text)
    local_paths = LocalPathExtractor.extract_local_paths(text)
    target_path = PathExtractor.extract_target_path(text)

    content = "📋 Parsed file operation information:\n\n"

    if urls:
        content += f"🔗 URLs found ({len(urls)}):\n"
        for i, url in enumerate(urls, 1):
            filename = URLExtractor.infer_filename_from_url(url)
            # Bug fix: the inferred filename was computed but never shown —
            # the message printed a literal placeholder instead of {filename}.
            content += f" {i}. {url}\n 📄 Filename: {filename}\n"
    else:
        content += "🔗 No URLs found\n"

    if local_paths:
        content += f"\n📁 Local files found ({len(local_paths)}):\n"
        for i, path in enumerate(local_paths, 1):
            exists = os.path.exists(path)
            content += f" {i}. {path}\n"
            content += f" ✅ Exists: {'Yes' if exists else 'No'}\n"
            if exists:
                size_mb = os.path.getsize(path) / (1024 * 1024)
                content += f" 📊 Size: {size_mb:.2f} MB\n"
    else:
        content += "\n📁 No local files found\n"

    if target_path:
        content += f"\n🎯 Target path: {target_path}"
        if target_path.startswith("~"):
            content += f"\n (Expanded: {os.path.expanduser(target_path)})"
    else:
        content += "\n🎯 Target path: Not specified (will use current directory)"

    return content
|
| 1072 |
+
|
| 1073 |
+
|
| 1074 |
+
@mcp.tool()
async def download_file_to(
    url: str, destination: Optional[str] = None, filename: Optional[str] = None
) -> str:
    """
    Download a specific file with detailed options.

    Args:
        url: URL to download from
        destination: Target directory or full file path (optional)
        filename: Specific filename to use (optional, ignored if destination is a full file path)

    Returns:
        Status message about the download operation
    """
    # Determine the filename (infer it from the URL when not supplied)
    if not filename:
        filename = URLExtractor.infer_filename_from_url(url)

    # Determine the full target path
    if destination:
        # Expand "~" to the user's home directory
        if destination.startswith("~"):
            destination = os.path.expanduser(destination)

        # A destination carrying an extension is treated as a full file path
        if os.path.splitext(destination)[1]:  # has an extension
            target_path = destination
        else:  # it is a directory
            target_path = os.path.join(destination, filename)
    else:
        target_path = filename

    # Normalize relative paths (absolute paths are kept as-is)
    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)

    # Refuse to overwrite an existing file
    if os.path.exists(target_path):
        return format_error_message(
            "Download aborted", f"File already exists at {target_path}"
        )

    # Probe the URL first so obviously dead links fail fast
    check_result = await check_url_accessible(url)
    if not check_result["accessible"]:
        return format_error_message(
            "Cannot access URL",
            f"{url} (HTTP {check_result['status'] or 'Connection failed'})",
        )

    # Build the pre-download info message.
    # NOTE(review): content_length may be the raw header value — int() handles
    # digit strings, but a non-numeric header would raise here; confirm what
    # check_url_accessible actually returns.
    size_mb = (
        int(check_result["content_length"]) / (1024 * 1024)
        if check_result["content_length"]
        else 0
    )
    msg = "[INFO] Downloading file:\n"
    msg += f" URL: {url}\n"
    msg += f" Target: {target_path}\n"
    if size_mb > 0:
        msg += f" Expected size: {size_mb:.2f} MB\n"
    msg += "\n"

    # Perform the download
    result = await download_file(url, target_path)

    # Convert the document to Markdown (only when the download succeeded)
    conversion_msg = None
    if result["success"]:
        conversion_msg = await perform_document_conversion(
            target_path, extract_images=True
        )

        # Prefix the report with download statistics
        actual_size_mb = result["size"] / (1024 * 1024)
        speed_mb = result["speed"] / (1024 * 1024)
        info_msg = "[SUCCESS] Download completed!\n"
        info_msg += f" Saved to: {target_path}\n"
        info_msg += f" Size: {actual_size_mb:.2f} MB\n"
        info_msg += f" Duration: {result['duration']:.2f} seconds\n"
        info_msg += f" Speed: {speed_mb:.2f} MB/s\n"
        info_msg += f" Type: {result['content_type']}"

        if conversion_msg:
            info_msg += conversion_msg

        return msg + info_msg
    else:
        return msg + f"[ERROR] Download failed!\n Error: {result['error']}"
|
| 1164 |
+
|
| 1165 |
+
|
| 1166 |
+
@mcp.tool()
async def move_file_to(
    source: str, destination: Optional[str] = None, filename: Optional[str] = None
) -> str:
    """
    Move a local file to a new location with detailed options.

    Args:
        source: Source file path to move
        destination: Target directory or full file path (optional)
        filename: Specific filename to use (optional, ignored if destination is a full file path)

    Returns:
        Status message about the move operation
    """
    # Expand "~" in the source path before any existence checks.
    if source.startswith("~"):
        source = os.path.expanduser(source)

    if not os.path.exists(source):
        return format_error_message("Move aborted", f"Source file not found: {source}")

    # Fall back to the source's own basename when no filename was given.
    if not filename:
        filename = os.path.basename(source)

    # Resolve the final target path from destination/filename.
    if destination:
        if destination.startswith("~"):
            destination = os.path.expanduser(destination)
        # An extension on the destination means it is a full file path,
        # otherwise it is a directory to place the file in.
        target_path = (
            destination
            if os.path.splitext(destination)[1]
            else os.path.join(destination, filename)
        )
    else:
        target_path = filename

    # Normalize relative paths; absolute ones are kept untouched.
    if not os.path.isabs(target_path):
        target_path = os.path.normpath(target_path)

    if os.path.exists(target_path):
        return f"[ERROR] Target file already exists: {target_path}"

    # Report what is about to happen before performing the move.
    source_size_mb = os.path.getsize(source) / (1024 * 1024)
    msg = "".join(
        [
            "[INFO] Moving file:\n",
            f" Source: {source}\n",
            f" Target: {target_path}\n",
            f" Size: {source_size_mb:.2f} MB\n",
            "\n",
        ]
    )

    result = await move_local_file(source, target_path)

    if result["success"]:
        # Attempt a Markdown conversion of the moved document.
        conversion_msg = await perform_document_conversion(
            target_path, extract_images=True
        )

        info_msg = "[SUCCESS] File moved successfully!\n"
        info_msg += f" From: {source}\n"
        info_msg += f" To: {target_path}\n"
        info_msg += f" Duration: {result['duration']:.2f} seconds"

        if conversion_msg:
            info_msg += conversion_msg

        return msg + info_msg

    return msg + f"[ERROR] Move failed!\n Error: {result['error']}"
|
| 1245 |
+
|
| 1246 |
+
|
| 1247 |
+
# @mcp.tool()
|
| 1248 |
+
# async def convert_document_to_markdown(
|
| 1249 |
+
# file_path: str, output_path: Optional[str] = None, extract_images: bool = True
|
| 1250 |
+
# ) -> str:
|
| 1251 |
+
# """
|
| 1252 |
+
# Convert a document to Markdown format with image extraction support.
|
| 1253 |
+
|
| 1254 |
+
# Supports both local files and URLs. Uses docling for advanced conversion with image extraction,
|
| 1255 |
+
# or falls back to PyPDF2 for simple PDF text extraction.
|
| 1256 |
+
|
| 1257 |
+
# Args:
|
| 1258 |
+
# file_path: Path to the input document file or URL (supports PDF, DOCX, PPTX, HTML, TXT, MD)
|
| 1259 |
+
# output_path: Path for the output Markdown file (optional, auto-generated if not provided)
|
| 1260 |
+
# extract_images: Whether to extract images from the document (default: True)
|
| 1261 |
+
|
| 1262 |
+
# Returns:
|
| 1263 |
+
# Status message about the conversion operation with preview of converted content
|
| 1264 |
+
|
| 1265 |
+
# Examples:
|
| 1266 |
+
# - "convert_document_to_markdown('paper.pdf')"
|
| 1267 |
+
# - "convert_document_to_markdown('https://example.com/doc.pdf', 'output.md')"
|
| 1268 |
+
# - "convert_document_to_markdown('presentation.pptx', extract_images=False)"
|
| 1269 |
+
# """
|
| 1270 |
+
# # 检查是否为URL
|
| 1271 |
+
# is_url_input = False
|
| 1272 |
+
# try:
|
| 1273 |
+
# parsed = urlparse(file_path)
|
| 1274 |
+
# is_url_input = parsed.scheme in ("http", "https")
|
| 1275 |
+
# except Exception:
|
| 1276 |
+
# is_url_input = False
|
| 1277 |
+
|
| 1278 |
+
# # 检查文件是否存在(如果不是URL)
|
| 1279 |
+
# if not is_url_input and not os.path.exists(file_path):
|
| 1280 |
+
# return f"[ERROR] Input file not found: {file_path}"
|
| 1281 |
+
|
| 1282 |
+
# # 检查是否是PDF文件,优先使用简单转换器(仅对本地文件)
|
| 1283 |
+
# if (
|
| 1284 |
+
# not is_url_input
|
| 1285 |
+
# and file_path.lower().endswith(".pdf")
|
| 1286 |
+
# and PYPDF2_AVAILABLE
|
| 1287 |
+
# and not extract_images
|
| 1288 |
+
# ):
|
| 1289 |
+
# try:
|
| 1290 |
+
# simple_converter = SimplePdfConverter()
|
| 1291 |
+
# result = simple_converter.convert_pdf_to_markdown(file_path, output_path)
|
| 1292 |
+
# except Exception as e:
|
| 1293 |
+
# return f"[ERROR] PDF conversion error: {str(e)}"
|
| 1294 |
+
# elif DOCLING_AVAILABLE:
|
| 1295 |
+
# try:
|
| 1296 |
+
# converter = DoclingConverter()
|
| 1297 |
+
|
| 1298 |
+
# # 检查文件格式是否支持
|
| 1299 |
+
# if not is_url_input and not converter.is_supported_format(file_path):
|
| 1300 |
+
# supported_formats = [".pdf", ".docx", ".pptx", ".html", ".md", ".txt"]
|
| 1301 |
+
# return f"[ERROR] Unsupported file format. Supported formats: {', '.join(supported_formats)}"
|
| 1302 |
+
# elif is_url_input and not file_path.lower().endswith(
|
| 1303 |
+
# (".pdf", ".docx", ".pptx", ".html", ".md", ".txt")
|
| 1304 |
+
# ):
|
| 1305 |
+
# return f"[ERROR] Unsupported URL format: {file_path}"
|
| 1306 |
+
|
| 1307 |
+
# # 执行转换(支持图片提取)
|
| 1308 |
+
# result = converter.convert_to_markdown(
|
| 1309 |
+
# file_path, output_path, extract_images
|
| 1310 |
+
# )
|
| 1311 |
+
# except Exception as e:
|
| 1312 |
+
# return f"[ERROR] Docling conversion error: {str(e)}"
|
| 1313 |
+
# else:
|
| 1314 |
+
# return (
|
| 1315 |
+
# "[ERROR] No conversion tools available. Please install docling or PyPDF2."
|
| 1316 |
+
# )
|
| 1317 |
+
|
| 1318 |
+
# if result["success"]:
|
| 1319 |
+
# msg = "[SUCCESS] Document converted successfully!\n"
|
| 1320 |
+
# msg += f" Input: {result['input_file']}\n"
|
| 1321 |
+
# msg += f" Output file: {result['output_file']}\n"
|
| 1322 |
+
# msg += f" Conversion time: {result['duration']:.2f} seconds\n"
|
| 1323 |
+
|
| 1324 |
+
# if result["input_size"] > 0:
|
| 1325 |
+
# msg += f" Original size: {result['input_size'] / 1024:.1f} KB\n"
|
| 1326 |
+
# msg += f" Markdown size: {result['output_size'] / 1024:.1f} KB\n"
|
| 1327 |
+
|
| 1328 |
+
# # 显示图片提取信息
|
| 1329 |
+
# if extract_images and "images_extracted" in result:
|
| 1330 |
+
# images_count = result["images_extracted"]
|
| 1331 |
+
# if images_count > 0:
|
| 1332 |
+
# msg += f" Images extracted: {images_count}\n"
|
| 1333 |
+
# msg += f" Images saved to: {os.path.join(os.path.dirname(result['output_file']), 'images')}\n"
|
| 1334 |
+
# else:
|
| 1335 |
+
# msg += " No images found in document\n"
|
| 1336 |
+
|
| 1337 |
+
# # 显示Markdown内容的前几行作为预览
|
| 1338 |
+
# content_lines = result["markdown_content"].split("\n")
|
| 1339 |
+
# preview_lines = content_lines[:5]
|
| 1340 |
+
# if len(content_lines) > 5:
|
| 1341 |
+
# preview_lines.append("...")
|
| 1342 |
+
|
| 1343 |
+
# msg += "\n[PREVIEW] First few lines of converted Markdown:\n"
|
| 1344 |
+
# for line in preview_lines:
|
| 1345 |
+
# msg += f" {line}\n"
|
| 1346 |
+
# else:
|
| 1347 |
+
# msg = "[ERROR] Conversion failed!\n"
|
| 1348 |
+
# msg += f" Error: {result['error']}"
|
| 1349 |
+
|
| 1350 |
+
# return msg
|
| 1351 |
+
|
| 1352 |
+
|
| 1353 |
+
if __name__ == "__main__":
    # Startup banner: report optional-capability status before serving.
    print("📄 Smart PDF Downloader MCP Tool")
    print("📝 Starting server with FastMCP...")

    if DOCLING_AVAILABLE:
        print("✅ Document conversion to Markdown is ENABLED (docling available)")
    else:
        print("❌ Document conversion to Markdown is DISABLED (docling not available)")
        print(" Install docling to enable: pip install docling")

    print("\nAvailable tools:")
    print(
        " • download_files - Download files or move local files from natural language"
    )
    print(" • parse_download_urls - Extract URLs, local paths and destination paths")
    print(" • download_file_to - Download a specific file with options")
    print(" • move_file_to - Move a specific local file with options")
    # Bug fix: convert_document_to_markdown is commented out above and never
    # registered with FastMCP, so it is no longer advertised in this banner.

    if DOCLING_AVAILABLE:
        print("\nSupported formats: PDF, DOCX, PPTX, HTML, TXT, MD")
        print("Features: Image extraction, Layout preservation, Automatic conversion")

    print("")

    # Start the FastMCP server (blocks until shutdown).
    mcp.run()
|
projects/ui/DeepCode/tools/pdf_utils.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
PDF utility functions for the DeepCode agent system.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
import PyPDF2
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def read_pdf_metadata(file_path: Path) -> dict:
    """Read PDF metadata with proper encoding handling.

    Extracts title/author/year from the document info dictionary, falling
    back to heuristics on the first lines of page-1 text when metadata is
    missing.

    Args:
        file_path: Path to the PDF file.

    Returns:
        Dict with keys "title", "authors" (list of str), "year" (str) and
        "first_lines" (list of str); placeholder values are returned when
        extraction fails.
    """
    try:
        print(f"\nAttempting to read PDF metadata from: {file_path}")
        with open(file_path, "rb") as file:
            pdf_reader = PyPDF2.PdfReader(file)
            info = pdf_reader.metadata
            first_page = pdf_reader.pages[0]
            text = first_page.extract_text()
            lines = text.split("\n")[:10]

            title = None
            authors = []

            if info:
                # Metadata values may be None — guard before calling str
                # methods (previously a None /Title raised AttributeError and
                # the broad except below discarded everything).  NUL bytes can
                # leak in from UTF-16-encoded entries, so strip them.
                title = (info.get("/Title") or "").strip().replace("\x00", "")
                author = (info.get("/Author") or "").strip().replace("\x00", "")
                if author:
                    authors = [author]

            # Fall back to the first text line as a title guess.
            if not title and lines:
                title = lines[0].strip()

            # Heuristic: scan lines 2-3 for an author credit.
            if not authors and len(lines) > 1:
                for line in lines[1:3]:
                    if "author" in line.lower() or "by" in line.lower():
                        authors = [line.strip()]
                        break

            # PDF dates look like "D:YYYYMMDDHHmmSS" — strip the "D:" prefix
            # before slicing out the year (the previous [:4] slice returned
            # "D:YY" instead of the year).
            year = "Unknown Year"
            if info:
                raw_date = info.get("/CreationDate") or ""
                if raw_date.startswith("D:"):
                    raw_date = raw_date[2:]
                if len(raw_date) >= 4 and raw_date[:4].isdigit():
                    year = raw_date[:4]

            return {
                "title": title if title else "Unknown Title",
                "authors": authors if authors else ["Unknown Author"],
                "year": year,
                "first_lines": lines,
            }

    except Exception as e:
        print(f"\nError reading PDF: {str(e)}")
        return {
            "title": "Error reading PDF",
            "authors": ["Unknown"],
            "year": "Unknown",
            "first_lines": [],
        }
|
projects/ui/DeepCode/ui/__init__.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
UI Module
|
| 3 |
+
|
| 4 |
+
Streamlit application user interface components module
|
| 5 |
+
|
| 6 |
+
Contains the following submodules:
|
| 7 |
+
- styles: CSS styles
|
| 8 |
+
- components: UI components
|
| 9 |
+
- layout: Page layout
|
| 10 |
+
- handlers: Event handlers
|
| 11 |
+
- streamlit_app: Main application
|
| 12 |
+
- app: Application entry
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
__version__ = "1.0.0"
|
| 16 |
+
__author__ = "DeepCode Team"
|
| 17 |
+
|
| 18 |
+
# Import main components
|
| 19 |
+
from .layout import main_layout
|
| 20 |
+
from .components import display_header, display_features, display_status
|
| 21 |
+
from .handlers import initialize_session_state
|
| 22 |
+
from .styles import get_main_styles
|
| 23 |
+
|
| 24 |
+
# Import application main function
|
| 25 |
+
try:
|
| 26 |
+
from .streamlit_app import main as streamlit_main
|
| 27 |
+
except ImportError:
|
| 28 |
+
# Fallback to absolute import if relative import fails
|
| 29 |
+
import sys
|
| 30 |
+
import os
|
| 31 |
+
|
| 32 |
+
sys.path.insert(0, os.path.dirname(__file__))
|
| 33 |
+
from streamlit_app import main as streamlit_main
|
| 34 |
+
|
| 35 |
+
__all__ = [
|
| 36 |
+
"main_layout",
|
| 37 |
+
"display_header",
|
| 38 |
+
"display_features",
|
| 39 |
+
"display_status",
|
| 40 |
+
"initialize_session_state",
|
| 41 |
+
"get_main_styles",
|
| 42 |
+
"streamlit_main",
|
| 43 |
+
]
|
projects/ui/DeepCode/ui/app.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DeepCode UI Application Entry Point
|
| 3 |
+
|
| 4 |
+
This file serves as the unified entry point for the UI module
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from .streamlit_app import main
|
| 8 |
+
|
| 9 |
+
# Directly export main function for external calls
|
| 10 |
+
__all__ = ["main"]
|
| 11 |
+
|
| 12 |
+
if __name__ == "__main__":
|
| 13 |
+
main()
|
projects/ui/DeepCode/ui/components.py
ADDED
|
@@ -0,0 +1,1450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Streamlit UI Components Module
|
| 3 |
+
|
| 4 |
+
Contains all reusable UI components
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import streamlit as st
|
| 8 |
+
import sys
|
| 9 |
+
from typing import Dict, Any, Optional
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
import json
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def display_header():
    """Render the top-of-page application banner."""
    # Static HTML, styled by the .main-header CSS class injected elsewhere.
    header_html = """
    <div class="main-header">
        <h1>🧬 DeepCode</h1>
        <h3>OPEN-SOURCE CODE AGENT</h3>
        <p>⚡ DATA INTELLIGENCE LAB @ HKU • REVOLUTIONIZING RESEARCH REPRODUCIBILITY ⚡</p>
    </div>
    """
    # unsafe_allow_html is required because the banner is raw HTML.
    st.markdown(header_html, unsafe_allow_html=True)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def display_features() -> None:
    """Display DeepCode AI Agent capabilities.

    Renders four static marketing/feature cards (Paper2Code, Text2Web,
    Text2Backend, CodeRAG) as raw HTML via st.markdown. Purely
    presentational: no state is read or written. The CSS classes used here
    (feature-card-vertical, card-glow-vertical, ...) are assumed to be
    defined in the app-wide stylesheet (ui/styles.py) — TODO confirm.
    """
    # AI Agent core capabilities display area - updated to match README content
    # Section header with animated "neural network" decoration.
    st.markdown(
        """
    <div class="ai-capabilities-section">
        <div class="neural-network">
            <div class="neuron pulse-1"></div>
            <div class="neuron pulse-2"></div>
            <div class="neuron pulse-3"></div>
        </div>
        <h2 class="capabilities-title">🧠 Open Agentic Coding Platform</h2>
        <p class="capabilities-subtitle">Advancing Code Generation with Multi-Agent Systems</p>
    </div>
    """,
        unsafe_allow_html=True,
    )

    # Core functionality modules - Vertical Layout
    # --- Card 1: Paper2Code (primary) ---
    st.markdown(
        """
    <div class="feature-card-vertical primary">
        <div class="card-glow-vertical"></div>
        <div class="feature-header">
            <div class="feature-logo-container">
                <div class="ai-brain-logo">
                    <div class="brain-node node-1"></div>
                    <div class="brain-node node-2"></div>
                    <div class="brain-node node-3"></div>
                    <div class="brain-connection conn-1"></div>
                    <div class="brain-connection conn-2"></div>
                </div>
                <div class="feature-icon-large">🚀</div>
            </div>
            <div class="feature-header-content">
                <h3 class="feature-title-large">Paper2Code: Research-to-Production Pipeline</h3>
                <p class="feature-subtitle">Automated Implementation of Complex Algorithms</p>
            </div>
            <div class="feature-stats">
                <div class="stat-item">
                    <span class="stat-number typing-number">Multi-Modal</span>
                    <span class="stat-label">Analysis</span>
                </div>
                <div class="stat-item">
                    <span class="stat-number typing-number">Production</span>
                    <span class="stat-label">Ready</span>
                </div>
            </div>
        </div>
        <div class="feature-content">
            <div class="content-left">
                <p class="feature-description-large">Multi-modal document analysis engine that extracts algorithmic logic and mathematical models from academic papers, generating optimized implementations with proper data structures while preserving computational complexity characteristics.</p>
                <div class="feature-flow">
                    <div class="flow-step active">
                        <div class="flow-icon">📄</div>
                        <span>Document Parsing</span>
                    </div>
                    <div class="flow-arrow">→</div>
                    <div class="flow-step active">
                        <div class="flow-icon">🧠</div>
                        <span>Algorithm Extraction</span>
                    </div>
                    <div class="flow-arrow">→</div>
                    <div class="flow-step active">
                        <div class="flow-icon">⚡</div>
                        <span>Code Synthesis</span>
                    </div>
                    <div class="flow-arrow">→</div>
                    <div class="flow-step active">
                        <div class="flow-icon">✅</div>
                        <span>Quality Assurance</span>
                    </div>
                </div>
            </div>
            <div class="content-right">
                <div class="code-simulation">
                    <div class="code-header">
                        <span class="code-lang">Python</span>
                        <div class="code-status generating">Generating...</div>
                    </div>
                    <div class="code-lines">
                        <div class="code-line typing">import torch</div>
                        <div class="code-line typing delay-1">import torch.nn as nn</div>
                        <div class="code-line typing delay-2">class ResearchAlgorithm(nn.Module):</div>
                        <div class="code-line typing delay-3">    def __init__(self, config):</div>
                        <div class="code-line typing delay-4">        super().__init__()</div>
                    </div>
                </div>
            </div>
        </div>
    </div>
    """,
        unsafe_allow_html=True,
    )

    # --- Card 2: Text2Web (secondary) ---
    st.markdown(
        """
    <div class="feature-card-vertical secondary">
        <div class="card-glow-vertical"></div>
        <div class="feature-header">
            <div class="feature-logo-container">
                <div class="multi-agent-logo">
                    <div class="agent-node agent-1">🎨</div>
                    <div class="agent-node agent-2">💻</div>
                    <div class="agent-node agent-3">⚡</div>
                    <div class="agent-connection conn-12"></div>
                    <div class="agent-connection conn-23"></div>
                    <div class="agent-connection conn-13"></div>
                </div>
                <div class="feature-icon-large">🎨</div>
            </div>
            <div class="feature-header-content">
                <h3 class="feature-title-large">Text2Web: Automated Prototyping Engine</h3>
                <p class="feature-subtitle">Natural Language to Front-End Code Synthesis</p>
            </div>
            <div class="feature-stats">
                <div class="stat-item">
                    <span class="stat-number typing-number">Intelligent</span>
                    <span class="stat-label">Scaffolding</span>
                </div>
                <div class="stat-item">
                    <span class="stat-number typing-number">Scalable</span>
                    <span class="stat-label">Architecture</span>
                </div>
            </div>
        </div>
        <div class="feature-content">
            <div class="content-left">
                <p class="feature-description-large">Context-aware code generation using fine-tuned language models. Intelligent scaffolding system generating complete application structures including frontend components, maintaining architectural consistency across modules.</p>
                <div class="agent-grid">
                    <div class="agent-card active">
                        <div class="agent-avatar">📝</div>
                        <h4>Intent Understanding</h4>
                        <p>Semantic analysis of requirements</p>
                    </div>
                    <div class="agent-card active">
                        <div class="agent-avatar">🎨</div>
                        <h4>UI Architecture</h4>
                        <p>Component design & structure</p>
                    </div>
                    <div class="agent-card active">
                        <div class="agent-avatar">💻</div>
                        <h4>Code Generation</h4>
                        <p>Functional interface creation</p>
                    </div>
                    <div class="agent-card active">
                        <div class="agent-avatar">✨</div>
                        <h4>Quality Assurance</h4>
                        <p>Automated testing & validation</p>
                    </div>
                </div>
            </div>
            <div class="content-right">
                <div class="collaboration-viz">
                    <div class="collaboration-center">
                        <div class="center-node">🎯</div>
                        <span>Web Application</span>
                    </div>
                    <div class="collaboration-agents">
                        <div class="collab-agent agent-pos-1">
                            <div class="pulse-ring"></div>
                            📝
                        </div>
                        <div class="collab-agent agent-pos-2">
                            <div class="pulse-ring"></div>
                            🏗️
                        </div>
                        <div class="collab-agent agent-pos-3">
                            <div class="pulse-ring"></div>
                            ⚙️
                        </div>
                        <div class="collab-agent agent-pos-4">
                            <div class="pulse-ring"></div>
                            🧪
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
    """,
        unsafe_allow_html=True,
    )

    # --- Card 3: Text2Backend (accent) ---
    st.markdown(
        """
    <div class="feature-card-vertical accent">
        <div class="card-glow-vertical"></div>
        <div class="feature-header">
            <div class="feature-logo-container">
                <div class="future-logo">
                    <div class="orbit orbit-1">
                        <div class="orbit-node">⚙️</div>
                    </div>
                    <div class="orbit orbit-2">
                        <div class="orbit-node">🔧</div>
                    </div>
                    <div class="orbit-center">🚀</div>
                </div>
                <div class="feature-icon-large">⚙️</div>
            </div>
            <div class="feature-header-content">
                <h3 class="feature-title-large">Text2Backend: Scalable Architecture Generator</h3>
                <p class="feature-subtitle">Intelligent Server-Side Development</p>
            </div>
            <div class="feature-stats">
                <div class="stat-item">
                    <span class="stat-number typing-number">Database</span>
                    <span class="stat-label">Integration</span>
                </div>
                <div class="stat-item">
                    <span class="stat-number typing-number">API</span>
                    <span class="stat-label">Endpoints</span>
                </div>
            </div>
        </div>
        <div class="feature-content">
            <div class="content-left">
                <p class="feature-description-large">Generates efficient, scalable backend systems with database schemas, API endpoints, and microservices architecture. Uses dependency analysis to ensure scalable architecture from initial generation with comprehensive testing.</p>
                <div class="vision-demo">
                    <div class="demo-input">
                        <div class="input-icon">💬</div>
                        <div class="input-text typing">"Build a scalable e-commerce API with user authentication and payment processing"</div>
                    </div>
                    <div class="demo-arrow">⬇️</div>
                    <div class="demo-output">
                        <div class="output-items">
                            <div class="output-item">🏗️ Microservices Architecture</div>
                            <div class="output-item">🔒 Authentication & Security</div>
                            <div class="output-item">🗄️ Database Schema Design</div>
                            <div class="output-item">📊 API Documentation & Testing</div>
                        </div>
                    </div>
                </div>
            </div>
            <div class="content-right">
                <div class="future-timeline">
                    <div class="timeline-item completed">
                        <div class="timeline-marker">✅</div>
                        <div class="timeline-content">
                            <h4>API Design</h4>
                            <p>RESTful endpoints</p>
                        </div>
                    </div>
                    <div class="timeline-item completed">
                        <div class="timeline-marker">✅</div>
                        <div class="timeline-content">
                            <h4>Database Layer</h4>
                            <p>Schema & relationships</p>
                        </div>
                    </div>
                    <div class="timeline-item active">
                        <div class="timeline-marker">🔄</div>
                        <div class="timeline-content">
                            <h4>Security Layer</h4>
                            <p>Authentication & authorization</p>
                        </div>
                    </div>
                    <div class="timeline-item future">
                        <div class="timeline-marker">🚀</div>
                        <div class="timeline-content">
                            <h4>Deployment</h4>
                            <p>CI/CD integration</p>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
    """,
        unsafe_allow_html=True,
    )

    # --- Card 4: CodeRAG (tech) ---
    st.markdown(
        """
    <div class="feature-card-vertical tech">
        <div class="card-glow-vertical"></div>
        <div class="feature-header">
            <div class="feature-logo-container">
                <div class="opensource-logo">
                    <div class="github-stars">
                        <div class="star star-1">📄</div>
                        <div class="star star-2">🤖</div>
                        <div class="star star-3">⚡</div>
                    </div>
                    <div class="community-nodes">
                        <div class="community-node">🧠</div>
                        <div class="community-node">🔍</div>
                        <div class="community-node">⚙️</div>
                    </div>
                </div>
                <div class="feature-icon-large">🎯</div>
            </div>
            <div class="feature-header-content">
                <h3 class="feature-title-large">CodeRAG Integration System</h3>
                <p class="feature-subtitle">Advanced Multi-Agent Orchestration</p>
            </div>
            <div class="feature-stats">
                <div class="stat-item">
                    <span class="stat-number typing-number">Global</span>
                    <span class="stat-label">Code Analysis</span>
                </div>
                <div class="stat-item">
                    <span class="stat-number typing-number">Intelligent</span>
                    <span class="stat-label">Orchestration</span>
                </div>
            </div>
        </div>
        <div class="feature-content">
            <div class="content-left">
                <p class="feature-description-large">Advanced retrieval-augmented generation combining semantic vector embeddings with graph-based dependency analysis. Central orchestrating agent coordinates specialized agents with dynamic task planning and intelligent memory management.</p>
                <div class="community-features">
                    <div class="community-feature">
                        <div class="feature-icon-small">🧠</div>
                        <div class="feature-text">
                            <h4>Intelligent Orchestration</h4>
                            <p>Central decision-making with dynamic planning algorithms</p>
                        </div>
                    </div>
                    <div class="community-feature">
                        <div class="feature-icon-small">🔍</div>
                        <div class="feature-text">
                            <h4>CodeRAG System</h4>
                            <p>Semantic analysis with dependency graph mapping</p>
                        </div>
                    </div>
                    <div class="community-feature">
                        <div class="feature-icon-small">⚡</div>
                        <div class="feature-text">
                            <h4>Quality Assurance</h4>
                            <p>Automated testing, validation, and documentation</p>
                        </div>
                    </div>
                </div>
            </div>
            <div class="content-right">
                <div class="tech-ecosystem">
                    <div class="ecosystem-center">
                        <div class="center-logo">🧠</div>
                        <span>Multi-Agent Engine</span>
                    </div>
                    <div class="ecosystem-ring">
                        <div class="ecosystem-item item-1">
                            <div class="item-icon">🎯</div>
                            <span>Central Orchestration</span>
                        </div>
                        <div class="ecosystem-item item-2">
                            <div class="item-icon">📝</div>
                            <span>Intent Understanding</span>
                        </div>
                        <div class="ecosystem-item item-3">
                            <div class="item-icon">🔍</div>
                            <span>Code Mining & Indexing</span>
                        </div>
                        <div class="ecosystem-item item-4">
                            <div class="item-icon">🧬</div>
                            <span>Code Generation</span>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>
    """,
        unsafe_allow_html=True,
    )
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
def display_status(message: str, status_type: str = "info"):
    """
    Render a status banner for the given message.

    Args:
        message: Status message text shown inside the banner
        status_type: One of "success", "error", "warning", "info";
            anything else falls back to the informational presentation
    """
    # Each status type maps to a (CSS class, leading icon) pair.
    presentation = {
        "success": ("status-success", "✅"),
        "error": ("status-error", "❌"),
        "warning": ("status-warning", "⚠️"),
        "info": ("status-info", "ℹ️"),
    }
    # Unknown types degrade gracefully to the "info" look.
    css_class, icon = presentation.get(status_type, ("status-info", "ℹ️"))

    st.markdown(
        f"""
    <div class="{css_class}">
        {icon} {message}
    </div>
    """,
        unsafe_allow_html=True,
    )
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def system_status_component() -> None:
    """System status check component.

    Renders a two-column diagnostics panel: environment + module
    availability on the left, Streamlit context and asyncio event-loop
    state on the right. Read-only; never raises (all probes are wrapped).
    """
    st.markdown("### 🔧 System Status & Diagnostics")

    # Basic system information
    col1, col2 = st.columns(2)

    with col1:
        st.markdown("#### 📊 Environment")
        st.info(f"**Python:** {sys.version.split()[0]}")
        st.info(f"**Platform:** {sys.platform}")

        # Check key modules
        # (module import name, human-readable label shown in the UI)
        modules_to_check = [
            ("streamlit", "Streamlit UI Framework"),
            ("asyncio", "Async Processing"),
            ("nest_asyncio", "Nested Event Loops"),
            ("concurrent.futures", "Threading Support"),
        ]

        st.markdown("#### 📦 Module Status")
        for module_name, description in modules_to_check:
            try:
                # __import__ handles dotted names like "concurrent.futures";
                # we only care whether the import succeeds, not the module.
                __import__(module_name)
                st.success(f"✅ {description}")
            except ImportError:
                st.error(f"❌ {description} - Missing")

    with col2:
        st.markdown("#### ⚙️ Threading & Context")

        # Check Streamlit context
        # NOTE: get_script_run_ctx is a private Streamlit API and may move
        # between versions — hence the broad Exception catch.
        try:
            from streamlit.runtime.scriptrunner import get_script_run_ctx

            ctx = get_script_run_ctx()
            if ctx:
                st.success("✅ Streamlit Context Available")
            else:
                st.warning("⚠️ Streamlit Context Not Found")
        except Exception as e:
            st.error(f"❌ Context Check Failed: {e}")

        # Check event loop
        try:
            import asyncio

            try:
                # get_event_loop raises RuntimeError when no loop exists
                # in this thread — that is the expected "fresh" state.
                loop = asyncio.get_event_loop()
                if loop.is_running():
                    st.info("🔄 Event Loop Running")
                else:
                    st.info("⏸️ Event Loop Not Running")
            except RuntimeError:
                st.info("🆕 No Event Loop (Normal)")
        except Exception as e:
            st.error(f"❌ Event Loop Check Failed: {e}")
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
def error_troubleshooting_component() -> None:
    """Error troubleshooting component.

    Renders a collapsed expander with self-help tips plus a button that
    wipes the entire Streamlit session state and triggers a rerun.
    """
    with st.expander("🛠️ Troubleshooting Tips", expanded=False):
        # NOTE(review): item 4 below says "5-10 minutes" but also a
        # "5-minute maximum" — these contradict each other; confirm the
        # actual processing timeout and reconcile the text.
        st.markdown("""
        ### Common Issues & Solutions

        #### 1. ScriptRunContext Warnings
        - **What it means:** Threading context warnings in Streamlit
        - **Solution:** These warnings are usually safe to ignore
        - **Prevention:** Restart the application if persistent

        #### 2. Async Processing Errors
        - **Symptoms:** "Event loop" or "Thread" errors
        - **Solution:** The app uses multiple fallback methods
        - **Action:** Try refreshing the page or restarting

        #### 3. File Upload Issues
        - **Check:** File size < 200MB
        - **Formats:** PDF, DOCX, TXT, HTML, MD
        - **Action:** Try a different file format

        #### 4. Processing Timeout
        - **Normal:** Large papers may take 5-10 minutes
        - **Action:** Wait patiently, check progress indicators
        - **Limit:** 5-minute maximum processing time

        #### 5. Memory Issues
        - **Symptoms:** "Out of memory" errors
        - **Solution:** Close other applications
        - **Action:** Try smaller/simpler papers first
        """)

        if st.button("🔄 Reset Application State"):
            # Clear all session state
            # list() snapshot avoids mutating the mapping while iterating.
            for key in list(st.session_state.keys()):
                del st.session_state[key]
            st.success("Application state reset! Please refresh the page.")
            st.rerun()
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
def sidebar_control_panel() -> Dict[str, Any]:
    """
    Sidebar control panel

    Renders engine status, workflow settings, system info, diagnostics,
    and the processing-history list inside the Streamlit sidebar.

    Assumes st.session_state.processing has been initialized by the app
    entry point before this is called — TODO confirm against caller.

    Returns:
        Control panel state: keys "processing" (bool), "history_count"
        (int), "has_history" (bool), "enable_indexing" (bool)
    """
    with st.sidebar:
        st.markdown("### 🎛️ Control Panel")

        # Application status
        if st.session_state.processing:
            st.warning("🟡 Engine Processing...")
        else:
            st.info("⚪ Engine Ready")

        # Workflow configuration options
        st.markdown("### ⚙️ Workflow Settings")

        # Indexing functionality toggle
        # Widget key "enable_indexing" persists the choice in session state.
        enable_indexing = st.checkbox(
            "🗂️ Enable Codebase Indexing",
            value=True,
            help="Enable GitHub repository download and codebase indexing. Disabling this will skip Phase 6 (GitHub Download) and Phase 7 (Codebase Indexing) for faster processing.",
            key="enable_indexing",
        )

        if enable_indexing:
            st.success("✅ Full workflow with indexing enabled")
        else:
            st.info("⚡ Fast mode - indexing disabled")

        # System information
        st.markdown("### 📊 System Info")
        st.info(f"**Python:** {sys.version.split()[0]}")
        st.info(f"**Platform:** {sys.platform}")

        # Add system status check
        with st.expander("🔧 System Status"):
            system_status_component()

        # Add error diagnostics
        error_troubleshooting_component()

        st.markdown("---")

        # Processing history
        history_info = display_processing_history()

    return {
        "processing": st.session_state.processing,
        "history_count": history_info["count"],
        "has_history": history_info["has_history"],
        "enable_indexing": enable_indexing,  # Add indexing toggle state
    }
|
| 580 |
+
|
| 581 |
+
|
| 582 |
+
def display_processing_history() -> Dict[str, Any]:
    """
    Display processing history

    Renders the most recent task results (newest first, capped at 10)
    as expanders, plus a clear-history control.

    Returns:
        History information: "has_history" (bool) and "count" — the
        total number of recorded tasks, not just the rendered subset.
    """
    st.markdown("### 📊 Processing History")

    results = st.session_state.results
    history_count = len(results)
    has_history = bool(results)

    if not has_history:
        st.info("No processing history yet")
    else:
        # Only show last 10 records, most recent on top.
        for result in reversed(results[-10:]):
            ok = result.get("status") == "success"
            status_icon = "✅" if ok else "❌"
            with st.expander(
                f"{status_icon} Task - {result.get('timestamp', 'Unknown')}"
            ):
                st.write(f"**Status:** {result.get('status', 'Unknown')}")
                if result.get("input_type"):
                    st.write(f"**Type:** {result['input_type']}")
                if result.get("error"):
                    st.error(f"Error: {result['error']}")

    # Clear-history control, only offered when there is something to clear.
    if has_history:
        col1, col2 = st.columns(2)
        with col1:
            if st.button("🗑️ Clear History", use_container_width=True):
                st.session_state.results = []
                st.rerun()
        with col2:
            st.info(f"Total: {history_count} tasks")

    return {"has_history": has_history, "count": history_count}
|
| 621 |
+
|
| 622 |
+
|
| 623 |
+
def _cleanup_temp_file(path: str) -> None:
    """Best-effort deletion of a temporary file; failures are ignored."""
    import os

    try:
        os.unlink(path)
    except Exception:
        pass


def file_input_component(task_counter: int) -> Optional[str]:
    """
    File input component with automatic PDF conversion

    Renders a file uploader, persists the upload to a temporary file and,
    for non-PDF formats, converts it to PDF via tools.pdf_converter.
    The temporary source file is always cleaned up once it is no longer
    needed (after conversion, on missing dependencies, or on failure).

    Args:
        task_counter: Task counter used to namespace the uploader widget key

    Returns:
        Path to a PDF file, or None when nothing was uploaded, required
        conversion dependencies are missing, or conversion failed
    """
    uploaded_file = st.file_uploader(
        "Upload research paper file",
        type=[
            "pdf",
            "docx",
            "doc",
            "ppt",
            "pptx",
            "xls",
            "xlsx",
            "html",
            "htm",
            "txt",
            "md",
        ],
        help="Supported formats: PDF, Word, PowerPoint, Excel, HTML, Text (all files will be converted to PDF)",
        key=f"file_uploader_{task_counter}",
    )

    if uploaded_file is None:
        return None

    # Display file information
    file_size = len(uploaded_file.getvalue())
    st.info(f"📄 **File:** {uploaded_file.name} ({format_file_size(file_size)})")

    # Save uploaded file to temporary directory
    try:
        import tempfile
        import sys
        from pathlib import Path

        # Add project root to path so tools.pdf_converter can be imported
        current_dir = Path(__file__).parent
        project_root = current_dir.parent
        if str(project_root) not in sys.path:
            sys.path.insert(0, str(project_root))

        # Import PDF converter
        from tools.pdf_converter import PDFConverter

        # Persist the upload so the converter can read it from disk.
        # delete=False: the file must outlive this `with` block.
        file_ext = uploaded_file.name.split(".")[-1].lower()
        with tempfile.NamedTemporaryFile(
            delete=False, suffix=f".{file_ext}"
        ) as tmp_file:
            tmp_file.write(uploaded_file.getvalue())
            original_file_path = tmp_file.name

        st.success("✅ File uploaded successfully!")

        # PDFs pass straight through without conversion
        if file_ext == "pdf":
            st.info("📑 File is already in PDF format, no conversion needed.")
            return original_file_path

        # Convert to PDF
        with st.spinner(f"🔄 Converting {file_ext.upper()} to PDF..."):
            try:
                converter = PDFConverter()

                # Verify external conversion dependencies before attempting
                deps = converter.check_dependencies()
                missing_deps = []

                # Office formats need LibreOffice; plain text needs ReportLab.
                if (
                    file_ext in {"doc", "docx", "ppt", "pptx", "xls", "xlsx"}
                    and not deps["libreoffice"]
                ):
                    missing_deps.append("LibreOffice")

                if file_ext in {"txt", "md"} and not deps["reportlab"]:
                    missing_deps.append("ReportLab")

                if missing_deps:
                    st.error(f"❌ Missing dependencies: {', '.join(missing_deps)}")
                    st.info("💡 Please install the required dependencies:")
                    if "LibreOffice" in missing_deps:
                        st.code(
                            "# Install LibreOffice\n"
                            "# Windows: Download from https://www.libreoffice.org/\n"
                            "# macOS: brew install --cask libreoffice\n"
                            "# Ubuntu: sudo apt-get install libreoffice"
                        )
                    if "ReportLab" in missing_deps:
                        st.code("pip install reportlab")

                    _cleanup_temp_file(original_file_path)
                    return None

                # Perform conversion
                pdf_path = converter.convert_to_pdf(original_file_path)

                # The source file is no longer needed once converted
                _cleanup_temp_file(original_file_path)

                # Display conversion result
                pdf_size = Path(pdf_path).stat().st_size
                st.success("✅ Successfully converted to PDF!")
                st.info(
                    f"📑 **PDF File:** {Path(pdf_path).name} ({format_file_size(pdf_size)})"
                )

                return str(pdf_path)

            except Exception as e:
                st.error(f"❌ PDF conversion failed: {str(e)}")
                st.warning("💡 You can try:")
                st.markdown("- Converting the file to PDF manually")
                st.markdown("- Using a different file format")
                st.markdown("- Checking if the file is corrupted")

                _cleanup_temp_file(original_file_path)
                return None

    except Exception as e:
        st.error(f"❌ Failed to process uploaded file: {str(e)}")
        return None
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def url_input_component(task_counter: int) -> Optional[str]:
|
| 766 |
+
"""
|
| 767 |
+
URL input component
|
| 768 |
+
|
| 769 |
+
Args:
|
| 770 |
+
task_counter: Task counter
|
| 771 |
+
|
| 772 |
+
Returns:
|
| 773 |
+
URL or None
|
| 774 |
+
"""
|
| 775 |
+
url_input = st.text_input(
|
| 776 |
+
"Enter paper URL",
|
| 777 |
+
placeholder="https://arxiv.org/abs/..., https://ieeexplore.ieee.org/..., etc.",
|
| 778 |
+
help="Enter a direct link to a research paper (arXiv, IEEE, ACM, etc.)",
|
| 779 |
+
key=f"url_input_{task_counter}",
|
| 780 |
+
)
|
| 781 |
+
|
| 782 |
+
if url_input:
|
| 783 |
+
# Simple URL validation
|
| 784 |
+
if url_input.startswith(("http://", "https://")):
|
| 785 |
+
st.success(f"✅ URL entered: {url_input}")
|
| 786 |
+
return url_input
|
| 787 |
+
else:
|
| 788 |
+
st.warning("⚠️ Please enter a valid URL starting with http:// or https://")
|
| 789 |
+
return None
|
| 790 |
+
|
| 791 |
+
return None
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
def chat_input_component(task_counter: int) -> Optional[str]:
|
| 795 |
+
"""
|
| 796 |
+
Chat input component for coding requirements
|
| 797 |
+
|
| 798 |
+
Args:
|
| 799 |
+
task_counter: Task counter
|
| 800 |
+
|
| 801 |
+
Returns:
|
| 802 |
+
User coding requirements or None
|
| 803 |
+
"""
|
| 804 |
+
st.markdown(
|
| 805 |
+
"""
|
| 806 |
+
<div style="background: linear-gradient(135deg, #1e3c72 0%, #2a5298 100%);
|
| 807 |
+
border-radius: 10px;
|
| 808 |
+
padding: 15px;
|
| 809 |
+
margin-bottom: 20px;
|
| 810 |
+
border-left: 4px solid #4dd0e1;">
|
| 811 |
+
<h4 style="color: white; margin: 0 0 10px 0; font-size: 1.1rem;">
|
| 812 |
+
💬 Describe Your Coding Requirements
|
| 813 |
+
</h4>
|
| 814 |
+
<p style="color: #e0f7fa; margin: 0; font-size: 0.9rem;">
|
| 815 |
+
Tell us what you want to build. Our AI will analyze your requirements and generate a comprehensive implementation plan.
|
| 816 |
+
</p>
|
| 817 |
+
</div>
|
| 818 |
+
""",
|
| 819 |
+
unsafe_allow_html=True,
|
| 820 |
+
)
|
| 821 |
+
|
| 822 |
+
# Examples to help users understand what they can input
|
| 823 |
+
with st.expander("💡 See Examples", expanded=False):
|
| 824 |
+
st.markdown("""
|
| 825 |
+
**Academic Research Examples:**
|
| 826 |
+
- "I need to implement a reinforcement learning algorithm for robotic control"
|
| 827 |
+
- "Create a neural network for image classification with attention mechanisms"
|
| 828 |
+
- "Build a natural language processing pipeline for sentiment analysis"
|
| 829 |
+
|
| 830 |
+
**Engineering Project Examples:**
|
| 831 |
+
- "Develop a web application for project management with user authentication"
|
| 832 |
+
- "Create a data visualization dashboard for sales analytics"
|
| 833 |
+
- "Build a REST API for a e-commerce platform with database integration"
|
| 834 |
+
|
| 835 |
+
**Mixed Project Examples:**
|
| 836 |
+
- "Implement a machine learning model with a web interface for real-time predictions"
|
| 837 |
+
- "Create a research tool with user-friendly GUI for data analysis"
|
| 838 |
+
- "Build a chatbot with both academic evaluation metrics and production deployment"
|
| 839 |
+
""")
|
| 840 |
+
|
| 841 |
+
# Main text area for user input
|
| 842 |
+
user_input = st.text_area(
|
| 843 |
+
"Enter your coding requirements:",
|
| 844 |
+
placeholder="""Example: I want to build a web application that can analyze user sentiment from social media posts. The application should have:
|
| 845 |
+
|
| 846 |
+
1. A user-friendly interface where users can input text or upload files
|
| 847 |
+
2. A machine learning backend that performs sentiment analysis
|
| 848 |
+
3. Visualization of results with charts and statistics
|
| 849 |
+
4. User authentication and data storage
|
| 850 |
+
5. REST API for integration with other applications
|
| 851 |
+
|
| 852 |
+
The system should be scalable and production-ready, with proper error handling and documentation.""",
|
| 853 |
+
height=200,
|
| 854 |
+
help="Describe what you want to build, including functionality, technologies, and any specific requirements",
|
| 855 |
+
key=f"chat_input_{task_counter}",
|
| 856 |
+
)
|
| 857 |
+
|
| 858 |
+
if user_input and len(user_input.strip()) > 20: # Minimum length check
|
| 859 |
+
# Display input summary
|
| 860 |
+
word_count = len(user_input.split())
|
| 861 |
+
char_count = len(user_input)
|
| 862 |
+
|
| 863 |
+
st.success(
|
| 864 |
+
f"✅ **Requirements captured!** ({word_count} words, {char_count} characters)"
|
| 865 |
+
)
|
| 866 |
+
|
| 867 |
+
# Show a preview of what will be analyzed
|
| 868 |
+
with st.expander("📋 Preview your requirements", expanded=False):
|
| 869 |
+
st.text_area(
|
| 870 |
+
"Your input:",
|
| 871 |
+
user_input,
|
| 872 |
+
height=100,
|
| 873 |
+
disabled=True,
|
| 874 |
+
key=f"preview_{task_counter}",
|
| 875 |
+
)
|
| 876 |
+
|
| 877 |
+
return user_input.strip()
|
| 878 |
+
|
| 879 |
+
elif user_input and len(user_input.strip()) <= 20:
|
| 880 |
+
st.warning(
|
| 881 |
+
"⚠️ Please provide more detailed requirements (at least 20 characters)"
|
| 882 |
+
)
|
| 883 |
+
return None
|
| 884 |
+
|
| 885 |
+
return None
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
def input_method_selector(task_counter: int) -> tuple[Optional[str], Optional[str]]:
|
| 889 |
+
"""
|
| 890 |
+
Input method selector
|
| 891 |
+
|
| 892 |
+
Args:
|
| 893 |
+
task_counter: Task counter
|
| 894 |
+
|
| 895 |
+
Returns:
|
| 896 |
+
(input_source, input_type)
|
| 897 |
+
"""
|
| 898 |
+
st.markdown(
|
| 899 |
+
"""
|
| 900 |
+
<h3 style="color: var(--text-primary) !important; font-family: 'Inter', sans-serif !important; font-weight: 600 !important; font-size: 1.5rem !important; margin-bottom: 1rem !important;">
|
| 901 |
+
🚀 Start Processing
|
| 902 |
+
</h3>
|
| 903 |
+
""",
|
| 904 |
+
unsafe_allow_html=True,
|
| 905 |
+
)
|
| 906 |
+
|
| 907 |
+
# Input options
|
| 908 |
+
st.markdown(
|
| 909 |
+
"""
|
| 910 |
+
<p style="color: var(--text-secondary) !important; font-family: 'Inter', sans-serif !important; font-weight: 500 !important; margin-bottom: 1rem !important;">
|
| 911 |
+
Choose input method:
|
| 912 |
+
</p>
|
| 913 |
+
""",
|
| 914 |
+
unsafe_allow_html=True,
|
| 915 |
+
)
|
| 916 |
+
|
| 917 |
+
input_method = st.radio(
|
| 918 |
+
"Choose your input method:",
|
| 919 |
+
["📁 Upload File", "🌐 Enter URL", "💬 Chat Input"],
|
| 920 |
+
horizontal=True,
|
| 921 |
+
label_visibility="hidden",
|
| 922 |
+
key=f"input_method_{task_counter}",
|
| 923 |
+
)
|
| 924 |
+
|
| 925 |
+
input_source = None
|
| 926 |
+
input_type = None
|
| 927 |
+
|
| 928 |
+
if input_method == "📁 Upload File":
|
| 929 |
+
input_source = file_input_component(task_counter)
|
| 930 |
+
input_type = "file" if input_source else None
|
| 931 |
+
elif input_method == "🌐 Enter URL":
|
| 932 |
+
input_source = url_input_component(task_counter)
|
| 933 |
+
input_type = "url" if input_source else None
|
| 934 |
+
else: # Chat input
|
| 935 |
+
input_source = chat_input_component(task_counter)
|
| 936 |
+
input_type = "chat" if input_source else None
|
| 937 |
+
|
| 938 |
+
return input_source, input_type
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
def results_display_component(result: Dict[str, Any], task_counter: int):
|
| 942 |
+
"""
|
| 943 |
+
Results display component
|
| 944 |
+
|
| 945 |
+
Args:
|
| 946 |
+
result: Processing result
|
| 947 |
+
task_counter: Task counter
|
| 948 |
+
"""
|
| 949 |
+
st.markdown("### 📋 Processing Results")
|
| 950 |
+
|
| 951 |
+
# Display overall status
|
| 952 |
+
if result.get("status") == "success":
|
| 953 |
+
st.success("🎉 **All workflows completed successfully!**")
|
| 954 |
+
else:
|
| 955 |
+
st.error("❌ **Processing encountered errors**")
|
| 956 |
+
|
| 957 |
+
# Create tabs to organize different phase results
|
| 958 |
+
tab1, tab2, tab3, tab4 = st.tabs(
|
| 959 |
+
[
|
| 960 |
+
"📊 Analysis Phase",
|
| 961 |
+
"📥 Download Phase",
|
| 962 |
+
"🔧 Implementation Phase",
|
| 963 |
+
"📁 Generated Files",
|
| 964 |
+
]
|
| 965 |
+
)
|
| 966 |
+
|
| 967 |
+
with tab1:
|
| 968 |
+
st.markdown("#### 📊 Paper Analysis Results")
|
| 969 |
+
with st.expander("Analysis Output Details", expanded=True):
|
| 970 |
+
analysis_result = result.get(
|
| 971 |
+
"analysis_result", "No analysis result available"
|
| 972 |
+
)
|
| 973 |
+
try:
|
| 974 |
+
# Try to parse JSON result for formatted display
|
| 975 |
+
if analysis_result.strip().startswith("{"):
|
| 976 |
+
parsed_analysis = json.loads(analysis_result)
|
| 977 |
+
st.json(parsed_analysis)
|
| 978 |
+
else:
|
| 979 |
+
st.text_area(
|
| 980 |
+
"Raw Analysis Output",
|
| 981 |
+
analysis_result,
|
| 982 |
+
height=300,
|
| 983 |
+
key=f"analysis_{task_counter}",
|
| 984 |
+
)
|
| 985 |
+
except Exception:
|
| 986 |
+
st.text_area(
|
| 987 |
+
"Analysis Output",
|
| 988 |
+
analysis_result,
|
| 989 |
+
height=300,
|
| 990 |
+
key=f"analysis_{task_counter}",
|
| 991 |
+
)
|
| 992 |
+
|
| 993 |
+
with tab2:
|
| 994 |
+
st.markdown("#### 📥 Download & Preparation Results")
|
| 995 |
+
with st.expander("Download Process Details", expanded=True):
|
| 996 |
+
download_result = result.get(
|
| 997 |
+
"download_result", "No download result available"
|
| 998 |
+
)
|
| 999 |
+
st.text_area(
|
| 1000 |
+
"Download Output",
|
| 1001 |
+
download_result,
|
| 1002 |
+
height=300,
|
| 1003 |
+
key=f"download_{task_counter}",
|
| 1004 |
+
)
|
| 1005 |
+
|
| 1006 |
+
# Try to extract file path information
|
| 1007 |
+
if "paper_dir" in download_result or "path" in download_result.lower():
|
| 1008 |
+
st.info(
|
| 1009 |
+
"💡 **Tip:** Look for file paths in the output above to locate generated files"
|
| 1010 |
+
)
|
| 1011 |
+
|
| 1012 |
+
with tab3:
|
| 1013 |
+
st.markdown("#### 🔧 Code Implementation Results")
|
| 1014 |
+
repo_result = result.get("repo_result", "No implementation result available")
|
| 1015 |
+
|
| 1016 |
+
# Analyze implementation results to extract key information
|
| 1017 |
+
if "successfully" in repo_result.lower():
|
| 1018 |
+
st.success("✅ Code implementation completed successfully!")
|
| 1019 |
+
elif "failed" in repo_result.lower():
|
| 1020 |
+
st.warning("⚠️ Code implementation encountered issues")
|
| 1021 |
+
else:
|
| 1022 |
+
st.info("ℹ️ Code implementation status unclear")
|
| 1023 |
+
|
| 1024 |
+
with st.expander("Implementation Details", expanded=True):
|
| 1025 |
+
st.text_area(
|
| 1026 |
+
"Repository & Code Generation Output",
|
| 1027 |
+
repo_result,
|
| 1028 |
+
height=300,
|
| 1029 |
+
key=f"repo_{task_counter}",
|
| 1030 |
+
)
|
| 1031 |
+
|
| 1032 |
+
# Try to extract generated code directory information
|
| 1033 |
+
if "Code generated in:" in repo_result:
|
| 1034 |
+
code_dir = repo_result.split("Code generated in:")[-1].strip()
|
| 1035 |
+
st.markdown(f"**📁 Generated Code Directory:** `{code_dir}`")
|
| 1036 |
+
|
| 1037 |
+
# Display workflow stage details
|
| 1038 |
+
st.markdown("#### 🔄 Workflow Stages Completed")
|
| 1039 |
+
stages = [
|
| 1040 |
+
("📄 Document Processing", "✅"),
|
| 1041 |
+
("🔍 Reference Analysis", "✅"),
|
| 1042 |
+
("📋 Plan Generation", "✅"),
|
| 1043 |
+
("📦 Repository Download", "✅"),
|
| 1044 |
+
("🗂️ Codebase Indexing", "✅" if "indexing" in repo_result.lower() else "⚠️"),
|
| 1045 |
+
(
|
| 1046 |
+
"⚙️ Code Implementation",
|
| 1047 |
+
"✅" if "successfully" in repo_result.lower() else "⚠️",
|
| 1048 |
+
),
|
| 1049 |
+
]
|
| 1050 |
+
|
| 1051 |
+
for stage_name, status in stages:
|
| 1052 |
+
st.markdown(f"- {stage_name}: {status}")
|
| 1053 |
+
|
| 1054 |
+
with tab4:
|
| 1055 |
+
st.markdown("#### 📁 Generated Files & Reports")
|
| 1056 |
+
|
| 1057 |
+
# Try to extract file paths from results
|
| 1058 |
+
all_results = (
|
| 1059 |
+
f"{result.get('download_result', '')} {result.get('repo_result', '')}"
|
| 1060 |
+
)
|
| 1061 |
+
|
| 1062 |
+
# Look for possible file path patterns
|
| 1063 |
+
import re
|
| 1064 |
+
|
| 1065 |
+
file_patterns = [
|
| 1066 |
+
r"([^\s]+\.txt)",
|
| 1067 |
+
r"([^\s]+\.json)",
|
| 1068 |
+
r"([^\s]+\.py)",
|
| 1069 |
+
r"([^\s]+\.md)",
|
| 1070 |
+
r"paper_dir[:\s]+([^\s]+)",
|
| 1071 |
+
r"saved to ([^\s]+)",
|
| 1072 |
+
r"generated in[:\s]+([^\s]+)",
|
| 1073 |
+
]
|
| 1074 |
+
|
| 1075 |
+
found_files = set()
|
| 1076 |
+
for pattern in file_patterns:
|
| 1077 |
+
matches = re.findall(pattern, all_results, re.IGNORECASE)
|
| 1078 |
+
for match in matches:
|
| 1079 |
+
if isinstance(match, tuple):
|
| 1080 |
+
found_files.update(match)
|
| 1081 |
+
else:
|
| 1082 |
+
found_files.add(match)
|
| 1083 |
+
|
| 1084 |
+
if found_files:
|
| 1085 |
+
st.markdown("**📄 Detected Generated Files:**")
|
| 1086 |
+
for file_path in sorted(found_files):
|
| 1087 |
+
if file_path and len(file_path) > 3: # Filter out too short matches
|
| 1088 |
+
st.markdown(f"- `{file_path}`")
|
| 1089 |
+
else:
|
| 1090 |
+
st.info(
|
| 1091 |
+
"No specific file paths detected in the output. Check the detailed results above for file locations."
|
| 1092 |
+
)
|
| 1093 |
+
|
| 1094 |
+
# Provide option to view raw results
|
| 1095 |
+
with st.expander("View Raw Processing Results"):
|
| 1096 |
+
st.json(
|
| 1097 |
+
{
|
| 1098 |
+
"analysis_result": result.get("analysis_result", ""),
|
| 1099 |
+
"download_result": result.get("download_result", ""),
|
| 1100 |
+
"repo_result": result.get("repo_result", ""),
|
| 1101 |
+
"status": result.get("status", "unknown"),
|
| 1102 |
+
}
|
| 1103 |
+
)
|
| 1104 |
+
|
| 1105 |
+
# Action buttons
|
| 1106 |
+
st.markdown("---")
|
| 1107 |
+
col1, col2 = st.columns(2)
|
| 1108 |
+
|
| 1109 |
+
with col1:
|
| 1110 |
+
if st.button("🔄 Process New Paper", type="primary", use_container_width=True):
|
| 1111 |
+
st.session_state.show_results = False
|
| 1112 |
+
st.session_state.last_result = None
|
| 1113 |
+
st.session_state.last_error = None
|
| 1114 |
+
st.session_state.task_counter += 1
|
| 1115 |
+
st.rerun()
|
| 1116 |
+
|
| 1117 |
+
with col2:
|
| 1118 |
+
if st.button("💾 Export Results", type="secondary", use_container_width=True):
|
| 1119 |
+
# Create result export
|
| 1120 |
+
export_data = {
|
| 1121 |
+
"timestamp": datetime.now().isoformat(),
|
| 1122 |
+
"processing_results": result,
|
| 1123 |
+
"status": result.get("status", "unknown"),
|
| 1124 |
+
}
|
| 1125 |
+
st.download_button(
|
| 1126 |
+
label="📄 Download Results JSON",
|
| 1127 |
+
data=json.dumps(export_data, indent=2, ensure_ascii=False),
|
| 1128 |
+
file_name=f"paper_processing_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
|
| 1129 |
+
mime="application/json",
|
| 1130 |
+
use_container_width=True,
|
| 1131 |
+
)
|
| 1132 |
+
|
| 1133 |
+
|
| 1134 |
+
def progress_display_component():
|
| 1135 |
+
"""
|
| 1136 |
+
Progress display component
|
| 1137 |
+
|
| 1138 |
+
Returns:
|
| 1139 |
+
(progress_bar, status_text)
|
| 1140 |
+
"""
|
| 1141 |
+
# Display processing progress title
|
| 1142 |
+
st.markdown("### 📊 Processing Progress")
|
| 1143 |
+
|
| 1144 |
+
# Create progress container
|
| 1145 |
+
progress_container = st.container()
|
| 1146 |
+
|
| 1147 |
+
with progress_container:
|
| 1148 |
+
# Add custom CSS styles
|
| 1149 |
+
st.markdown(
|
| 1150 |
+
"""
|
| 1151 |
+
<style>
|
| 1152 |
+
.progress-container {
|
| 1153 |
+
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 1154 |
+
border-radius: 15px;
|
| 1155 |
+
padding: 20px;
|
| 1156 |
+
margin: 10px 0;
|
| 1157 |
+
box-shadow: 0 10px 30px rgba(0,0,0,0.2);
|
| 1158 |
+
}
|
| 1159 |
+
.progress-steps {
|
| 1160 |
+
display: flex;
|
| 1161 |
+
justify-content: space-between;
|
| 1162 |
+
margin-bottom: 15px;
|
| 1163 |
+
flex-wrap: wrap;
|
| 1164 |
+
}
|
| 1165 |
+
.progress-step {
|
| 1166 |
+
background: rgba(255,255,255,0.1);
|
| 1167 |
+
border-radius: 10px;
|
| 1168 |
+
padding: 8px 12px;
|
| 1169 |
+
margin: 2px;
|
| 1170 |
+
color: white;
|
| 1171 |
+
font-size: 0.8rem;
|
| 1172 |
+
font-weight: 500;
|
| 1173 |
+
border: 2px solid transparent;
|
| 1174 |
+
transition: all 0.3s ease;
|
| 1175 |
+
}
|
| 1176 |
+
.progress-step.active {
|
| 1177 |
+
background: rgba(255,255,255,0.3);
|
| 1178 |
+
border-color: #00ff88;
|
| 1179 |
+
box-shadow: 0 0 15px rgba(0,255,136,0.3);
|
| 1180 |
+
}
|
| 1181 |
+
.progress-step.completed {
|
| 1182 |
+
background: rgba(0,255,136,0.2);
|
| 1183 |
+
border-color: #00ff88;
|
| 1184 |
+
}
|
| 1185 |
+
.status-text {
|
| 1186 |
+
color: white;
|
| 1187 |
+
font-weight: 600;
|
| 1188 |
+
font-size: 1.1rem;
|
| 1189 |
+
margin: 10px 0;
|
| 1190 |
+
text-align: center;
|
| 1191 |
+
}
|
| 1192 |
+
</style>
|
| 1193 |
+
""",
|
| 1194 |
+
unsafe_allow_html=True,
|
| 1195 |
+
)
|
| 1196 |
+
|
| 1197 |
+
st.markdown('<div class="progress-container">', unsafe_allow_html=True)
|
| 1198 |
+
|
| 1199 |
+
# Create step indicator
|
| 1200 |
+
st.markdown(
|
| 1201 |
+
"""
|
| 1202 |
+
<div class="progress-steps">
|
| 1203 |
+
<div class="progress-step" id="step-init">🚀 Initialize</div>
|
| 1204 |
+
<div class="progress-step" id="step-analyze">📊 Analyze</div>
|
| 1205 |
+
<div class="progress-step" id="step-download">📥 Download</div>
|
| 1206 |
+
<div class="progress-step" id="step-references">🔍 References</div>
|
| 1207 |
+
<div class="progress-step" id="step-plan">📋 Plan</div>
|
| 1208 |
+
<div class="progress-step" id="step-repos">📦 Repos</div>
|
| 1209 |
+
<div class="progress-step" id="step-index">🗂️ Index</div>
|
| 1210 |
+
<div class="progress-step" id="step-implement">⚙️ Implement</div>
|
| 1211 |
+
</div>
|
| 1212 |
+
""",
|
| 1213 |
+
unsafe_allow_html=True,
|
| 1214 |
+
)
|
| 1215 |
+
|
| 1216 |
+
# Create progress bar and status text
|
| 1217 |
+
progress_bar = st.progress(0)
|
| 1218 |
+
status_text = st.empty()
|
| 1219 |
+
|
| 1220 |
+
st.markdown("</div>", unsafe_allow_html=True)
|
| 1221 |
+
|
| 1222 |
+
return progress_bar, status_text
|
| 1223 |
+
|
| 1224 |
+
|
| 1225 |
+
def enhanced_progress_display_component(
|
| 1226 |
+
enable_indexing: bool = True, chat_mode: bool = False
|
| 1227 |
+
):
|
| 1228 |
+
"""
|
| 1229 |
+
Enhanced progress display component
|
| 1230 |
+
|
| 1231 |
+
Args:
|
| 1232 |
+
enable_indexing: Whether indexing is enabled
|
| 1233 |
+
chat_mode: Whether in chat mode (user requirements input)
|
| 1234 |
+
|
| 1235 |
+
Returns:
|
| 1236 |
+
(progress_bar, status_text, step_indicator, workflow_steps)
|
| 1237 |
+
"""
|
| 1238 |
+
# Display processing progress title
|
| 1239 |
+
if chat_mode:
|
| 1240 |
+
st.markdown("### 💬 AI Chat Planning - Requirements to Code Workflow")
|
| 1241 |
+
elif enable_indexing:
|
| 1242 |
+
st.markdown("### 🚀 AI Research Engine - Full Processing Workflow")
|
| 1243 |
+
else:
|
| 1244 |
+
st.markdown(
|
| 1245 |
+
"### ⚡ AI Research Engine - Fast Processing Workflow (Indexing Disabled)"
|
| 1246 |
+
)
|
| 1247 |
+
|
| 1248 |
+
# Create progress container
|
| 1249 |
+
progress_container = st.container()
|
| 1250 |
+
|
| 1251 |
+
with progress_container:
|
| 1252 |
+
# Workflow step definitions - adjust based on mode and indexing toggle
|
| 1253 |
+
if chat_mode:
|
| 1254 |
+
# Chat mode - simplified workflow for user requirements
|
| 1255 |
+
workflow_steps = [
|
| 1256 |
+
("🚀", "Initialize", "Setting up chat engine"),
|
| 1257 |
+
("💬", "Planning", "Analyzing requirements"),
|
| 1258 |
+
("🏗️", "Setup", "Creating workspace"),
|
| 1259 |
+
("📝", "Save Plan", "Saving implementation plan"),
|
| 1260 |
+
("⚙️", "Implement", "Generating code"),
|
| 1261 |
+
]
|
| 1262 |
+
elif enable_indexing:
|
| 1263 |
+
workflow_steps = [
|
| 1264 |
+
("🚀", "Initialize", "Setting up AI engine"),
|
| 1265 |
+
("📊", "Analyze", "Analyzing paper content"),
|
| 1266 |
+
("📥", "Download", "Processing document"),
|
| 1267 |
+
(
|
| 1268 |
+
"📋",
|
| 1269 |
+
"Plan",
|
| 1270 |
+
"Generating code plan",
|
| 1271 |
+
), # Phase 3: code planning orchestration
|
| 1272 |
+
(
|
| 1273 |
+
"🔍",
|
| 1274 |
+
"References",
|
| 1275 |
+
"Analyzing references",
|
| 1276 |
+
), # Phase 4: now conditional
|
| 1277 |
+
("📦", "Repos", "Downloading repositories"), # Phase 5: GitHub download
|
| 1278 |
+
("🗂️", "Index", "Building code index"), # Phase 6: code indexing
|
| 1279 |
+
("⚙️", "Implement", "Implementing code"), # Phase 7: code implementation
|
| 1280 |
+
]
|
| 1281 |
+
else:
|
| 1282 |
+
# Fast mode - skip References, Repos and Index steps
|
| 1283 |
+
workflow_steps = [
|
| 1284 |
+
("🚀", "Initialize", "Setting up AI engine"),
|
| 1285 |
+
("📊", "Analyze", "Analyzing paper content"),
|
| 1286 |
+
("📥", "Download", "Processing document"),
|
| 1287 |
+
(
|
| 1288 |
+
"📋",
|
| 1289 |
+
"Plan",
|
| 1290 |
+
"Generating code plan",
|
| 1291 |
+
), # Phase 3: code planning orchestration
|
| 1292 |
+
(
|
| 1293 |
+
"⚙️",
|
| 1294 |
+
"Implement",
|
| 1295 |
+
"Implementing code",
|
| 1296 |
+
), # Jump directly to implementation
|
| 1297 |
+
]
|
| 1298 |
+
|
| 1299 |
+
# Display step grid with fixed layout
|
| 1300 |
+
# Use a maximum of 8 columns for consistent sizing
|
| 1301 |
+
max_cols = 8
|
| 1302 |
+
cols = st.columns(max_cols)
|
| 1303 |
+
step_indicators = []
|
| 1304 |
+
|
| 1305 |
+
# Calculate column spacing for centering steps
|
| 1306 |
+
total_steps = len(workflow_steps)
|
| 1307 |
+
if total_steps <= max_cols:
|
| 1308 |
+
# Center the steps when fewer than max columns
|
| 1309 |
+
start_col = (max_cols - total_steps) // 2
|
| 1310 |
+
else:
|
| 1311 |
+
start_col = 0
|
| 1312 |
+
|
| 1313 |
+
for i, (icon, title, desc) in enumerate(workflow_steps):
|
| 1314 |
+
col_index = start_col + i if total_steps <= max_cols else i
|
| 1315 |
+
if col_index < max_cols:
|
| 1316 |
+
with cols[col_index]:
|
| 1317 |
+
step_placeholder = st.empty()
|
| 1318 |
+
step_indicators.append(step_placeholder)
|
| 1319 |
+
step_placeholder.markdown(
|
| 1320 |
+
f"""
|
| 1321 |
+
<div style="
|
| 1322 |
+
text-align: center;
|
| 1323 |
+
padding: 12px 8px;
|
| 1324 |
+
border-radius: 12px;
|
| 1325 |
+
background: rgba(255,255,255,0.05);
|
| 1326 |
+
margin: 5px 2px;
|
| 1327 |
+
border: 2px solid transparent;
|
| 1328 |
+
min-height: 90px;
|
| 1329 |
+
display: flex;
|
| 1330 |
+
flex-direction: column;
|
| 1331 |
+
justify-content: center;
|
| 1332 |
+
align-items: center;
|
| 1333 |
+
box-sizing: border-box;
|
| 1334 |
+
">
|
| 1335 |
+
<div style="font-size: 1.5rem; margin-bottom: 4px;">{icon}</div>
|
| 1336 |
+
<div style="font-size: 0.75rem; font-weight: 600; line-height: 1.2; margin-bottom: 2px;">{title}</div>
|
| 1337 |
+
<div style="font-size: 0.6rem; color: #888; line-height: 1.1; text-align: center;">{desc}</div>
|
| 1338 |
+
</div>
|
| 1339 |
+
""",
|
| 1340 |
+
unsafe_allow_html=True,
|
| 1341 |
+
)
|
| 1342 |
+
|
| 1343 |
+
# Create main progress bar
|
| 1344 |
+
st.markdown("#### Overall Progress")
|
| 1345 |
+
progress_bar = st.progress(0)
|
| 1346 |
+
|
| 1347 |
+
# Status text display
|
| 1348 |
+
status_text = st.empty()
|
| 1349 |
+
|
| 1350 |
+
# Display mode information
|
| 1351 |
+
if not enable_indexing:
|
| 1352 |
+
st.info(
|
| 1353 |
+
"⚡ Fast Mode: Reference analysis, GitHub repository download and codebase indexing are disabled for faster processing."
|
| 1354 |
+
)
|
| 1355 |
+
|
| 1356 |
+
return progress_bar, status_text, step_indicators, workflow_steps
|
| 1357 |
+
|
| 1358 |
+
|
| 1359 |
+
def update_step_indicator(
|
| 1360 |
+
step_indicators, workflow_steps, current_step: int, status: str = "active"
|
| 1361 |
+
):
|
| 1362 |
+
"""
|
| 1363 |
+
Update step indicator
|
| 1364 |
+
|
| 1365 |
+
Args:
|
| 1366 |
+
step_indicators: Step indicator list
|
| 1367 |
+
workflow_steps: Workflow steps definition
|
| 1368 |
+
current_step: Current step index
|
| 1369 |
+
status: Status ("active", "completed", "error")
|
| 1370 |
+
"""
|
| 1371 |
+
status_colors = {
|
| 1372 |
+
"pending": ("rgba(255,255,255,0.05)", "transparent", "#888"),
|
| 1373 |
+
"active": ("rgba(255,215,0,0.2)", "#ffd700", "#fff"),
|
| 1374 |
+
"completed": ("rgba(0,255,136,0.2)", "#00ff88", "#fff"),
|
| 1375 |
+
"error": ("rgba(255,99,99,0.2)", "#ff6363", "#fff"),
|
| 1376 |
+
}
|
| 1377 |
+
|
| 1378 |
+
for i, (icon, title, desc) in enumerate(workflow_steps):
|
| 1379 |
+
if i < current_step:
|
| 1380 |
+
bg_color, border_color, text_color = status_colors["completed"]
|
| 1381 |
+
display_icon = "✅"
|
| 1382 |
+
elif i == current_step:
|
| 1383 |
+
bg_color, border_color, text_color = status_colors[status]
|
| 1384 |
+
display_icon = icon
|
| 1385 |
+
else:
|
| 1386 |
+
bg_color, border_color, text_color = status_colors["pending"]
|
| 1387 |
+
display_icon = icon
|
| 1388 |
+
|
| 1389 |
+
step_indicators[i].markdown(
|
| 1390 |
+
f"""
|
| 1391 |
+
<div style="
|
| 1392 |
+
text-align: center;
|
| 1393 |
+
padding: 12px 8px;
|
| 1394 |
+
border-radius: 12px;
|
| 1395 |
+
background: {bg_color};
|
| 1396 |
+
margin: 5px 2px;
|
| 1397 |
+
border: 2px solid {border_color};
|
| 1398 |
+
color: {text_color};
|
| 1399 |
+
transition: all 0.3s ease;
|
| 1400 |
+
box-shadow: {f'0 0 15px {border_color}30' if i == current_step else 'none'};
|
| 1401 |
+
min-height: 90px;
|
| 1402 |
+
display: flex;
|
| 1403 |
+
flex-direction: column;
|
| 1404 |
+
justify-content: center;
|
| 1405 |
+
align-items: center;
|
| 1406 |
+
box-sizing: border-box;
|
| 1407 |
+
">
|
| 1408 |
+
<div style="font-size: 1.5rem; margin-bottom: 4px;">{display_icon}</div>
|
| 1409 |
+
<div style="font-size: 0.75rem; font-weight: 600; line-height: 1.2; margin-bottom: 2px;">{title}</div>
|
| 1410 |
+
<div style="font-size: 0.6rem; opacity: 0.8; line-height: 1.1; text-align: center;">{desc}</div>
|
| 1411 |
+
</div>
|
| 1412 |
+
""",
|
| 1413 |
+
unsafe_allow_html=True,
|
| 1414 |
+
)
|
| 1415 |
+
|
| 1416 |
+
|
| 1417 |
+
def footer_component():
|
| 1418 |
+
"""Footer component"""
|
| 1419 |
+
st.markdown("---")
|
| 1420 |
+
st.markdown(
|
| 1421 |
+
"""
|
| 1422 |
+
<div style="text-align: center; color: #666; padding: 2rem;">
|
| 1423 |
+
<p>🧬 <strong>DeepCode</strong> | Open-Source Code Agent | Data Intelligence Lab @ HKU |
|
| 1424 |
+
<a href="https://github.com/your-repo" target="_blank" style="color: var(--neon-blue);">GitHub</a></p>
|
| 1425 |
+
<p>⚡ Revolutionizing Research Reproducibility • Multi-Agent Architecture • Automated Code Generation</p>
|
| 1426 |
+
<p><small>💡 Join our growing community in building the future of automated research reproducibility</small></p>
|
| 1427 |
+
</div>
|
| 1428 |
+
""",
|
| 1429 |
+
unsafe_allow_html=True,
|
| 1430 |
+
)
|
| 1431 |
+
|
| 1432 |
+
|
| 1433 |
+
def format_file_size(size_bytes: int) -> str:
|
| 1434 |
+
"""
|
| 1435 |
+
Format file size
|
| 1436 |
+
|
| 1437 |
+
Args:
|
| 1438 |
+
size_bytes: Size in bytes
|
| 1439 |
+
|
| 1440 |
+
Returns:
|
| 1441 |
+
Formatted file size
|
| 1442 |
+
"""
|
| 1443 |
+
if size_bytes == 0:
|
| 1444 |
+
return "0B"
|
| 1445 |
+
size_names = ["B", "KB", "MB", "GB"]
|
| 1446 |
+
i = 0
|
| 1447 |
+
while size_bytes >= 1024 and i < len(size_names) - 1:
|
| 1448 |
+
size_bytes /= 1024.0
|
| 1449 |
+
i += 1
|
| 1450 |
+
return f"{size_bytes:.1f}{size_names[i]}"
|
projects/ui/DeepCode/ui/handlers.py
ADDED
|
@@ -0,0 +1,773 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Streamlit Event Handlers Module
|
| 3 |
+
|
| 4 |
+
Contains all event handling and business logic
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import time
|
| 9 |
+
import os
|
| 10 |
+
import traceback
|
| 11 |
+
import atexit
|
| 12 |
+
import signal
|
| 13 |
+
from datetime import datetime
|
| 14 |
+
from typing import Dict, Any
|
| 15 |
+
|
| 16 |
+
import streamlit as st
|
| 17 |
+
import nest_asyncio
|
| 18 |
+
import concurrent.futures
|
| 19 |
+
|
| 20 |
+
# Import necessary modules
|
| 21 |
+
from mcp_agent.app import MCPApp
|
| 22 |
+
from workflows.agent_orchestration_engine import (
|
| 23 |
+
execute_multi_agent_research_pipeline,
|
| 24 |
+
execute_chat_based_planning_pipeline,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _emergency_cleanup():
    """Best-effort resource cleanup invoked at interpreter exit.

    Registered via ``atexit`` so resources are released even when the
    program terminates abnormally. Failures are swallowed because raising
    during interpreter shutdown would only add noise.
    """
    try:
        cleanup_resources()
    except Exception:
        # Never propagate: the interpreter is (or may be) shutting down.
        pass
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _signal_handler(signum, frame):
    """Handle a termination signal: clean up, then re-deliver the signal.

    After a best-effort cleanup the default disposition for ``signum`` is
    restored and the signal is re-sent to this process, preserving the
    normal termination semantics (exit status, core dump, ...).
    """
    try:
        cleanup_resources()
    except Exception:
        # Cleanup must never block termination.
        pass
    finally:
        # Restore default handling and re-raise the signal at ourselves.
        signal.signal(signum, signal.SIG_DFL)
        os.kill(os.getpid(), signum)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Register exit cleanup function so _emergency_cleanup also runs on a
# normal interpreter shutdown, not only on signals.
atexit.register(_emergency_cleanup)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _safe_register_signal_handlers():
    """Install ``_signal_handler`` for termination signals, tolerating failure.

    Handlers may only be installed from the main thread, and some embedded
    environments (e.g. Streamlit script workers) forbid installation
    entirely, so every expected failure mode is silently ignored.
    """
    try:
        import threading

        # signal.signal() raises ValueError outside the main thread.
        if threading.current_thread() is not threading.main_thread():
            return

        wanted = [signal.SIGTERM, signal.SIGINT]
        if hasattr(signal, "SIGBREAK"):  # Windows-only signal
            wanted.append(signal.SIGBREAK)
        for sig in wanted:
            signal.signal(sig, _signal_handler)
    except (AttributeError, OSError, ValueError):
        # Signal unavailable on this platform, or registration disallowed
        # in this environment (common for web frameworks) — proceed without.
        pass
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# Delayed signal handler registration to avoid import-time errors.
# Registration is best-effort: a failure here must never prevent the
# Streamlit app from starting.
try:
    _safe_register_signal_handlers()
except Exception:
    # If registration fails, silently ignore and don't affect app startup
    pass
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
async def process_input_async(
    input_source: str,
    input_type: str,
    enable_indexing: bool = True,
    progress_callback=None,
) -> Dict[str, Any]:
    """
    Process input asynchronously inside a single MCP app context.

    For ``input_type == "chat"`` the chat-based planning pipeline is used;
    any other type goes through the multi-agent research pipeline.

    Args:
        input_source: Input source (file path / URL, or the user's coding
            requirements text when ``input_type == "chat"``)
        input_type: Input type ("chat" selects the planning pipeline)
        enable_indexing: Whether to enable indexing functionality
        progress_callback: Optional callable invoked as
            ``progress_callback(percent, message)``

    Returns:
        On success: dict with ``status == "success"`` and the pipeline
        result under ``repo_result``. On failure: dict with
        ``status == "error"``, ``error`` and ``traceback`` strings.
    """
    try:
        # Create and use MCP app in the same async context
        app = MCPApp(name="paper_to_code")

        async with app.run() as agent_app:
            logger = agent_app.logger
            context = agent_app.context
            # Grant the filesystem MCP server access to the current working dir.
            context.config.mcp.servers["filesystem"].args.extend([os.getcwd()])

            # Initialize progress
            if progress_callback:
                if input_type == "chat":
                    progress_callback(
                        5, "🚀 Initializing chat-based planning pipeline..."
                    )
                else:
                    progress_callback(5, "🚀 Initializing AI research engine...")

            # Choose pipeline based on input type
            if input_type == "chat":
                # Use chat-based planning pipeline for user requirements
                repo_result = await execute_chat_based_planning_pipeline(
                    input_source,  # User's coding requirements
                    logger,
                    progress_callback,
                    enable_indexing=enable_indexing,  # Pass indexing control parameter
                )
            else:
                # Use traditional multi-agent research pipeline for files/URLs
                repo_result = await execute_multi_agent_research_pipeline(
                    input_source,
                    logger,
                    progress_callback,
                    enable_indexing=enable_indexing,  # Pass indexing control parameter
                )

            return {
                "analysis_result": "Integrated into complete workflow",
                "download_result": "Integrated into complete workflow",
                "repo_result": repo_result,
                "status": "success",
            }

    except Exception as e:
        # Any pipeline failure is converted into an error dict rather than
        # propagated, so callers can render it in the UI.
        error_msg = str(e)
        traceback_msg = traceback.format_exc()

        return {"error": error_msg, "traceback": traceback_msg, "status": "error"}
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def run_async_task(coro):
    """
    Run a coroutine to completion, preserving the Streamlit script context.

    The coroutine is executed on a dedicated worker thread with a fresh
    event loop; the current Streamlit ``ScriptRunContext`` (if importable)
    is attached to that thread so Streamlit calls made inside the coroutine
    still work. If the threaded path fails, falls back to running directly
    on a new loop in the current thread.

    Args:
        coro: Coroutine object

    Returns:
        Task result

    Raises:
        TimeoutError: if execution exceeds 5 minutes on the threaded path.
    """
    # Apply nest_asyncio to support nested event loops
    nest_asyncio.apply()

    # Save current Streamlit context
    try:
        from streamlit.runtime.scriptrunner import get_script_run_ctx
        from streamlit.runtime.scriptrunner.script_run_context import (
            SCRIPT_RUN_CONTEXT_ATTR_NAME,
        )

        current_ctx = get_script_run_ctx()
        context_available = True
    except ImportError:
        # If Streamlit context modules can't be imported, use fallback method
        current_ctx = None
        context_available = False

    def run_in_new_loop():
        """Run coroutine in new event loop (executed on the worker thread)."""
        # Set Streamlit context in new thread (if available) so st.* calls
        # inside the coroutine are associated with the originating session.
        if context_available and current_ctx:
            try:
                import threading

                setattr(
                    threading.current_thread(),
                    SCRIPT_RUN_CONTEXT_ATTR_NAME,
                    current_ctx,
                )
            except Exception:
                pass  # Ignore context setting errors

        loop = None
        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(coro)
            return result
        except Exception as e:
            raise e
        finally:
            # Clean up resources
            if loop:
                try:
                    loop.close()
                except Exception:
                    pass
                asyncio.set_event_loop(None)

            # Clean up thread context (if available) so the pooled thread
            # does not keep a stale Streamlit context alive.
            if context_available:
                try:
                    import threading

                    if hasattr(
                        threading.current_thread(), SCRIPT_RUN_CONTEXT_ATTR_NAME
                    ):
                        delattr(
                            threading.current_thread(), SCRIPT_RUN_CONTEXT_ATTR_NAME
                        )
                except Exception:
                    pass  # Ignore cleanup errors

            # Force garbage collection
            import gc

            gc.collect()

    # Use thread pool to run async task, avoiding event loop conflicts
    executor = None
    try:
        executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=1, thread_name_prefix="deepcode_ctx_async"
        )
        future = executor.submit(run_in_new_loop)
        result = future.result(timeout=300)  # 5 minute timeout
        return result
    except concurrent.futures.TimeoutError:
        st.error("Processing timeout after 5 minutes. Please try again.")
        raise TimeoutError("Processing timeout")
    except Exception as e:
        # If thread pool execution fails, try direct execution.
        # NOTE(review): if the coroutine itself raised on the threaded path,
        # this fallback re-awaits an already-consumed coroutine, which will
        # fail with "cannot reuse already awaited coroutine" — confirm the
        # fallback is intended only for executor-level failures.
        st.warning(f"Threaded async execution failed: {e}, trying direct execution...")
        try:
            # Fallback method: run directly in current thread
            loop = None
            try:
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                result = loop.run_until_complete(coro)
                return result
            finally:
                if loop:
                    try:
                        loop.close()
                    except Exception:
                        pass
                    asyncio.set_event_loop(None)
                import gc

                gc.collect()
        except Exception as backup_error:
            st.error(f"All execution methods failed: {backup_error}")
            raise backup_error
    finally:
        # Ensure thread pool is properly closed
        if executor:
            try:
                executor.shutdown(wait=True, cancel_futures=True)
            except Exception:
                pass
        # Force garbage collection
        import gc

        gc.collect()
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def run_async_task_simple(coro):
    """
    Simple async task runner, avoiding threading issues.

    Strategy: if the current thread already has a *running* event loop, the
    coroutine is run on a one-worker thread pool with its own fresh loop;
    otherwise it is run directly on the current loop. Any failure falls back
    to a brand-new event loop in the current thread.

    Args:
        coro: Coroutine object

    Returns:
        Task result

    Raises:
        TimeoutError: if the threaded path exceeds 5 minutes.
    """
    # Apply nest_asyncio to support nested event loops
    nest_asyncio.apply()

    try:
        # Try to run in current event loop
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # If current loop is running, use improved thread pool method
            import concurrent.futures
            import gc

            def run_in_thread():
                # Create new event loop and set as current thread's loop
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result = new_loop.run_until_complete(coro)
                    return result
                except Exception as e:
                    # Ensure exception information is properly passed
                    raise e
                finally:
                    # Ensure loop is properly closed
                    try:
                        new_loop.close()
                    except Exception:
                        pass
                    # Clear current thread's event loop reference
                    asyncio.set_event_loop(None)
                    # Force garbage collection
                    gc.collect()

            # Use context manager to ensure thread pool is properly closed
            executor = None
            try:
                executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=1, thread_name_prefix="deepcode_async"
                )
                future = executor.submit(run_in_thread)
                result = future.result(timeout=300)  # 5 minute timeout
                return result
            except concurrent.futures.TimeoutError:
                st.error(
                    "Processing timeout after 5 minutes. Please try again with a smaller file."
                )
                raise TimeoutError("Processing timeout")
            except Exception as e:
                st.error(f"Async processing error: {e}")
                raise e
            finally:
                # Ensure thread pool is properly closed
                if executor:
                    try:
                        executor.shutdown(wait=True, cancel_futures=True)
                    except Exception:
                        pass
                # Force garbage collection
                gc.collect()
        else:
            # Run directly in current loop
            return loop.run_until_complete(coro)
    except Exception:
        # Final fallback method: create new event loop.
        # NOTE(review): if the coroutine already raised above, it has been
        # consumed and re-awaiting it here will fail — confirm this path is
        # only meant to recover loop-setup errors, not coroutine errors.
        loop = None
        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(coro)
            return result
        except Exception as backup_error:
            st.error(f"All async methods failed: {backup_error}")
            raise backup_error
        finally:
            if loop:
                try:
                    loop.close()
                except Exception:
                    pass
                asyncio.set_event_loop(None)
            # Force garbage collection
            import gc

            gc.collect()
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def handle_processing_workflow(
    input_source: str, input_type: str, enable_indexing: bool = True
) -> Dict[str, Any]:
    """
    Main processing function for workflow.

    Renders the progress UI, runs the async processing pipeline (with a
    fallback runner), and updates the progress/step indicators as the
    pipeline reports percentages.

    Args:
        input_source: Input source (file path, URL, or chat text)
        input_type: Input type ("chat" enables the chat-mode step layout)
        enable_indexing: Whether to enable indexing functionality
            (changes both the step layout and progress mapping)

    Returns:
        Processing result dict with a ``status`` key ("success"/"error").
    """
    from .components import (
        enhanced_progress_display_component,
        update_step_indicator,
        display_status,
    )

    # Display enhanced progress components
    chat_mode = input_type == "chat"
    progress_bar, status_text, step_indicators, workflow_steps = (
        enhanced_progress_display_component(enable_indexing, chat_mode)
    )

    # Step mapping: map progress percentages to step indices - adjust based
    # on mode and indexing toggle. Keys are the exact percentages emitted by
    # the pipelines; unknown percentages keep the current step.
    if chat_mode:
        # Chat mode step mapping: Initialize -> Planning -> Setup -> Save Plan -> Implement
        step_mapping = {
            5: 0,  # Initialize
            30: 1,  # Planning (analyzing requirements)
            50: 2,  # Setup (creating workspace)
            70: 3,  # Save Plan (saving implementation plan)
            85: 4,  # Implement (generating code)
            100: 4,  # Complete
        }
    elif not enable_indexing:
        # Skip indexing-related steps progress mapping - fast mode order:
        # Initialize -> Analyze -> Download -> Plan -> Implement
        step_mapping = {
            5: 0,  # Initialize
            10: 1,  # Analyze
            25: 2,  # Download
            40: 3,  # Plan (now prioritized over References, 40%)
            85: 4,  # Implement (skip References, Repos and Index)
            100: 4,  # Complete
        }
    else:
        # Full workflow step mapping - new order: Initialize -> Analyze ->
        # Download -> Plan -> References -> Repos -> Index -> Implement
        step_mapping = {
            5: 0,  # Initialize
            10: 1,  # Analyze
            25: 2,  # Download
            40: 3,  # Plan (now 4th position, 40%)
            50: 4,  # References (now 5th position, conditional, 50%)
            60: 5,  # Repos (GitHub download)
            70: 6,  # Index (code indexing)
            85: 7,  # Implement (code implementation)
            100: 7,  # Complete
        }

    current_step = 0

    # Define enhanced progress callback function (passed into the pipeline).
    def update_progress(progress: int, message: str):
        nonlocal current_step

        # Update progress bar
        progress_bar.progress(progress)
        status_text.markdown(f"**{message}**")

        # Determine current step
        new_step = step_mapping.get(progress, current_step)
        if new_step != current_step:
            current_step = new_step
            update_step_indicator(
                step_indicators, workflow_steps, current_step, "active"
            )

        time.sleep(0.3)  # Brief pause for users to see progress changes

    # Step 1: Initialization
    if chat_mode:
        update_progress(5, "🚀 Initializing chat-based planning engine...")
    elif enable_indexing:
        update_progress(5, "🚀 Initializing AI research engine and loading models...")
    else:
        update_progress(
            5, "🚀 Initializing AI research engine (Fast mode - indexing disabled)..."
        )
    update_step_indicator(step_indicators, workflow_steps, 0, "active")

    # Start async processing with progress callback
    with st.spinner("🔄 Processing workflow stages..."):
        try:
            # First try using simple async processing method
            result = run_async_task_simple(
                process_input_async(
                    input_source, input_type, enable_indexing, update_progress
                )
            )
        except Exception as e:
            st.warning(f"Primary async method failed: {e}")
            # Fallback method: use original thread pool method
            try:
                result = run_async_task(
                    process_input_async(
                        input_source, input_type, enable_indexing, update_progress
                    )
                )
            except Exception as backup_error:
                st.error(f"Both async methods failed. Error: {backup_error}")
                return {
                    "status": "error",
                    "error": str(backup_error),
                    "traceback": traceback.format_exc(),
                }

    # Update final status based on results
    if result["status"] == "success":
        # Complete all steps
        update_progress(100, "✅ All processing stages completed successfully!")
        update_step_indicator(
            step_indicators, workflow_steps, len(workflow_steps), "completed"
        )

        # Display success information
        st.balloons()  # Add celebration animation
        if chat_mode:
            display_status(
                "🎉 Chat workflow completed! Your requirements have been analyzed and code has been generated.",
                "success",
            )
        elif enable_indexing:
            display_status(
                "🎉 Workflow completed! Your research paper has been successfully processed and code has been generated.",
                "success",
            )
        else:
            display_status(
                "🎉 Fast workflow completed! Your research paper has been processed (indexing skipped for faster processing).",
                "success",
            )

    else:
        # Processing failed
        update_progress(0, "❌ Processing failed - see error details below")
        update_step_indicator(step_indicators, workflow_steps, current_step, "error")
        display_status(
            f"❌ Processing encountered an error: {result.get('error', 'Unknown error')}",
            "error",
        )

    # Wait a moment for users to see completion status
    time.sleep(2.5)

    return result
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
def update_session_state_with_result(result: Dict[str, Any], input_type: str):
    """
    Record a processing result in the Streamlit session state.

    Successful results are stored for immediate display and appended to the
    history; failures store the error message instead. History is capped at
    the 50 most recent entries.

    Args:
        result: Processing result (must contain a "status" key)
        input_type: Input type of the run that produced the result
    """
    succeeded = result["status"] == "success"

    # Common history-entry skeleton; branch-specific fields added below.
    entry = {
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "input_type": input_type,
        "status": "success" if succeeded else "error",
    }

    if succeeded:
        # Make the result available for the results view.
        st.session_state.last_result = result
        st.session_state.show_results = True
        entry["result"] = result
    else:
        # Keep the error message around so the UI can display it.
        message = result.get("error", "Unknown error")
        st.session_state.last_error = message
        entry["error"] = message

    st.session_state.results.append(entry)

    # Cap history at the 50 most recent records.
    if len(st.session_state.results) > 50:
        st.session_state.results = st.session_state.results[-50:]
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
def cleanup_temp_file(input_source: str, input_type: str):
    """
    Delete the uploaded temporary file after processing, if any.

    Only file-based inputs create a temp file; URL/chat inputs have nothing
    to remove. Deletion is best-effort: a missing or non-removable file is
    silently ignored.

    Args:
        input_source: Input source (temp file path for file inputs)
        input_type: Input type ("file", "url", "chat", ...)
    """
    if input_type != "file" or not input_source:
        return
    try:
        # EAFP: attempt removal directly instead of exists()+unlink, which
        # is racy if the file disappears between the check and the delete.
        os.unlink(input_source)
    except OSError:
        pass  # Already gone or not removable — nothing useful to do here.
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def handle_start_processing_button(input_source: str, input_type: str):
    """
    Handle start processing button click.

    Runs the full workflow, records the result (or error) in session state,
    then cleans up and reruns the script so results/errors are displayed.

    Args:
        input_source: Input source
        input_type: Input type
    """
    from .components import display_status

    # Mark the session as busy so the input UI is disabled on reruns.
    st.session_state.processing = True

    # Get indexing toggle status
    enable_indexing = st.session_state.get("enable_indexing", True)

    try:
        # Process workflow
        result = handle_processing_workflow(input_source, input_type, enable_indexing)

        # Display result status
        if result["status"] == "success":
            display_status("All operations completed successfully! 🎉", "success")
        else:
            display_status("Error during processing", "error")

        # Update session state
        update_session_state_with_result(result, input_type)

    except Exception as e:
        # Handle exceptional cases
        st.error(f"Unexpected error during processing: {e}")
        result = {"status": "error", "error": str(e)}
        update_session_state_with_result(result, input_type)

    finally:
        # Reset state and clean up resources after processing
        st.session_state.processing = False

        # Clean up temporary files
        cleanup_temp_file(input_source, input_type)

        # Clean up system resources
        cleanup_resources()

        # Rerun to display results or errors. NOTE(review): st.rerun()
        # raises a control-flow exception; it must stay the LAST statement
        # in this finally block so the cleanup above always runs first.
        st.rerun()
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
def handle_error_display():
    """Show the last recorded error (if any) together with a retry button.

    The retry button clears the stored error, bumps the task counter so the
    input widgets are recreated with fresh keys, and reruns the script.
    """
    last_error = getattr(st.session_state, "last_error", None)
    if not last_error:
        return

    st.error(f"❌ Error: {last_error}")
    if st.button("🔄 Try Again", type="secondary", use_container_width=True):
        st.session_state.last_error = None
        st.session_state.task_counter += 1
        st.rerun()
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
def initialize_session_state():
    """Initialize Streamlit session state with default values.

    Each key is only set when absent, so values survive script reruns.
    """
    defaults = {
        "processing": False,  # a workflow run is in progress
        "results": [],  # history of past runs (capped at 50 elsewhere)
        "current_step": 0,
        "task_counter": 0,  # bumped to refresh widget keys on retry
        "show_results": False,
        "last_result": None,
        "last_error": None,
        # Indexing is DISABLED by default (fast mode); the sidebar toggle
        # can enable it per session. (The old comment claimed the opposite.)
        "enable_indexing": False,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def cleanup_resources():
    """
    Clean up system resources to prevent memory leaks.

    Best-effort shutdown helper: cancels pending asyncio tasks, waits
    briefly for worker threads, reaps multiprocessing children, clears
    interpreter caches and forces garbage collection. Every step is
    individually wrapped so cleanup can never crash the caller.
    """
    try:
        import gc
        import threading
        import multiprocessing
        import asyncio
        import sys

        # 1. Clean up asyncio-related resources
        try:
            # Get current event loop (if exists)
            try:
                loop = asyncio.get_running_loop()
                # Cancel all pending tasks
                if loop and not loop.is_closed():
                    pending_tasks = [
                        task for task in asyncio.all_tasks(loop) if not task.done()
                    ]
                    if pending_tasks:
                        for task in pending_tasks:
                            if not task.cancelled():
                                task.cancel()
                    # Wait for task cancellation to complete
                    try:
                        if pending_tasks:
                            # Use timeout to avoid blocking too long
                            import time

                            time.sleep(0.1)
                    except Exception:
                        pass
            except RuntimeError:
                # No running event loop, continue with other cleanup
                pass
        except Exception:
            pass

        # 2. Force garbage collection
        gc.collect()

        # 3. Clean up active threads (except main thread)
        active_threads = threading.active_count()
        if active_threads > 1:
            # Wait some time for threads to naturally finish
            import time

            time.sleep(0.5)

        # 4. Clean up multiprocessing resources
        try:
            # Clean up possible multiprocessing resources
            if hasattr(multiprocessing, "active_children"):
                for child in multiprocessing.active_children():
                    if child.is_alive():
                        child.terminate()
                        child.join(timeout=1.0)
                        # If join times out, force kill
                        if child.is_alive():
                            try:
                                child.kill()
                                child.join(timeout=0.5)
                            except Exception:
                                pass

            # Clean up multiprocessing-related resource tracker.
            # NOTE(review): relies on the private _resource_tracker/_stop
            # API — may break across Python versions; confirm on upgrade.
            try:
                import multiprocessing.resource_tracker

                if hasattr(multiprocessing.resource_tracker, "_resource_tracker"):
                    tracker = multiprocessing.resource_tracker._resource_tracker
                    if tracker and hasattr(tracker, "_stop"):
                        tracker._stop()
            except Exception:
                pass

        except Exception:
            pass

        # 5. Force clean up Python internal caches
        try:
            # Clean up some temporary objects in module cache
            import sys

            # Don't delete key modules, only clean up possible temporary resources
            if hasattr(sys, "_clear_type_cache"):
                sys._clear_type_cache()
        except Exception:
            pass

        # 6. Final garbage collection
        gc.collect()

    except Exception as e:
        # Silently handle cleanup errors to avoid affecting main flow
        # But can log errors in debug mode
        try:
            import logging

            logging.getLogger(__name__).debug(f"Resource cleanup warning: {e}")
        except Exception:
            pass
|
projects/ui/DeepCode/ui/layout.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Streamlit Page Layout Module
|
| 3 |
+
|
| 4 |
+
Contains main page layout and flow control
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import streamlit as st
|
| 8 |
+
|
| 9 |
+
from .components import (
|
| 10 |
+
display_header,
|
| 11 |
+
display_features,
|
| 12 |
+
sidebar_control_panel,
|
| 13 |
+
input_method_selector,
|
| 14 |
+
results_display_component,
|
| 15 |
+
footer_component,
|
| 16 |
+
)
|
| 17 |
+
from .handlers import (
|
| 18 |
+
initialize_session_state,
|
| 19 |
+
handle_start_processing_button,
|
| 20 |
+
handle_error_display,
|
| 21 |
+
)
|
| 22 |
+
from .styles import get_main_styles
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def setup_page_config():
    """Configure the Streamlit page (title, icon, layout, sidebar state)."""
    page_options = {
        "page_title": "DeepCode - AI Research Engine",
        "page_icon": "🧬",
        "layout": "wide",
        "initial_sidebar_state": "expanded",
    }
    st.set_page_config(**page_options)
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def apply_custom_styles():
    """Inject the application's custom CSS into the page."""
    css = get_main_styles()
    st.markdown(css, unsafe_allow_html=True)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def render_main_content():
    """Render the central content area: header, results or input UI, errors."""
    # Header and feature highlights, followed by a separator.
    display_header()
    display_features()
    st.markdown("---")

    # The results view takes over the whole content area when available.
    showing_results = bool(
        st.session_state.show_results and st.session_state.last_result
    )
    if showing_results:
        results_display_component(
            st.session_state.last_result, st.session_state.task_counter
        )
        st.markdown("---")
        return

    # The input interface only appears while results are not being shown.
    if not st.session_state.show_results:
        render_input_interface()

    # Surface any error stored by a previous run.
    handle_error_display()
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def render_input_interface():
    """Render the input widgets and the processing trigger button."""
    # Collect the input source and its type from the selector widget.
    input_source, input_type = input_method_selector(st.session_state.task_counter)

    if st.session_state.processing:
        # A run is underway — warn the user instead of offering the button.
        st.info("🔄 Processing in progress... Please wait.")
        st.warning("⚠️ Do not refresh the page or close the browser during processing.")
    elif input_source:
        # Input is ready and nothing is running: offer the start button.
        if st.button("🚀 Start Processing", type="primary", use_container_width=True):
            handle_start_processing_button(input_source, input_type)
    else:
        # Nothing provided yet.
        st.info("👆 Please upload a file or enter a URL to start processing.")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def render_sidebar():
    """Render the sidebar control panel and return its collected settings."""
    sidebar_info = sidebar_control_panel()
    return sidebar_info
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def main_layout():
    """Assemble the full page: state, config, styles, sidebar, content, footer.

    Returns:
        Whatever the sidebar control panel produced.
    """
    # Session state must exist before any component reads it.
    initialize_session_state()

    # Page-level configuration and CSS come next.
    setup_page_config()
    apply_custom_styles()

    # Sidebar first, then the main column, then the footer.
    sidebar_info = render_sidebar()
    render_main_content()
    footer_component()

    return sidebar_info
|
projects/ui/DeepCode/ui/streamlit_app.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DeepCode - AI Research Engine
|
| 3 |
+
|
| 4 |
+
Streamlit Web Interface Main Application File
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
# Disable .pyc file generation
|
| 11 |
+
os.environ["PYTHONDONTWRITEBYTECODE"] = "1"
|
| 12 |
+
|
| 13 |
+
# Add parent directory to path for module imports
|
| 14 |
+
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 15 |
+
parent_dir = os.path.dirname(current_dir)
|
| 16 |
+
if parent_dir not in sys.path:
|
| 17 |
+
sys.path.insert(0, parent_dir)
|
| 18 |
+
|
| 19 |
+
# Import UI modules
|
| 20 |
+
from ui.layout import main_layout
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def main():
    """Streamlit application entry point.

    All UI logic lives in the ui/ package; this function simply delegates to
    the top-level layout and propagates its return value.
    """
    return main_layout()


if __name__ == "__main__":
    main()
|
projects/ui/DeepCode/ui/styles.py
ADDED
|
@@ -0,0 +1,2590 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Streamlit UI Styles Module
|
| 3 |
+
|
| 4 |
+
Contains all CSS style definitions for the application
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def get_main_styles() -> str:
|
| 9 |
+
"""
|
| 10 |
+
Get main CSS styles
|
| 11 |
+
|
| 12 |
+
Returns:
|
| 13 |
+
CSS styles string
|
| 14 |
+
"""
|
| 15 |
+
return """
|
| 16 |
+
<style>
|
| 17 |
+
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@300;400;700&family=Inter:wght@300;400;600;700&display=swap');
|
| 18 |
+
|
| 19 |
+
:root {
|
| 20 |
+
--primary-bg: #0a0e27;
|
| 21 |
+
--secondary-bg: #1a1f3a;
|
| 22 |
+
--accent-bg: #2d3748;
|
| 23 |
+
--card-bg: rgba(45, 55, 72, 0.9);
|
| 24 |
+
--glass-bg: rgba(255, 255, 255, 0.08);
|
| 25 |
+
--glass-border: rgba(255, 255, 255, 0.12);
|
| 26 |
+
--primary-gradient: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
|
| 27 |
+
--accent-gradient: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
|
| 28 |
+
--neon-blue: #64b5f6;
|
| 29 |
+
--neon-cyan: #4dd0e1;
|
| 30 |
+
--neon-green: #81c784;
|
| 31 |
+
--neon-purple: #ba68c8;
|
| 32 |
+
--text-primary: #ffffff;
|
| 33 |
+
--text-secondary: #e3f2fd;
|
| 34 |
+
--text-muted: #90caf9;
|
| 35 |
+
--border-color: rgba(100, 181, 246, 0.2);
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
/* Light theme variables - Using dark theme colors */
|
| 39 |
+
:root {
|
| 40 |
+
--light-primary-bg: #0a0e27;
|
| 41 |
+
--light-secondary-bg: #1a1f3a;
|
| 42 |
+
--light-accent-bg: #2d3748;
|
| 43 |
+
--light-card-bg: rgba(45, 55, 72, 0.9);
|
| 44 |
+
--light-border-soft: rgba(100, 181, 246, 0.2);
|
| 45 |
+
--light-border-medium: rgba(100, 181, 246, 0.4);
|
| 46 |
+
--light-text-primary: #ffffff;
|
| 47 |
+
--light-text-secondary: #e3f2fd;
|
| 48 |
+
--light-text-muted: #90caf9;
|
| 49 |
+
--light-accent-blue: #64b5f6;
|
| 50 |
+
--light-accent-cyan: #4dd0e1;
|
| 51 |
+
--light-accent-green: #81c784;
|
| 52 |
+
--light-accent-purple: #ba68c8;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
/* Global app background and text */
|
| 56 |
+
.stApp {
|
| 57 |
+
background: linear-gradient(135deg, var(--primary-bg) 0%, var(--secondary-bg) 100%);
|
| 58 |
+
color: var(--text-primary);
|
| 59 |
+
font-family: 'Inter', sans-serif;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
/* Force high contrast for all text elements */
|
| 63 |
+
.stApp * {
|
| 64 |
+
color: var(--text-primary) !important;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
/* Sidebar redesign - dark tech theme */
|
| 68 |
+
.css-1d391kg {
|
| 69 |
+
background: linear-gradient(180deg, #0d1117 0%, #161b22 50%, #21262d 100%) !important;
|
| 70 |
+
border-right: 2px solid var(--neon-cyan) !important;
|
| 71 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.3) !important;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
/* Light mode sidebar - soft and gentle */
|
| 75 |
+
@media (prefers-color-scheme: light) {
|
| 76 |
+
.css-1d391kg {
|
| 77 |
+
background: linear-gradient(180deg, var(--light-primary-bg) 0%, var(--light-secondary-bg) 50%, var(--light-accent-bg) 100%) !important;
|
| 78 |
+
border-right: 1px solid var(--light-border-soft) !important;
|
| 79 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05) !important;
|
| 80 |
+
}
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
/* Alternative light theme detection for Streamlit light theme */
|
| 84 |
+
[data-theme="light"] .css-1d391kg,
|
| 85 |
+
.css-1d391kg[data-theme="light"] {
|
| 86 |
+
background: linear-gradient(180deg, var(--light-primary-bg) 0%, var(--light-secondary-bg) 50%, var(--light-accent-bg) 100%) !important;
|
| 87 |
+
border-right: 1px solid var(--light-border-soft) !important;
|
| 88 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05) !important;
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
.css-1d391kg * {
|
| 92 |
+
color: var(--text-primary) !important;
|
| 93 |
+
font-weight: 500 !important;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
.css-1d391kg h3 {
|
| 97 |
+
color: var(--neon-cyan) !important;
|
| 98 |
+
font-weight: 700 !important;
|
| 99 |
+
font-size: 1.2rem !important;
|
| 100 |
+
text-shadow: 0 0 15px rgba(77, 208, 225, 0.6) !important;
|
| 101 |
+
border-bottom: 1px solid rgba(77, 208, 225, 0.3) !important;
|
| 102 |
+
padding-bottom: 0.5rem !important;
|
| 103 |
+
margin-bottom: 1rem !important;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
/* Light mode text styling */
|
| 107 |
+
@media (prefers-color-scheme: light) {
|
| 108 |
+
.css-1d391kg * {
|
| 109 |
+
color: var(--light-text-primary) !important;
|
| 110 |
+
font-weight: 500 !important;
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
.css-1d391kg h3 {
|
| 114 |
+
color: var(--light-accent-blue) !important;
|
| 115 |
+
font-weight: 600 !important;
|
| 116 |
+
font-size: 1.2rem !important;
|
| 117 |
+
text-shadow: none !important;
|
| 118 |
+
border-bottom: 1px solid var(--light-border-soft) !important;
|
| 119 |
+
padding-bottom: 0.5rem !important;
|
| 120 |
+
margin-bottom: 1rem !important;
|
| 121 |
+
}
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
/* Alternative light theme detection */
|
| 125 |
+
[data-theme="light"] .css-1d391kg *,
|
| 126 |
+
.css-1d391kg[data-theme="light"] * {
|
| 127 |
+
color: var(--light-text-primary) !important;
|
| 128 |
+
font-weight: 500 !important;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
[data-theme="light"] .css-1d391kg h3,
|
| 132 |
+
.css-1d391kg[data-theme="light"] h3 {
|
| 133 |
+
color: var(--light-accent-blue) !important;
|
| 134 |
+
font-weight: 600 !important;
|
| 135 |
+
font-size: 1.2rem !important;
|
| 136 |
+
text-shadow: none !important;
|
| 137 |
+
border-bottom: 1px solid var(--light-border-soft) !important;
|
| 138 |
+
padding-bottom: 0.5rem !important;
|
| 139 |
+
margin-bottom: 1rem !important;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
.css-1d391kg p, .css-1d391kg div {
|
| 143 |
+
color: var(--text-primary) !important;
|
| 144 |
+
font-weight: 600 !important;
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
/* Sidebar info boxes - dark tech style */
|
| 148 |
+
.css-1d391kg .stAlert,
|
| 149 |
+
.css-1d391kg .stInfo,
|
| 150 |
+
.css-1d391kg .stSuccess,
|
| 151 |
+
.css-1d391kg .stWarning,
|
| 152 |
+
.css-1d391kg .stError {
|
| 153 |
+
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%) !important;
|
| 154 |
+
border: 2px solid var(--neon-cyan) !important;
|
| 155 |
+
color: var(--text-primary) !important;
|
| 156 |
+
font-weight: 700 !important;
|
| 157 |
+
border-radius: 12px !important;
|
| 158 |
+
box-shadow: 0 0 15px rgba(77, 208, 225, 0.3) !important;
|
| 159 |
+
backdrop-filter: blur(10px) !important;
|
| 160 |
+
margin: 0.5rem 0 !important;
|
| 161 |
+
padding: 1rem !important;
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
/* Light mode info boxes - soft and gentle */
|
| 165 |
+
@media (prefers-color-scheme: light) {
|
| 166 |
+
.css-1d391kg .stAlert,
|
| 167 |
+
.css-1d391kg .stInfo,
|
| 168 |
+
.css-1d391kg .stSuccess,
|
| 169 |
+
.css-1d391kg .stWarning,
|
| 170 |
+
.css-1d391kg .stError {
|
| 171 |
+
background: var(--light-card-bg) !important;
|
| 172 |
+
border: 1px solid var(--light-border-soft) !important;
|
| 173 |
+
color: var(--light-text-primary) !important;
|
| 174 |
+
font-weight: 500 !important;
|
| 175 |
+
border-radius: 8px !important;
|
| 176 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
|
| 177 |
+
backdrop-filter: none !important;
|
| 178 |
+
margin: 0.5rem 0 !important;
|
| 179 |
+
padding: 0.8rem !important;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
.css-1d391kg .stInfo {
|
| 183 |
+
border-left: 3px solid var(--light-accent-blue) !important;
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
.css-1d391kg .stSuccess {
|
| 187 |
+
border-left: 3px solid var(--light-accent-green) !important;
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
.css-1d391kg .stWarning {
|
| 191 |
+
border-left: 3px solid #f59e0b !important;
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
.css-1d391kg .stError {
|
| 195 |
+
border-left: 3px solid #ef4444 !important;
|
| 196 |
+
}
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
/* Alternative light theme detection for info boxes */
|
| 200 |
+
[data-theme="light"] .css-1d391kg .stAlert,
|
| 201 |
+
[data-theme="light"] .css-1d391kg .stInfo,
|
| 202 |
+
[data-theme="light"] .css-1d391kg .stSuccess,
|
| 203 |
+
[data-theme="light"] .css-1d391kg .stWarning,
|
| 204 |
+
[data-theme="light"] .css-1d391kg .stError,
|
| 205 |
+
.css-1d391kg[data-theme="light"] .stAlert,
|
| 206 |
+
.css-1d391kg[data-theme="light"] .stInfo,
|
| 207 |
+
.css-1d391kg[data-theme="light"] .stSuccess,
|
| 208 |
+
.css-1d391kg[data-theme="light"] .stWarning,
|
| 209 |
+
.css-1d391kg[data-theme="light"] .stError {
|
| 210 |
+
background: var(--light-card-bg) !important;
|
| 211 |
+
border: 1px solid var(--light-border-soft) !important;
|
| 212 |
+
color: var(--light-text-primary) !important;
|
| 213 |
+
font-weight: 500 !important;
|
| 214 |
+
border-radius: 8px !important;
|
| 215 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
|
| 216 |
+
backdrop-filter: none !important;
|
| 217 |
+
margin: 0.5rem 0 !important;
|
| 218 |
+
padding: 0.8rem !important;
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
/* Force white text for sidebar info boxes */
|
| 222 |
+
.css-1d391kg .stInfo div,
|
| 223 |
+
.css-1d391kg .stInfo p,
|
| 224 |
+
.css-1d391kg .stInfo span {
|
| 225 |
+
color: #ffffff !important;
|
| 226 |
+
font-weight: 700 !important;
|
| 227 |
+
font-size: 0.9rem !important;
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
/* Light mode: Override white text for sidebar info boxes */
|
| 231 |
+
@media (prefers-color-scheme: light) {
|
| 232 |
+
.css-1d391kg .stInfo div,
|
| 233 |
+
.css-1d391kg .stInfo p,
|
| 234 |
+
.css-1d391kg .stInfo span {
|
| 235 |
+
color: var(--light-text-primary) !important;
|
| 236 |
+
font-weight: 600 !important;
|
| 237 |
+
font-size: 0.9rem !important;
|
| 238 |
+
}
|
| 239 |
+
}
|
| 240 |
+
|
| 241 |
+
/* Alternative light theme detection for info box text */
|
| 242 |
+
[data-theme="light"] .css-1d391kg .stInfo div,
|
| 243 |
+
[data-theme="light"] .css-1d391kg .stInfo p,
|
| 244 |
+
[data-theme="light"] .css-1d391kg .stInfo span,
|
| 245 |
+
.css-1d391kg[data-theme="light"] .stInfo div,
|
| 246 |
+
.css-1d391kg[data-theme="light"] .stInfo p,
|
| 247 |
+
.css-1d391kg[data-theme="light"] .stInfo span {
|
| 248 |
+
color: var(--light-text-primary) !important;
|
| 249 |
+
font-weight: 600 !important;
|
| 250 |
+
font-size: 0.9rem !important;
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
/* Light mode: Override all alert/info box text colors */
|
| 254 |
+
@media (prefers-color-scheme: light) {
|
| 255 |
+
.css-1d391kg .stAlert div,
|
| 256 |
+
.css-1d391kg .stAlert p,
|
| 257 |
+
.css-1d391kg .stAlert span,
|
| 258 |
+
.css-1d391kg .stSuccess div,
|
| 259 |
+
.css-1d391kg .stSuccess p,
|
| 260 |
+
.css-1d391kg .stSuccess span,
|
| 261 |
+
.css-1d391kg .stWarning div,
|
| 262 |
+
.css-1d391kg .stWarning p,
|
| 263 |
+
.css-1d391kg .stWarning span,
|
| 264 |
+
.css-1d391kg .stError div,
|
| 265 |
+
.css-1d391kg .stError p,
|
| 266 |
+
.css-1d391kg .stError span {
|
| 267 |
+
color: var(--light-text-primary) !important;
|
| 268 |
+
font-weight: 600 !important;
|
| 269 |
+
font-size: 0.9rem !important;
|
| 270 |
+
}
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
/* Alternative light theme detection for all alert boxes */
|
| 274 |
+
[data-theme="light"] .css-1d391kg .stAlert div,
|
| 275 |
+
[data-theme="light"] .css-1d391kg .stAlert p,
|
| 276 |
+
[data-theme="light"] .css-1d391kg .stAlert span,
|
| 277 |
+
[data-theme="light"] .css-1d391kg .stSuccess div,
|
| 278 |
+
[data-theme="light"] .css-1d391kg .stSuccess p,
|
| 279 |
+
[data-theme="light"] .css-1d391kg .stSuccess span,
|
| 280 |
+
[data-theme="light"] .css-1d391kg .stWarning div,
|
| 281 |
+
[data-theme="light"] .css-1d391kg .stWarning p,
|
| 282 |
+
[data-theme="light"] .css-1d391kg .stWarning span,
|
| 283 |
+
[data-theme="light"] .css-1d391kg .stError div,
|
| 284 |
+
[data-theme="light"] .css-1d391kg .stError p,
|
| 285 |
+
[data-theme="light"] .css-1d391kg .stError span,
|
| 286 |
+
.css-1d391kg[data-theme="light"] .stAlert div,
|
| 287 |
+
.css-1d391kg[data-theme="light"] .stAlert p,
|
| 288 |
+
.css-1d391kg[data-theme="light"] .stAlert span,
|
| 289 |
+
.css-1d391kg[data-theme="light"] .stSuccess div,
|
| 290 |
+
.css-1d391kg[data-theme="light"] .stSuccess p,
|
| 291 |
+
.css-1d391kg[data-theme="light"] .stSuccess span,
|
| 292 |
+
.css-1d391kg[data-theme="light"] .stWarning div,
|
| 293 |
+
.css-1d391kg[data-theme="light"] .stWarning p,
|
| 294 |
+
.css-1d391kg[data-theme="light"] .stWarning span,
|
| 295 |
+
.css-1d391kg[data-theme="light"] .stError div,
|
| 296 |
+
.css-1d391kg[data-theme="light"] .stError p,
|
| 297 |
+
.css-1d391kg[data-theme="light"] .stError span {
|
| 298 |
+
color: var(--light-text-primary) !important;
|
| 299 |
+
font-weight: 600 !important;
|
| 300 |
+
font-size: 0.9rem !important;
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
/* ============================================
|
| 304 |
+
LIGHT MODE: COMPREHENSIVE TEXT OVERRIDE
|
| 305 |
+
============================================ */
|
| 306 |
+
|
| 307 |
+
/* Light mode: Comprehensive sidebar text color override */
|
| 308 |
+
@media (prefers-color-scheme: light) {
|
| 309 |
+
.css-1d391kg,
|
| 310 |
+
.css-1d391kg * {
|
| 311 |
+
color: var(--light-text-primary) !important;
|
| 312 |
+
}
|
| 313 |
+
|
| 314 |
+
.css-1d391kg h1,
|
| 315 |
+
.css-1d391kg h2,
|
| 316 |
+
.css-1d391kg h3,
|
| 317 |
+
.css-1d391kg h4,
|
| 318 |
+
.css-1d391kg h5,
|
| 319 |
+
.css-1d391kg h6 {
|
| 320 |
+
color: var(--light-accent-blue) !important;
|
| 321 |
+
}
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
/* Alternative light theme detection - Comprehensive override */
|
| 325 |
+
[data-theme="light"] .css-1d391kg,
|
| 326 |
+
[data-theme="light"] .css-1d391kg *,
|
| 327 |
+
.css-1d391kg[data-theme="light"],
|
| 328 |
+
.css-1d391kg[data-theme="light"] * {
|
| 329 |
+
color: var(--light-text-primary) !important;
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
[data-theme="light"] .css-1d391kg h1,
|
| 333 |
+
[data-theme="light"] .css-1d391kg h2,
|
| 334 |
+
[data-theme="light"] .css-1d391kg h3,
|
| 335 |
+
[data-theme="light"] .css-1d391kg h4,
|
| 336 |
+
[data-theme="light"] .css-1d391kg h5,
|
| 337 |
+
[data-theme="light"] .css-1d391kg h6,
|
| 338 |
+
.css-1d391kg[data-theme="light"] h1,
|
| 339 |
+
.css-1d391kg[data-theme="light"] h2,
|
| 340 |
+
.css-1d391kg[data-theme="light"] h3,
|
| 341 |
+
.css-1d391kg[data-theme="light"] h4,
|
| 342 |
+
.css-1d391kg[data-theme="light"] h5,
|
| 343 |
+
.css-1d391kg[data-theme="light"] h6 {
|
| 344 |
+
color: var(--light-accent-blue) !important;
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
/* ================================
|
| 348 |
+
AI ANIMATION EFFECTS & LOGOS
|
| 349 |
+
================================ */
|
| 350 |
+
|
| 351 |
+
/* AI Brain Logo Animation */
|
| 352 |
+
.ai-brain-logo {
|
| 353 |
+
position: absolute;
|
| 354 |
+
width: 80px;
|
| 355 |
+
height: 80px;
|
| 356 |
+
z-index: 1;
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
.brain-node {
|
| 360 |
+
position: absolute;
|
| 361 |
+
width: 12px;
|
| 362 |
+
height: 12px;
|
| 363 |
+
background: var(--neon-cyan);
|
| 364 |
+
border-radius: 50%;
|
| 365 |
+
box-shadow: 0 0 15px var(--neon-cyan);
|
| 366 |
+
animation: brainPulse 2s ease-in-out infinite;
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
.brain-node.node-1 {
|
| 370 |
+
top: 10px;
|
| 371 |
+
left: 20px;
|
| 372 |
+
animation-delay: 0s;
|
| 373 |
+
}
|
| 374 |
+
|
| 375 |
+
.brain-node.node-2 {
|
| 376 |
+
top: 30px;
|
| 377 |
+
right: 15px;
|
| 378 |
+
animation-delay: 0.7s;
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
.brain-node.node-3 {
|
| 382 |
+
bottom: 15px;
|
| 383 |
+
left: 30px;
|
| 384 |
+
animation-delay: 1.4s;
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
.brain-connection {
|
| 388 |
+
position: absolute;
|
| 389 |
+
background: linear-gradient(45deg, transparent, var(--neon-cyan), transparent);
|
| 390 |
+
height: 2px;
|
| 391 |
+
border-radius: 1px;
|
| 392 |
+
animation: connectionFlow 3s ease-in-out infinite;
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
.brain-connection.conn-1 {
|
| 396 |
+
width: 30px;
|
| 397 |
+
top: 20px;
|
| 398 |
+
left: 25px;
|
| 399 |
+
transform: rotate(45deg);
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
.brain-connection.conn-2 {
|
| 403 |
+
width: 25px;
|
| 404 |
+
top: 40px;
|
| 405 |
+
left: 15px;
|
| 406 |
+
transform: rotate(-30deg);
|
| 407 |
+
animation-delay: 1s;
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
@keyframes brainPulse {
|
| 411 |
+
0%, 100% {
|
| 412 |
+
transform: scale(1);
|
| 413 |
+
opacity: 0.7;
|
| 414 |
+
box-shadow: 0 0 15px var(--neon-cyan);
|
| 415 |
+
}
|
| 416 |
+
50% {
|
| 417 |
+
transform: scale(1.3);
|
| 418 |
+
opacity: 1;
|
| 419 |
+
box-shadow: 0 0 25px var(--neon-cyan), 0 0 35px var(--neon-cyan);
|
| 420 |
+
}
|
| 421 |
+
}
|
| 422 |
+
|
| 423 |
+
@keyframes connectionFlow {
|
| 424 |
+
0% {
|
| 425 |
+
opacity: 0;
|
| 426 |
+
background: linear-gradient(45deg, transparent, transparent, transparent);
|
| 427 |
+
}
|
| 428 |
+
50% {
|
| 429 |
+
opacity: 1;
|
| 430 |
+
background: linear-gradient(45deg, transparent, var(--neon-cyan), transparent);
|
| 431 |
+
}
|
| 432 |
+
100% {
|
| 433 |
+
opacity: 0;
|
| 434 |
+
background: linear-gradient(45deg, transparent, transparent, transparent);
|
| 435 |
+
}
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
/* Multi-Agent Logo Animation */
|
| 439 |
+
.multi-agent-logo {
|
| 440 |
+
position: absolute;
|
| 441 |
+
width: 80px;
|
| 442 |
+
height: 80px;
|
| 443 |
+
z-index: 1;
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
.agent-node {
|
| 447 |
+
position: absolute;
|
| 448 |
+
width: 20px;
|
| 449 |
+
height: 20px;
|
| 450 |
+
background: rgba(186, 104, 200, 0.2);
|
| 451 |
+
border: 2px solid var(--neon-purple);
|
| 452 |
+
border-radius: 50%;
|
| 453 |
+
display: flex;
|
| 454 |
+
align-items: center;
|
| 455 |
+
justify-content: center;
|
| 456 |
+
font-size: 10px;
|
| 457 |
+
animation: agentOrbit 4s linear infinite;
|
| 458 |
+
}
|
| 459 |
+
|
| 460 |
+
.agent-node.agent-1 {
|
| 461 |
+
top: 5px;
|
| 462 |
+
left: 30px;
|
| 463 |
+
animation-delay: 0s;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
.agent-node.agent-2 {
|
| 467 |
+
top: 30px;
|
| 468 |
+
right: 5px;
|
| 469 |
+
animation-delay: 1.3s;
|
| 470 |
+
}
|
| 471 |
+
|
| 472 |
+
.agent-node.agent-3 {
|
| 473 |
+
bottom: 5px;
|
| 474 |
+
left: 15px;
|
| 475 |
+
animation-delay: 2.6s;
|
| 476 |
+
}
|
| 477 |
+
|
| 478 |
+
.agent-connection {
|
| 479 |
+
position: absolute;
|
| 480 |
+
background: linear-gradient(90deg, transparent, var(--neon-purple), transparent);
|
| 481 |
+
height: 1px;
|
| 482 |
+
animation: agentSync 3s ease-in-out infinite;
|
| 483 |
+
}
|
| 484 |
+
|
| 485 |
+
.agent-connection.conn-12 {
|
| 486 |
+
width: 25px;
|
| 487 |
+
top: 20px;
|
| 488 |
+
left: 40px;
|
| 489 |
+
transform: rotate(30deg);
|
| 490 |
+
}
|
| 491 |
+
|
| 492 |
+
.agent-connection.conn-23 {
|
| 493 |
+
width: 30px;
|
| 494 |
+
top: 45px;
|
| 495 |
+
left: 25px;
|
| 496 |
+
transform: rotate(-45deg);
|
| 497 |
+
animation-delay: 1s;
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
.agent-connection.conn-13 {
|
| 501 |
+
width: 20px;
|
| 502 |
+
top: 25px;
|
| 503 |
+
left: 25px;
|
| 504 |
+
transform: rotate(90deg);
|
| 505 |
+
animation-delay: 2s;
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
@keyframes agentOrbit {
|
| 509 |
+
0% { transform: rotate(0deg) translateX(15px) rotate(0deg); }
|
| 510 |
+
100% { transform: rotate(360deg) translateX(15px) rotate(-360deg); }
|
| 511 |
+
}
|
| 512 |
+
|
| 513 |
+
@keyframes agentSync {
|
| 514 |
+
0%, 100% { opacity: 0.3; }
|
| 515 |
+
50% { opacity: 1; }
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
/* Future Vision Orbit Logo */
|
| 519 |
+
.future-logo {
|
| 520 |
+
position: absolute;
|
| 521 |
+
width: 80px;
|
| 522 |
+
height: 80px;
|
| 523 |
+
z-index: 1;
|
| 524 |
+
}
|
| 525 |
+
|
| 526 |
+
.orbit {
|
| 527 |
+
position: absolute;
|
| 528 |
+
border: 1px solid rgba(129, 199, 132, 0.3);
|
| 529 |
+
border-radius: 50%;
|
| 530 |
+
animation: orbitRotation 8s linear infinite;
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
.orbit-1 {
|
| 534 |
+
width: 60px;
|
| 535 |
+
height: 60px;
|
| 536 |
+
top: 10px;
|
| 537 |
+
left: 10px;
|
| 538 |
+
}
|
| 539 |
+
|
| 540 |
+
.orbit-2 {
|
| 541 |
+
width: 40px;
|
| 542 |
+
height: 40px;
|
| 543 |
+
top: 20px;
|
| 544 |
+
left: 20px;
|
| 545 |
+
animation-direction: reverse;
|
| 546 |
+
animation-duration: 6s;
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
.orbit-node {
|
| 550 |
+
position: absolute;
|
| 551 |
+
top: -8px;
|
| 552 |
+
left: 50%;
|
| 553 |
+
transform: translateX(-50%);
|
| 554 |
+
width: 16px;
|
| 555 |
+
height: 16px;
|
| 556 |
+
background: var(--neon-green);
|
| 557 |
+
border-radius: 50%;
|
| 558 |
+
display: flex;
|
| 559 |
+
align-items: center;
|
| 560 |
+
justify-content: center;
|
| 561 |
+
font-size: 8px;
|
| 562 |
+
box-shadow: 0 0 10px var(--neon-green);
|
| 563 |
+
}
|
| 564 |
+
|
| 565 |
+
.orbit-center {
|
| 566 |
+
position: absolute;
|
| 567 |
+
top: 50%;
|
| 568 |
+
left: 50%;
|
| 569 |
+
transform: translate(-50%, -50%);
|
| 570 |
+
width: 20px;
|
| 571 |
+
height: 20px;
|
| 572 |
+
background: radial-gradient(circle, var(--neon-green), var(--neon-cyan));
|
| 573 |
+
border-radius: 50%;
|
| 574 |
+
display: flex;
|
| 575 |
+
align-items: center;
|
| 576 |
+
justify-content: center;
|
| 577 |
+
font-size: 10px;
|
| 578 |
+
animation: centerPulse 2s ease-in-out infinite;
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
@keyframes orbitRotation {
|
| 582 |
+
0% { transform: rotate(0deg); }
|
| 583 |
+
100% { transform: rotate(360deg); }
|
| 584 |
+
}
|
| 585 |
+
|
| 586 |
+
@keyframes centerPulse {
|
| 587 |
+
0%, 100% {
|
| 588 |
+
transform: translate(-50%, -50%) scale(1);
|
| 589 |
+
box-shadow: 0 0 15px var(--neon-green);
|
| 590 |
+
}
|
| 591 |
+
50% {
|
| 592 |
+
transform: translate(-50%, -50%) scale(1.2);
|
| 593 |
+
box-shadow: 0 0 25px var(--neon-green), 0 0 35px var(--neon-cyan);
|
| 594 |
+
}
|
| 595 |
+
}
|
| 596 |
+
|
| 597 |
+
/* Open Source Logo Animation */
|
| 598 |
+
.opensource-logo {
|
| 599 |
+
position: absolute;
|
| 600 |
+
width: 80px;
|
| 601 |
+
height: 80px;
|
| 602 |
+
z-index: 1;
|
| 603 |
+
}
|
| 604 |
+
|
| 605 |
+
.github-stars {
|
| 606 |
+
position: absolute;
|
| 607 |
+
width: 100%;
|
| 608 |
+
height: 100%;
|
| 609 |
+
}
|
| 610 |
+
|
| 611 |
+
.star {
|
| 612 |
+
position: absolute;
|
| 613 |
+
font-size: 14px;
|
| 614 |
+
animation: starTwinkle 3s ease-in-out infinite;
|
| 615 |
+
}
|
| 616 |
+
|
| 617 |
+
.star-1 {
|
| 618 |
+
top: 10px;
|
| 619 |
+
left: 15px;
|
| 620 |
+
animation-delay: 0s;
|
| 621 |
+
}
|
| 622 |
+
|
| 623 |
+
.star-2 {
|
| 624 |
+
top: 15px;
|
| 625 |
+
right: 10px;
|
| 626 |
+
animation-delay: 1s;
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
.star-3 {
|
| 630 |
+
bottom: 10px;
|
| 631 |
+
left: 25px;
|
| 632 |
+
animation-delay: 2s;
|
| 633 |
+
}
|
| 634 |
+
|
| 635 |
+
.community-nodes {
|
| 636 |
+
position: absolute;
|
| 637 |
+
width: 100%;
|
| 638 |
+
height: 100%;
|
| 639 |
+
}
|
| 640 |
+
|
| 641 |
+
.community-node {
|
| 642 |
+
position: absolute;
|
| 643 |
+
width: 18px;
|
| 644 |
+
height: 18px;
|
| 645 |
+
background: rgba(100, 181, 246, 0.2);
|
| 646 |
+
border: 1px solid var(--neon-blue);
|
| 647 |
+
border-radius: 50%;
|
| 648 |
+
display: flex;
|
| 649 |
+
align-items: center;
|
| 650 |
+
justify-content: center;
|
| 651 |
+
font-size: 10px;
|
| 652 |
+
animation: communityFloat 4s ease-in-out infinite;
|
| 653 |
+
}
|
| 654 |
+
|
| 655 |
+
.community-node:nth-child(1) {
|
| 656 |
+
top: 35px;
|
| 657 |
+
left: 5px;
|
| 658 |
+
animation-delay: 0s;
|
| 659 |
+
}
|
| 660 |
+
|
| 661 |
+
.community-node:nth-child(2) {
|
| 662 |
+
top: 25px;
|
| 663 |
+
right: 8px;
|
| 664 |
+
animation-delay: 1.3s;
|
| 665 |
+
}
|
| 666 |
+
|
| 667 |
+
.community-node:nth-child(3) {
|
| 668 |
+
bottom: 20px;
|
| 669 |
+
left: 30px;
|
| 670 |
+
animation-delay: 2.6s;
|
| 671 |
+
}
|
| 672 |
+
|
| 673 |
+
@keyframes starTwinkle {
|
| 674 |
+
0%, 100% {
|
| 675 |
+
opacity: 0.5;
|
| 676 |
+
transform: scale(1);
|
| 677 |
+
}
|
| 678 |
+
50% {
|
| 679 |
+
opacity: 1;
|
| 680 |
+
transform: scale(1.3);
|
| 681 |
+
}
|
| 682 |
+
}
|
| 683 |
+
|
| 684 |
+
@keyframes communityFloat {
|
| 685 |
+
0%, 100% {
|
| 686 |
+
transform: translateY(0);
|
| 687 |
+
opacity: 0.7;
|
| 688 |
+
}
|
| 689 |
+
50% {
|
| 690 |
+
transform: translateY(-5px);
|
| 691 |
+
opacity: 1;
|
| 692 |
+
}
|
| 693 |
+
}
|
| 694 |
+
|
| 695 |
+
/* Typing number animation */
|
| 696 |
+
.typing-number {
|
| 697 |
+
animation: numberCount 2s ease-out;
|
| 698 |
+
}
|
| 699 |
+
|
| 700 |
+
@keyframes numberCount {
|
| 701 |
+
0% {
|
| 702 |
+
opacity: 0;
|
| 703 |
+
transform: scale(0.5);
|
| 704 |
+
}
|
| 705 |
+
100% {
|
| 706 |
+
opacity: 1;
|
| 707 |
+
transform: scale(1);
|
| 708 |
+
}
|
| 709 |
+
}
|
| 710 |
+
|
| 711 |
+
/* Sidebar buttons - tech style */
|
| 712 |
+
.css-1d391kg .stButton button {
|
| 713 |
+
background: linear-gradient(135deg, var(--neon-cyan) 0%, var(--neon-blue) 100%) !important;
|
| 714 |
+
color: #000000 !important;
|
| 715 |
+
font-weight: 800 !important;
|
| 716 |
+
border: 2px solid var(--neon-cyan) !important;
|
| 717 |
+
border-radius: 10px !important;
|
| 718 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.4) !important;
|
| 719 |
+
text-shadow: none !important;
|
| 720 |
+
transition: all 0.3s ease !important;
|
| 721 |
+
}
|
| 722 |
+
|
| 723 |
+
.css-1d391kg .stButton button:hover {
|
| 724 |
+
box-shadow: 0 0 30px rgba(77, 208, 225, 0.6) !important;
|
| 725 |
+
transform: translateY(-2px) !important;
|
| 726 |
+
}
|
| 727 |
+
|
| 728 |
+
/* Light mode sidebar buttons - gentle and modern */
|
| 729 |
+
@media (prefers-color-scheme: light) {
|
| 730 |
+
.css-1d391kg .stButton button {
|
| 731 |
+
background: linear-gradient(135deg, var(--light-accent-blue) 0%, var(--light-accent-cyan) 100%) !important;
|
| 732 |
+
color: #ffffff !important;
|
| 733 |
+
font-weight: 600 !important;
|
| 734 |
+
border: 1px solid var(--light-accent-blue) !important;
|
| 735 |
+
border-radius: 6px !important;
|
| 736 |
+
box-shadow: 0 2px 4px rgba(59, 130, 246, 0.15) !important;
|
| 737 |
+
text-shadow: none !important;
|
| 738 |
+
transition: all 0.2s ease !important;
|
| 739 |
+
}
|
| 740 |
+
|
| 741 |
+
.css-1d391kg .stButton button:hover {
|
| 742 |
+
box-shadow: 0 4px 8px rgba(59, 130, 246, 0.25) !important;
|
| 743 |
+
transform: translateY(-1px) !important;
|
| 744 |
+
}
|
| 745 |
+
}
|
| 746 |
+
|
| 747 |
+
/* Alternative light theme detection for buttons */
|
| 748 |
+
[data-theme="light"] .css-1d391kg .stButton button,
|
| 749 |
+
.css-1d391kg[data-theme="light"] .stButton button {
|
| 750 |
+
background: linear-gradient(135deg, var(--light-accent-blue) 0%, var(--light-accent-cyan) 100%) !important;
|
| 751 |
+
color: #ffffff !important;
|
| 752 |
+
font-weight: 600 !important;
|
| 753 |
+
border: 1px solid var(--light-accent-blue) !important;
|
| 754 |
+
border-radius: 6px !important;
|
| 755 |
+
box-shadow: 0 2px 4px rgba(59, 130, 246, 0.15) !important;
|
| 756 |
+
text-shadow: none !important;
|
| 757 |
+
transition: all 0.2s ease !important;
|
| 758 |
+
}
|
| 759 |
+
|
| 760 |
+
[data-theme="light"] .css-1d391kg .stButton button:hover,
|
| 761 |
+
.css-1d391kg[data-theme="light"] .stButton button:hover {
|
| 762 |
+
box-shadow: 0 4px 8px rgba(59, 130, 246, 0.25) !important;
|
| 763 |
+
transform: translateY(-1px) !important;
|
| 764 |
+
}
|
| 765 |
+
|
| 766 |
+
/* Sidebar expanders - dark tech theme */
|
| 767 |
+
.css-1d391kg .streamlit-expanderHeader {
|
| 768 |
+
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%) !important;
|
| 769 |
+
color: var(--text-primary) !important;
|
| 770 |
+
border: 2px solid var(--neon-purple) !important;
|
| 771 |
+
font-weight: 700 !important;
|
| 772 |
+
border-radius: 10px !important;
|
| 773 |
+
box-shadow: 0 0 10px rgba(186, 104, 200, 0.3) !important;
|
| 774 |
+
}
|
| 775 |
+
|
| 776 |
+
.css-1d391kg .streamlit-expanderContent {
|
| 777 |
+
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%) !important;
|
| 778 |
+
border: 2px solid var(--neon-purple) !important;
|
| 779 |
+
color: var(--text-primary) !important;
|
| 780 |
+
border-radius: 0 0 10px 10px !important;
|
| 781 |
+
box-shadow: 0 0 10px rgba(186, 104, 200, 0.2) !important;
|
| 782 |
+
}
|
| 783 |
+
|
| 784 |
+
/* Light mode sidebar expanders - clean and minimal */
|
| 785 |
+
@media (prefers-color-scheme: light) {
|
| 786 |
+
.css-1d391kg .streamlit-expanderHeader {
|
| 787 |
+
background: var(--light-card-bg) !important;
|
| 788 |
+
color: var(--light-text-primary) !important;
|
| 789 |
+
border: 1px solid var(--light-border-medium) !important;
|
| 790 |
+
font-weight: 600 !important;
|
| 791 |
+
border-radius: 6px !important;
|
| 792 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.08) !important;
|
| 793 |
+
}
|
| 794 |
+
|
| 795 |
+
.css-1d391kg .streamlit-expanderContent {
|
| 796 |
+
background: var(--light-card-bg) !important;
|
| 797 |
+
border: 1px solid var(--light-border-medium) !important;
|
| 798 |
+
color: var(--light-text-primary) !important;
|
| 799 |
+
border-radius: 0 0 6px 6px !important;
|
| 800 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05) !important;
|
| 801 |
+
border-top: none !important;
|
| 802 |
+
}
|
| 803 |
+
}
|
| 804 |
+
|
| 805 |
+
/* Alternative light theme detection for expanders */
|
| 806 |
+
[data-theme="light"] .css-1d391kg .streamlit-expanderHeader,
|
| 807 |
+
.css-1d391kg[data-theme="light"] .streamlit-expanderHeader {
|
| 808 |
+
background: var(--light-card-bg) !important;
|
| 809 |
+
color: var(--light-text-primary) !important;
|
| 810 |
+
border: 1px solid var(--light-border-medium) !important;
|
| 811 |
+
font-weight: 600 !important;
|
| 812 |
+
border-radius: 6px !important;
|
| 813 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.08) !important;
|
| 814 |
+
}
|
| 815 |
+
|
| 816 |
+
[data-theme="light"] .css-1d391kg .streamlit-expanderContent,
|
| 817 |
+
.css-1d391kg[data-theme="light"] .streamlit-expanderContent {
|
| 818 |
+
background: var(--light-card-bg) !important;
|
| 819 |
+
border: 1px solid var(--light-border-medium) !important;
|
| 820 |
+
color: var(--light-text-primary) !important;
|
| 821 |
+
border-radius: 0 0 6px 6px !important;
|
| 822 |
+
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05) !important;
|
| 823 |
+
border-top: none !important;
|
| 824 |
+
}
|
| 825 |
+
|
| 826 |
+
/* Force high contrast for all sidebar text elements */
|
| 827 |
+
.css-1d391kg span,
|
| 828 |
+
.css-1d391kg p,
|
| 829 |
+
.css-1d391kg div,
|
| 830 |
+
.css-1d391kg label,
|
| 831 |
+
.css-1d391kg strong,
|
| 832 |
+
.css-1d391kg b {
|
| 833 |
+
color: #ffffff !important;
|
| 834 |
+
font-weight: 600 !important;
|
| 835 |
+
}
|
| 836 |
+
|
| 837 |
+
/* Light mode: Override all sidebar text colors */
|
| 838 |
+
@media (prefers-color-scheme: light) {
|
| 839 |
+
.css-1d391kg span,
|
| 840 |
+
.css-1d391kg p,
|
| 841 |
+
.css-1d391kg div,
|
| 842 |
+
.css-1d391kg label,
|
| 843 |
+
.css-1d391kg strong,
|
| 844 |
+
.css-1d391kg b {
|
| 845 |
+
color: var(--light-text-primary) !important;
|
| 846 |
+
font-weight: 600 !important;
|
| 847 |
+
}
|
| 848 |
+
}
|
| 849 |
+
|
| 850 |
+
/* Alternative light theme detection for all sidebar text */
|
| 851 |
+
[data-theme="light"] .css-1d391kg span,
|
| 852 |
+
[data-theme="light"] .css-1d391kg p,
|
| 853 |
+
[data-theme="light"] .css-1d391kg div,
|
| 854 |
+
[data-theme="light"] .css-1d391kg label,
|
| 855 |
+
[data-theme="light"] .css-1d391kg strong,
|
| 856 |
+
[data-theme="light"] .css-1d391kg b,
|
| 857 |
+
.css-1d391kg[data-theme="light"] span,
|
| 858 |
+
.css-1d391kg[data-theme="light"] p,
|
| 859 |
+
.css-1d391kg[data-theme="light"] div,
|
| 860 |
+
.css-1d391kg[data-theme="light"] label,
|
| 861 |
+
.css-1d391kg[data-theme="light"] strong,
|
| 862 |
+
.css-1d391kg[data-theme="light"] b {
|
| 863 |
+
color: var(--light-text-primary) !important;
|
| 864 |
+
font-weight: 600 !important;
|
| 865 |
+
}
|
| 866 |
+
|
| 867 |
+
/* Sidebar markdown content */
|
| 868 |
+
.css-1d391kg [data-testid="stMarkdownContainer"] p {
|
| 869 |
+
color: #ffffff !important;
|
| 870 |
+
font-weight: 600 !important;
|
| 871 |
+
background: none !important;
|
| 872 |
+
}
|
| 873 |
+
|
| 874 |
+
/* Light mode: Override sidebar markdown text */
|
| 875 |
+
@media (prefers-color-scheme: light) {
|
| 876 |
+
.css-1d391kg [data-testid="stMarkdownContainer"] p {
|
| 877 |
+
color: var(--light-text-primary) !important;
|
| 878 |
+
font-weight: 600 !important;
|
| 879 |
+
background: none !important;
|
| 880 |
+
}
|
| 881 |
+
}
|
| 882 |
+
|
| 883 |
+
/* Alternative light theme detection for markdown content */
|
| 884 |
+
[data-theme="light"] .css-1d391kg [data-testid="stMarkdownContainer"] p,
|
| 885 |
+
.css-1d391kg[data-theme="light"] [data-testid="stMarkdownContainer"] p {
|
| 886 |
+
color: var(--light-text-primary) !important;
|
| 887 |
+
font-weight: 600 !important;
|
| 888 |
+
background: none !important;
|
| 889 |
+
}
|
| 890 |
+
|
| 891 |
+
/* Sidebar special styles - system info boxes */
|
| 892 |
+
.css-1d391kg .element-container {
|
| 893 |
+
background: none !important;
|
| 894 |
+
}
|
| 895 |
+
|
| 896 |
+
.css-1d391kg .element-container div {
|
| 897 |
+
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%) !important;
|
| 898 |
+
border: 1px solid var(--neon-cyan) !important;
|
| 899 |
+
border-radius: 8px !important;
|
| 900 |
+
padding: 0.8rem !important;
|
| 901 |
+
box-shadow: 0 0 10px rgba(77, 208, 225, 0.2) !important;
|
| 902 |
+
margin: 0.3rem 0 !important;
|
| 903 |
+
}
|
| 904 |
+
|
| 905 |
+
/* Processing History special handling */
|
| 906 |
+
.css-1d391kg .stExpander {
|
| 907 |
+
background: linear-gradient(135deg, #0d1117 0%, #161b22 100%) !important;
|
| 908 |
+
border: 2px solid var(--neon-green) !important;
|
| 909 |
+
border-radius: 12px !important;
|
| 910 |
+
box-shadow: 0 0 15px rgba(129, 199, 132, 0.3) !important;
|
| 911 |
+
margin: 0.5rem 0 !important;
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
/* Ensure all text is visible on dark background */
|
| 915 |
+
.css-1d391kg .stExpander div,
|
| 916 |
+
.css-1d391kg .stExpander p,
|
| 917 |
+
.css-1d391kg .stExpander span {
|
| 918 |
+
color: #ffffff !important;
|
| 919 |
+
font-weight: 600 !important;
|
| 920 |
+
background: none !important;
|
| 921 |
+
}
|
| 922 |
+
|
| 923 |
+
/* Light mode: Override expander text colors */
|
| 924 |
+
@media (prefers-color-scheme: light) {
|
| 925 |
+
.css-1d391kg .stExpander div,
|
| 926 |
+
.css-1d391kg .stExpander p,
|
| 927 |
+
.css-1d391kg .stExpander span {
|
| 928 |
+
color: var(--light-text-primary) !important;
|
| 929 |
+
font-weight: 600 !important;
|
| 930 |
+
background: none !important;
|
| 931 |
+
}
|
| 932 |
+
}
|
| 933 |
+
|
| 934 |
+
/* Alternative light theme detection for expander text */
|
| 935 |
+
[data-theme="light"] .css-1d391kg .stExpander div,
|
| 936 |
+
[data-theme="light"] .css-1d391kg .stExpander p,
|
| 937 |
+
[data-theme="light"] .css-1d391kg .stExpander span,
|
| 938 |
+
.css-1d391kg[data-theme="light"] .stExpander div,
|
| 939 |
+
.css-1d391kg[data-theme="light"] .stExpander p,
|
| 940 |
+
.css-1d391kg[data-theme="light"] .stExpander span {
|
| 941 |
+
color: var(--light-text-primary) !important;
|
| 942 |
+
font-weight: 600 !important;
|
| 943 |
+
background: none !important;
|
| 944 |
+
}
|
| 945 |
+
|
| 946 |
+
/* Main header area - enhanced version */
|
| 947 |
+
.main-header {
|
| 948 |
+
position: relative;
|
| 949 |
+
background: linear-gradient(135deg,
|
| 950 |
+
rgba(100, 181, 246, 0.12) 0%,
|
| 951 |
+
rgba(77, 208, 225, 0.10) 30%,
|
| 952 |
+
rgba(186, 104, 200, 0.12) 70%,
|
| 953 |
+
rgba(129, 199, 132, 0.10) 100%);
|
| 954 |
+
backdrop-filter: blur(25px);
|
| 955 |
+
border: 1px solid transparent;
|
| 956 |
+
background-clip: padding-box;
|
| 957 |
+
padding: 4rem 2rem;
|
| 958 |
+
border-radius: 25px;
|
| 959 |
+
margin-bottom: 3rem;
|
| 960 |
+
text-align: center;
|
| 961 |
+
overflow: hidden;
|
| 962 |
+
box-shadow:
|
| 963 |
+
0 20px 60px rgba(0, 0, 0, 0.4),
|
| 964 |
+
0 8px 32px rgba(100, 181, 246, 0.2),
|
| 965 |
+
inset 0 1px 0 rgba(255, 255, 255, 0.1);
|
| 966 |
+
}
|
| 967 |
+
|
| 968 |
+
.main-header::before {
|
| 969 |
+
content: '';
|
| 970 |
+
position: absolute;
|
| 971 |
+
top: 0;
|
| 972 |
+
left: 0;
|
| 973 |
+
right: 0;
|
| 974 |
+
bottom: 0;
|
| 975 |
+
background: linear-gradient(45deg,
|
| 976 |
+
var(--neon-cyan) 0%,
|
| 977 |
+
var(--neon-purple) 25%,
|
| 978 |
+
var(--neon-blue) 50%,
|
| 979 |
+
var(--neon-green) 75%,
|
| 980 |
+
var(--neon-cyan) 100%);
|
| 981 |
+
background-size: 400% 400%;
|
| 982 |
+
border-radius: 25px;
|
| 983 |
+
padding: 1px;
|
| 984 |
+
margin: -1px;
|
| 985 |
+
z-index: -1;
|
| 986 |
+
animation: borderGlow 6s ease-in-out infinite;
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
.main-header::after {
|
| 990 |
+
content: '';
|
| 991 |
+
position: absolute;
|
| 992 |
+
top: 50%;
|
| 993 |
+
left: 50%;
|
| 994 |
+
width: 300%;
|
| 995 |
+
height: 300%;
|
| 996 |
+
background: radial-gradient(circle, transparent 30%, rgba(77, 208, 225, 0.03) 60%, transparent 70%);
|
| 997 |
+
transform: translate(-50%, -50%);
|
| 998 |
+
animation: headerPulse 8s ease-in-out infinite;
|
| 999 |
+
pointer-events: none;
|
| 1000 |
+
}
|
| 1001 |
+
|
| 1002 |
+
@keyframes headerPulse {
|
| 1003 |
+
0%, 100% {
|
| 1004 |
+
opacity: 0.3;
|
| 1005 |
+
transform: translate(-50%, -50%) scale(1);
|
| 1006 |
+
}
|
| 1007 |
+
50% {
|
| 1008 |
+
opacity: 0.7;
|
| 1009 |
+
transform: translate(-50%, -50%) scale(1.1);
|
| 1010 |
+
}
|
| 1011 |
+
}
|
| 1012 |
+
|
| 1013 |
+
.main-header h1 {
|
| 1014 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1015 |
+
font-size: 3.8rem !important;
|
| 1016 |
+
font-weight: 800 !important;
|
| 1017 |
+
background: linear-gradient(135deg, var(--neon-blue) 0%, var(--neon-cyan) 40%, #90caf9 80%, var(--neon-blue) 100%) !important;
|
| 1018 |
+
background-size: 200% 200% !important;
|
| 1019 |
+
-webkit-background-clip: text !important;
|
| 1020 |
+
-webkit-text-fill-color: transparent !important;
|
| 1021 |
+
background-clip: text !important;
|
| 1022 |
+
text-shadow: 0 0 25px rgba(100, 181, 246, 0.4) !important;
|
| 1023 |
+
margin-bottom: 1.2rem !important;
|
| 1024 |
+
animation: titleGlow 4s ease-in-out infinite alternate, gradientShift 3s ease-in-out infinite !important;
|
| 1025 |
+
position: relative;
|
| 1026 |
+
z-index: 2;
|
| 1027 |
+
}
|
| 1028 |
+
|
| 1029 |
+
@keyframes titleGlow {
|
| 1030 |
+
0% {
|
| 1031 |
+
filter: drop-shadow(0 0 10px rgba(100, 181, 246, 0.3)) drop-shadow(0 0 15px rgba(100, 181, 246, 0.2));
|
| 1032 |
+
text-shadow: 0 0 25px rgba(100, 181, 246, 0.4);
|
| 1033 |
+
}
|
| 1034 |
+
33% {
|
| 1035 |
+
filter: drop-shadow(0 0 12px rgba(77, 208, 225, 0.4)) drop-shadow(0 0 18px rgba(77, 208, 225, 0.25));
|
| 1036 |
+
text-shadow: 0 0 30px rgba(77, 208, 225, 0.5);
|
| 1037 |
+
}
|
| 1038 |
+
66% {
|
| 1039 |
+
filter: drop-shadow(0 0 14px rgba(144, 202, 249, 0.35)) drop-shadow(0 0 20px rgba(144, 202, 249, 0.2));
|
| 1040 |
+
text-shadow: 0 0 35px rgba(144, 202, 249, 0.45);
|
| 1041 |
+
}
|
| 1042 |
+
100% {
|
| 1043 |
+
filter: drop-shadow(0 0 10px rgba(100, 181, 246, 0.3)) drop-shadow(0 0 15px rgba(100, 181, 246, 0.2));
|
| 1044 |
+
text-shadow: 0 0 25px rgba(100, 181, 246, 0.4);
|
| 1045 |
+
}
|
| 1046 |
+
}
|
| 1047 |
+
|
| 1048 |
+
@keyframes gradientShift {
|
| 1049 |
+
0%, 100% { background-position: 0% 50%; }
|
| 1050 |
+
50% { background-position: 100% 50%; }
|
| 1051 |
+
}
|
| 1052 |
+
|
| 1053 |
+
.main-header h3 {
|
| 1054 |
+
font-family: 'Inter', sans-serif !important;
|
| 1055 |
+
font-size: 1.2rem !important;
|
| 1056 |
+
font-weight: 400 !important;
|
| 1057 |
+
color: var(--text-secondary) !important;
|
| 1058 |
+
letter-spacing: 2px !important;
|
| 1059 |
+
margin-bottom: 0.5rem !important;
|
| 1060 |
+
}
|
| 1061 |
+
|
| 1062 |
+
.main-header p {
|
| 1063 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1064 |
+
font-size: 0.9rem !important;
|
| 1065 |
+
color: var(--neon-green) !important;
|
| 1066 |
+
letter-spacing: 1px !important;
|
| 1067 |
+
font-weight: 600 !important;
|
| 1068 |
+
}
|
| 1069 |
+
|
| 1070 |
+
/* Streamlit component style overrides */
|
| 1071 |
+
.stMarkdown h3 {
|
| 1072 |
+
color: var(--neon-cyan) !important;
|
| 1073 |
+
font-family: 'Inter', sans-serif !important;
|
| 1074 |
+
font-weight: 700 !important;
|
| 1075 |
+
font-size: 1.5rem !important;
|
| 1076 |
+
text-shadow: 0 0 10px rgba(77, 208, 225, 0.3) !important;
|
| 1077 |
+
}
|
| 1078 |
+
|
| 1079 |
+
/* Radio button styles */
|
| 1080 |
+
.stRadio > div {
|
| 1081 |
+
background: var(--card-bg) !important;
|
| 1082 |
+
border: 1px solid var(--border-color) !important;
|
| 1083 |
+
border-radius: 12px !important;
|
| 1084 |
+
padding: 1rem !important;
|
| 1085 |
+
backdrop-filter: blur(10px) !important;
|
| 1086 |
+
}
|
| 1087 |
+
|
| 1088 |
+
.stRadio label {
|
| 1089 |
+
color: var(--text-primary) !important;
|
| 1090 |
+
font-weight: 600 !important;
|
| 1091 |
+
font-size: 1rem !important;
|
| 1092 |
+
}
|
| 1093 |
+
|
| 1094 |
+
.stRadio > div > div > div > label {
|
| 1095 |
+
color: var(--text-secondary) !important;
|
| 1096 |
+
font-weight: 500 !important;
|
| 1097 |
+
font-size: 1rem !important;
|
| 1098 |
+
}
|
| 1099 |
+
|
| 1100 |
+
/* File uploader */
|
| 1101 |
+
.stFileUploader > div {
|
| 1102 |
+
background: var(--card-bg) !important;
|
| 1103 |
+
border: 2px dashed var(--border-color) !important;
|
| 1104 |
+
border-radius: 15px !important;
|
| 1105 |
+
transition: all 0.3s ease !important;
|
| 1106 |
+
backdrop-filter: blur(10px) !important;
|
| 1107 |
+
}
|
| 1108 |
+
|
| 1109 |
+
.stFileUploader > div:hover {
|
| 1110 |
+
border-color: var(--neon-cyan) !important;
|
| 1111 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.3) !important;
|
| 1112 |
+
}
|
| 1113 |
+
|
| 1114 |
+
.stFileUploader label {
|
| 1115 |
+
color: var(--text-primary) !important;
|
| 1116 |
+
font-weight: 600 !important;
|
| 1117 |
+
}
|
| 1118 |
+
|
| 1119 |
+
.stFileUploader span {
|
| 1120 |
+
color: var(--text-secondary) !important;
|
| 1121 |
+
font-weight: 500 !important;
|
| 1122 |
+
}
|
| 1123 |
+
|
| 1124 |
+
/* Text input fields */
|
| 1125 |
+
.stTextInput > div > div > input {
|
| 1126 |
+
background: var(--card-bg) !important;
|
| 1127 |
+
border: 1px solid var(--border-color) !important;
|
| 1128 |
+
border-radius: 10px !important;
|
| 1129 |
+
color: var(--text-primary) !important;
|
| 1130 |
+
font-weight: 500 !important;
|
| 1131 |
+
backdrop-filter: blur(10px) !important;
|
| 1132 |
+
}
|
| 1133 |
+
|
| 1134 |
+
.stTextInput > div > div > input:focus {
|
| 1135 |
+
border-color: var(--neon-cyan) !important;
|
| 1136 |
+
box-shadow: 0 0 0 1px var(--neon-cyan) !important;
|
| 1137 |
+
}
|
| 1138 |
+
|
| 1139 |
+
.stTextInput label {
|
| 1140 |
+
color: var(--text-primary) !important;
|
| 1141 |
+
font-weight: 600 !important;
|
| 1142 |
+
}
|
| 1143 |
+
|
| 1144 |
+
/* Button styles */
|
| 1145 |
+
.stButton > button {
|
| 1146 |
+
width: 100% !important;
|
| 1147 |
+
background: var(--primary-gradient) !important;
|
| 1148 |
+
color: white !important;
|
| 1149 |
+
border: none !important;
|
| 1150 |
+
border-radius: 12px !important;
|
| 1151 |
+
padding: 0.8rem 2rem !important;
|
| 1152 |
+
font-family: 'Inter', sans-serif !important;
|
| 1153 |
+
font-weight: 600 !important;
|
| 1154 |
+
font-size: 1rem !important;
|
| 1155 |
+
letter-spacing: 0.5px !important;
|
| 1156 |
+
transition: all 0.3s ease !important;
|
| 1157 |
+
box-shadow: 0 4px 15px rgba(102, 126, 234, 0.3) !important;
|
| 1158 |
+
}
|
| 1159 |
+
|
| 1160 |
+
.stButton > button:hover {
|
| 1161 |
+
transform: translateY(-2px) !important;
|
| 1162 |
+
box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important;
|
| 1163 |
+
}
|
| 1164 |
+
|
| 1165 |
+
/* Status message styles */
|
| 1166 |
+
.status-success, .stSuccess {
|
| 1167 |
+
background: linear-gradient(135deg, rgba(129, 199, 132, 0.15) 0%, rgba(129, 199, 132, 0.05) 100%) !important;
|
| 1168 |
+
color: var(--neon-green) !important;
|
| 1169 |
+
padding: 1rem 1.5rem !important;
|
| 1170 |
+
border-radius: 10px !important;
|
| 1171 |
+
border: 1px solid rgba(129, 199, 132, 0.3) !important;
|
| 1172 |
+
backdrop-filter: blur(10px) !important;
|
| 1173 |
+
font-weight: 600 !important;
|
| 1174 |
+
}
|
| 1175 |
+
|
| 1176 |
+
.status-error, .stError {
|
| 1177 |
+
background: linear-gradient(135deg, rgba(244, 67, 54, 0.15) 0%, rgba(244, 67, 54, 0.05) 100%) !important;
|
| 1178 |
+
color: #ff8a80 !important;
|
| 1179 |
+
padding: 1rem 1.5rem !important;
|
| 1180 |
+
border-radius: 10px !important;
|
| 1181 |
+
border: 1px solid rgba(244, 67, 54, 0.3) !important;
|
| 1182 |
+
backdrop-filter: blur(10px) !important;
|
| 1183 |
+
font-weight: 600 !important;
|
| 1184 |
+
}
|
| 1185 |
+
|
| 1186 |
+
.status-warning, .stWarning {
|
| 1187 |
+
background: linear-gradient(135deg, rgba(255, 193, 7, 0.15) 0%, rgba(255, 193, 7, 0.05) 100%) !important;
|
| 1188 |
+
color: #ffcc02 !important;
|
| 1189 |
+
padding: 1rem 1.5rem !important;
|
| 1190 |
+
border-radius: 10px !important;
|
| 1191 |
+
border: 1px solid rgba(255, 193, 7, 0.3) !important;
|
| 1192 |
+
backdrop-filter: blur(10px) !important;
|
| 1193 |
+
font-weight: 600 !important;
|
| 1194 |
+
}
|
| 1195 |
+
|
| 1196 |
+
.status-info, .stInfo {
|
| 1197 |
+
background: linear-gradient(135deg, rgba(77, 208, 225, 0.15) 0%, rgba(77, 208, 225, 0.05) 100%) !important;
|
| 1198 |
+
color: var(--neon-cyan) !important;
|
| 1199 |
+
padding: 1rem 1.5rem !important;
|
| 1200 |
+
border-radius: 10px !important;
|
| 1201 |
+
border: 1px solid rgba(77, 208, 225, 0.3) !important;
|
| 1202 |
+
backdrop-filter: blur(10px) !important;
|
| 1203 |
+
font-weight: 600 !important;
|
| 1204 |
+
}
|
| 1205 |
+
|
| 1206 |
+
/* Progress bar */
|
| 1207 |
+
.progress-container {
|
| 1208 |
+
margin: 1.5rem 0;
|
| 1209 |
+
padding: 2rem;
|
| 1210 |
+
background: var(--card-bg);
|
| 1211 |
+
backdrop-filter: blur(15px);
|
| 1212 |
+
border: 1px solid var(--border-color);
|
| 1213 |
+
border-radius: 15px;
|
| 1214 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3);
|
| 1215 |
+
}
|
| 1216 |
+
|
| 1217 |
+
.stProgress > div > div > div {
|
| 1218 |
+
background: var(--accent-gradient) !important;
|
| 1219 |
+
border-radius: 10px !important;
|
| 1220 |
+
}
|
| 1221 |
+
|
| 1222 |
+
/* Text area */
|
| 1223 |
+
.stTextArea > div > div > textarea {
|
| 1224 |
+
background: var(--card-bg) !important;
|
| 1225 |
+
border: 1px solid var(--border-color) !important;
|
| 1226 |
+
border-radius: 10px !important;
|
| 1227 |
+
color: var(--text-primary) !important;
|
| 1228 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1229 |
+
backdrop-filter: blur(10px) !important;
|
| 1230 |
+
}
|
| 1231 |
+
|
| 1232 |
+
/* Expander */
|
| 1233 |
+
.streamlit-expanderHeader {
|
| 1234 |
+
background: var(--card-bg) !important;
|
| 1235 |
+
color: var(--text-primary) !important;
|
| 1236 |
+
border: 1px solid var(--border-color) !important;
|
| 1237 |
+
font-weight: 600 !important;
|
| 1238 |
+
}
|
| 1239 |
+
|
| 1240 |
+
.streamlit-expanderContent {
|
| 1241 |
+
background: var(--card-bg) !important;
|
| 1242 |
+
border: 1px solid var(--border-color) !important;
|
| 1243 |
+
}
|
| 1244 |
+
|
| 1245 |
+
/* Ensure all Markdown content is visible */
|
| 1246 |
+
[data-testid="stMarkdownContainer"] p {
|
| 1247 |
+
color: var(--text-secondary) !important;
|
| 1248 |
+
font-weight: 500 !important;
|
| 1249 |
+
}
|
| 1250 |
+
|
| 1251 |
+
/* Dividers */
|
| 1252 |
+
hr {
|
| 1253 |
+
border-color: var(--border-color) !important;
|
| 1254 |
+
opacity: 0.5 !important;
|
| 1255 |
+
}
|
| 1256 |
+
|
| 1257 |
+
/* Scrollbars */
|
| 1258 |
+
::-webkit-scrollbar {
|
| 1259 |
+
width: 8px;
|
| 1260 |
+
}
|
| 1261 |
+
|
| 1262 |
+
::-webkit-scrollbar-track {
|
| 1263 |
+
background: var(--accent-bg);
|
| 1264 |
+
border-radius: 10px;
|
| 1265 |
+
}
|
| 1266 |
+
|
| 1267 |
+
::-webkit-scrollbar-thumb {
|
| 1268 |
+
background: var(--accent-gradient);
|
| 1269 |
+
border-radius: 10px;
|
| 1270 |
+
}
|
| 1271 |
+
|
| 1272 |
+
::-webkit-scrollbar-thumb:hover {
|
| 1273 |
+
background: var(--primary-gradient);
|
| 1274 |
+
}
|
| 1275 |
+
|
| 1276 |
+
/* Placeholder text */
|
| 1277 |
+
::placeholder {
|
| 1278 |
+
color: var(--text-muted) !important;
|
| 1279 |
+
opacity: 0.7 !important;
|
| 1280 |
+
}
|
| 1281 |
+
|
| 1282 |
+
/* ================================
|
| 1283 |
+
AI AGENT CAPABILITIES DISPLAY
|
| 1284 |
+
================================ */
|
| 1285 |
+
|
| 1286 |
+
/* AI Capabilities section - simplified to avoid conflicts with main header */
|
| 1287 |
+
.ai-capabilities-section {
|
| 1288 |
+
position: relative;
|
| 1289 |
+
background: linear-gradient(135deg,
|
| 1290 |
+
rgba(77, 208, 225, 0.08) 0%,
|
| 1291 |
+
rgba(186, 104, 200, 0.06) 50%,
|
| 1292 |
+
rgba(129, 199, 132, 0.08) 100%);
|
| 1293 |
+
backdrop-filter: blur(15px);
|
| 1294 |
+
border: 1px solid rgba(77, 208, 225, 0.2);
|
| 1295 |
+
padding: 2rem 1.5rem;
|
| 1296 |
+
border-radius: 20px;
|
| 1297 |
+
margin: 2rem 0;
|
| 1298 |
+
text-align: center;
|
| 1299 |
+
overflow: hidden;
|
| 1300 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.2);
|
| 1301 |
+
}
|
| 1302 |
+
|
| 1303 |
+
.ai-capabilities-section::before {
|
| 1304 |
+
content: '';
|
| 1305 |
+
position: absolute;
|
| 1306 |
+
top: 0;
|
| 1307 |
+
left: 0;
|
| 1308 |
+
right: 0;
|
| 1309 |
+
bottom: 0;
|
| 1310 |
+
background: linear-gradient(90deg,
|
| 1311 |
+
transparent 0%,
|
| 1312 |
+
rgba(77, 208, 225, 0.1) 50%,
|
| 1313 |
+
transparent 100%);
|
| 1314 |
+
animation: shimmer 3s ease-in-out infinite;
|
| 1315 |
+
}
|
| 1316 |
+
|
| 1317 |
+
@keyframes shimmer {
|
| 1318 |
+
0% { transform: translateX(-100%); }
|
| 1319 |
+
100% { transform: translateX(100%); }
|
| 1320 |
+
}
|
| 1321 |
+
|
| 1322 |
+
@keyframes borderGlow {
|
| 1323 |
+
0%, 100% { background-position: 0% 50%; }
|
| 1324 |
+
50% { background-position: 100% 50%; }
|
| 1325 |
+
}
|
| 1326 |
+
|
| 1327 |
+
/* Neural network animation */
|
| 1328 |
+
.neural-network {
|
| 1329 |
+
position: absolute;
|
| 1330 |
+
top: 1rem;
|
| 1331 |
+
right: 2rem;
|
| 1332 |
+
display: flex;
|
| 1333 |
+
gap: 0.5rem;
|
| 1334 |
+
}
|
| 1335 |
+
|
| 1336 |
+
.neuron {
|
| 1337 |
+
width: 12px;
|
| 1338 |
+
height: 12px;
|
| 1339 |
+
border-radius: 50%;
|
| 1340 |
+
background: var(--neon-cyan);
|
| 1341 |
+
box-shadow: 0 0 10px var(--neon-cyan);
|
| 1342 |
+
animation-duration: 2s;
|
| 1343 |
+
animation-iteration-count: infinite;
|
| 1344 |
+
animation-timing-function: ease-in-out;
|
| 1345 |
+
}
|
| 1346 |
+
|
| 1347 |
+
.pulse-1 { animation-name: neuronPulse; animation-delay: 0s; }
|
| 1348 |
+
.pulse-2 { animation-name: neuronPulse; animation-delay: 0.3s; }
|
| 1349 |
+
.pulse-3 { animation-name: neuronPulse; animation-delay: 0.6s; }
|
| 1350 |
+
|
| 1351 |
+
@keyframes neuronPulse {
|
| 1352 |
+
0%, 100% {
|
| 1353 |
+
transform: scale(1);
|
| 1354 |
+
opacity: 0.7;
|
| 1355 |
+
box-shadow: 0 0 10px var(--neon-cyan);
|
| 1356 |
+
}
|
| 1357 |
+
50% {
|
| 1358 |
+
transform: scale(1.3);
|
| 1359 |
+
opacity: 1;
|
| 1360 |
+
box-shadow: 0 0 20px var(--neon-cyan), 0 0 30px var(--neon-cyan);
|
| 1361 |
+
}
|
| 1362 |
+
}
|
| 1363 |
+
|
| 1364 |
+
.capabilities-title {
|
| 1365 |
+
font-family: 'Inter', sans-serif !important;
|
| 1366 |
+
font-size: 2rem !important;
|
| 1367 |
+
font-weight: 700 !important;
|
| 1368 |
+
background: linear-gradient(135deg, var(--neon-cyan), var(--neon-purple), var(--neon-green));
|
| 1369 |
+
background-clip: text;
|
| 1370 |
+
-webkit-background-clip: text;
|
| 1371 |
+
-webkit-text-fill-color: transparent;
|
| 1372 |
+
text-shadow: 0 0 20px rgba(77, 208, 225, 0.3);
|
| 1373 |
+
margin-bottom: 0.5rem !important;
|
| 1374 |
+
letter-spacing: -0.5px;
|
| 1375 |
+
}
|
| 1376 |
+
|
| 1377 |
+
.capabilities-subtitle {
|
| 1378 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1379 |
+
color: var(--neon-cyan) !important;
|
| 1380 |
+
font-size: 0.9rem !important;
|
| 1381 |
+
letter-spacing: 1.5px !important;
|
| 1382 |
+
font-weight: 500 !important;
|
| 1383 |
+
text-transform: uppercase;
|
| 1384 |
+
opacity: 0.8;
|
| 1385 |
+
}
|
| 1386 |
+
|
| 1387 |
+
/* Enhanced feature card system - ensure alignment */
|
| 1388 |
+
.feature-card {
|
| 1389 |
+
position: relative;
|
| 1390 |
+
background: var(--card-bg);
|
| 1391 |
+
backdrop-filter: blur(20px);
|
| 1392 |
+
border: 1px solid var(--border-color);
|
| 1393 |
+
padding: 2.5rem;
|
| 1394 |
+
border-radius: 20px;
|
| 1395 |
+
margin: 1.5rem 0;
|
| 1396 |
+
transition: all 0.4s cubic-bezier(0.175, 0.885, 0.32, 1.275);
|
| 1397 |
+
box-shadow: 0 8px 40px rgba(0, 0, 0, 0.3);
|
| 1398 |
+
overflow: hidden;
|
| 1399 |
+
/* Ensure card alignment */
|
| 1400 |
+
min-height: 420px;
|
| 1401 |
+
display: flex;
|
| 1402 |
+
flex-direction: column;
|
| 1403 |
+
justify-content: space-between;
|
| 1404 |
+
}
|
| 1405 |
+
|
| 1406 |
+
/* NEW VERTICAL LAYOUT FEATURE CARDS */
|
| 1407 |
+
.feature-card-vertical {
|
| 1408 |
+
position: relative;
|
| 1409 |
+
background: linear-gradient(135deg, var(--card-bg) 0%, rgba(45, 55, 72, 0.8) 100%);
|
| 1410 |
+
backdrop-filter: blur(25px);
|
| 1411 |
+
border: 1px solid var(--border-color);
|
| 1412 |
+
padding: 0;
|
| 1413 |
+
border-radius: 24px;
|
| 1414 |
+
margin: 2.5rem 0;
|
| 1415 |
+
transition: all 0.5s cubic-bezier(0.175, 0.885, 0.32, 1.275);
|
| 1416 |
+
box-shadow: 0 12px 60px rgba(0, 0, 0, 0.4);
|
| 1417 |
+
overflow: hidden;
|
| 1418 |
+
min-height: 500px;
|
| 1419 |
+
}
|
| 1420 |
+
|
| 1421 |
+
.feature-card-vertical:hover {
|
| 1422 |
+
transform: translateY(-8px) scale(1.01);
|
| 1423 |
+
box-shadow: 0 20px 80px rgba(0, 0, 0, 0.5);
|
| 1424 |
+
}
|
| 1425 |
+
|
| 1426 |
+
/* Card glow effect for vertical cards */
|
| 1427 |
+
.card-glow-vertical {
|
| 1428 |
+
position: absolute;
|
| 1429 |
+
top: -50%;
|
| 1430 |
+
left: -50%;
|
| 1431 |
+
width: 200%;
|
| 1432 |
+
height: 200%;
|
| 1433 |
+
background: radial-gradient(circle, transparent 30%, rgba(77, 208, 225, 0.03) 60%, transparent 80%);
|
| 1434 |
+
opacity: 0;
|
| 1435 |
+
transition: opacity 0.5s ease;
|
| 1436 |
+
pointer-events: none;
|
| 1437 |
+
animation: verticalGlowPulse 8s ease-in-out infinite;
|
| 1438 |
+
}
|
| 1439 |
+
|
| 1440 |
+
.feature-card-vertical:hover .card-glow-vertical {
|
| 1441 |
+
opacity: 1;
|
| 1442 |
+
}
|
| 1443 |
+
|
| 1444 |
+
@keyframes verticalGlowPulse {
|
| 1445 |
+
0%, 100% {
|
| 1446 |
+
transform: rotate(0deg) scale(1);
|
| 1447 |
+
opacity: 0.3;
|
| 1448 |
+
}
|
| 1449 |
+
50% {
|
| 1450 |
+
transform: rotate(180deg) scale(1.1);
|
| 1451 |
+
opacity: 0.7;
|
| 1452 |
+
}
|
| 1453 |
+
}
|
| 1454 |
+
|
| 1455 |
+
/* Feature header section */
|
| 1456 |
+
.feature-header {
|
| 1457 |
+
display: flex;
|
| 1458 |
+
align-items: center;
|
| 1459 |
+
padding: 2.5rem 3rem 1.5rem 3rem;
|
| 1460 |
+
background: linear-gradient(135deg, rgba(77, 208, 225, 0.08) 0%, rgba(186, 104, 200, 0.06) 100%);
|
| 1461 |
+
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
|
| 1462 |
+
gap: 2rem;
|
| 1463 |
+
}
|
| 1464 |
+
|
| 1465 |
+
.feature-logo-container {
|
| 1466 |
+
position: relative;
|
| 1467 |
+
display: flex;
|
| 1468 |
+
align-items: center;
|
| 1469 |
+
justify-content: center;
|
| 1470 |
+
width: 80px;
|
| 1471 |
+
height: 80px;
|
| 1472 |
+
flex-shrink: 0;
|
| 1473 |
+
}
|
| 1474 |
+
|
| 1475 |
+
.feature-icon-large {
|
| 1476 |
+
font-size: 3.5rem;
|
| 1477 |
+
z-index: 2;
|
| 1478 |
+
filter: drop-shadow(0 0 15px rgba(77, 208, 225, 0.5));
|
| 1479 |
+
}
|
| 1480 |
+
|
| 1481 |
+
.feature-header-content {
|
| 1482 |
+
flex: 1;
|
| 1483 |
+
}
|
| 1484 |
+
|
| 1485 |
+
.feature-title-large {
|
| 1486 |
+
font-family: 'Inter', sans-serif !important;
|
| 1487 |
+
color: var(--text-primary) !important;
|
| 1488 |
+
font-size: 2rem !important;
|
| 1489 |
+
font-weight: 700 !important;
|
| 1490 |
+
margin-bottom: 0.5rem !important;
|
| 1491 |
+
text-shadow: 0 0 20px rgba(255, 255, 255, 0.3);
|
| 1492 |
+
background: linear-gradient(135deg, var(--neon-cyan), var(--neon-blue));
|
| 1493 |
+
background-clip: text;
|
| 1494 |
+
-webkit-background-clip: text;
|
| 1495 |
+
-webkit-text-fill-color: transparent;
|
| 1496 |
+
}
|
| 1497 |
+
|
| 1498 |
+
.feature-subtitle {
|
| 1499 |
+
color: var(--text-secondary) !important;
|
| 1500 |
+
font-size: 1rem !important;
|
| 1501 |
+
font-weight: 500 !important;
|
| 1502 |
+
opacity: 0.9;
|
| 1503 |
+
}
|
| 1504 |
+
|
| 1505 |
+
.feature-stats {
|
| 1506 |
+
display: flex;
|
| 1507 |
+
flex-direction: column;
|
| 1508 |
+
gap: 1rem;
|
| 1509 |
+
align-items: flex-end;
|
| 1510 |
+
}
|
| 1511 |
+
|
| 1512 |
+
.stat-item {
|
| 1513 |
+
text-align: center;
|
| 1514 |
+
padding: 0.8rem 1.2rem;
|
| 1515 |
+
background: rgba(77, 208, 225, 0.1);
|
| 1516 |
+
border: 1px solid rgba(77, 208, 225, 0.3);
|
| 1517 |
+
border-radius: 12px;
|
| 1518 |
+
backdrop-filter: blur(10px);
|
| 1519 |
+
min-width: 80px;
|
| 1520 |
+
}
|
| 1521 |
+
|
| 1522 |
+
.stat-number {
|
| 1523 |
+
display: block;
|
| 1524 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1525 |
+
color: var(--neon-cyan) !important;
|
| 1526 |
+
font-size: 1.5rem !important;
|
| 1527 |
+
font-weight: 700 !important;
|
| 1528 |
+
text-shadow: 0 0 10px rgba(77, 208, 225, 0.5);
|
| 1529 |
+
}
|
| 1530 |
+
|
| 1531 |
+
.stat-label {
|
| 1532 |
+
display: block;
|
| 1533 |
+
color: var(--text-secondary) !important;
|
| 1534 |
+
font-size: 0.75rem !important;
|
| 1535 |
+
font-weight: 500 !important;
|
| 1536 |
+
text-transform: uppercase;
|
| 1537 |
+
letter-spacing: 0.5px;
|
| 1538 |
+
margin-top: 0.2rem;
|
| 1539 |
+
}
|
| 1540 |
+
|
| 1541 |
+
/* Feature content section */
|
| 1542 |
+
.feature-content {
|
| 1543 |
+
display: flex;
|
| 1544 |
+
padding: 2.5rem 3rem;
|
| 1545 |
+
gap: 3rem;
|
| 1546 |
+
align-items: flex-start;
|
| 1547 |
+
}
|
| 1548 |
+
|
| 1549 |
+
.content-left {
|
| 1550 |
+
flex: 1.2;
|
| 1551 |
+
}
|
| 1552 |
+
|
| 1553 |
+
.content-right {
|
| 1554 |
+
flex: 1;
|
| 1555 |
+
display: flex;
|
| 1556 |
+
justify-content: center;
|
| 1557 |
+
align-items: center;
|
| 1558 |
+
}
|
| 1559 |
+
|
| 1560 |
+
.feature-description-large {
|
| 1561 |
+
color: var(--text-secondary) !important;
|
| 1562 |
+
font-size: 1.1rem !important;
|
| 1563 |
+
line-height: 1.7 !important;
|
| 1564 |
+
font-weight: 500 !important;
|
| 1565 |
+
margin-bottom: 2rem;
|
| 1566 |
+
}
|
| 1567 |
+
|
| 1568 |
+
/* Card glow effect */
|
| 1569 |
+
.card-glow {
|
| 1570 |
+
position: absolute;
|
| 1571 |
+
top: -50%;
|
| 1572 |
+
left: -50%;
|
| 1573 |
+
width: 200%;
|
| 1574 |
+
height: 200%;
|
| 1575 |
+
background: radial-gradient(circle, transparent 20%, rgba(77, 208, 225, 0.05) 50%, transparent 70%);
|
| 1576 |
+
opacity: 0;
|
| 1577 |
+
transition: opacity 0.4s ease;
|
| 1578 |
+
pointer-events: none;
|
| 1579 |
+
}
|
| 1580 |
+
|
| 1581 |
+
.feature-card:hover .card-glow {
|
| 1582 |
+
opacity: 1;
|
| 1583 |
+
animation: glowRotate 3s linear infinite;
|
| 1584 |
+
}
|
| 1585 |
+
|
| 1586 |
+
@keyframes glowRotate {
|
| 1587 |
+
0% { transform: rotate(0deg); }
|
| 1588 |
+
100% { transform: rotate(360deg); }
|
| 1589 |
+
}
|
| 1590 |
+
|
| 1591 |
+
/* Different themed card styles */
|
| 1592 |
+
.feature-card.primary {
|
| 1593 |
+
border-color: var(--neon-cyan);
|
| 1594 |
+
background: linear-gradient(135deg,
|
| 1595 |
+
rgba(77, 208, 225, 0.1) 0%,
|
| 1596 |
+
rgba(45, 55, 72, 0.95) 30%);
|
| 1597 |
+
}
|
| 1598 |
+
|
| 1599 |
+
.feature-card.primary:hover {
|
| 1600 |
+
transform: translateY(-8px) scale(1.02);
|
| 1601 |
+
border-color: var(--neon-cyan);
|
| 1602 |
+
box-shadow:
|
| 1603 |
+
0 20px 60px rgba(77, 208, 225, 0.3),
|
| 1604 |
+
0 0 50px rgba(77, 208, 225, 0.2);
|
| 1605 |
+
}
|
| 1606 |
+
|
| 1607 |
+
.feature-card.secondary {
|
| 1608 |
+
border-color: var(--neon-purple);
|
| 1609 |
+
background: linear-gradient(135deg,
|
| 1610 |
+
rgba(186, 104, 200, 0.1) 0%,
|
| 1611 |
+
rgba(45, 55, 72, 0.95) 30%);
|
| 1612 |
+
}
|
| 1613 |
+
|
| 1614 |
+
.feature-card.secondary:hover {
|
| 1615 |
+
transform: translateY(-8px) scale(1.02);
|
| 1616 |
+
border-color: var(--neon-purple);
|
| 1617 |
+
box-shadow:
|
| 1618 |
+
0 20px 60px rgba(186, 104, 200, 0.3),
|
| 1619 |
+
0 0 50px rgba(186, 104, 200, 0.2);
|
| 1620 |
+
}
|
| 1621 |
+
|
| 1622 |
+
.feature-card.accent {
|
| 1623 |
+
border-color: var(--neon-green);
|
| 1624 |
+
background: linear-gradient(135deg,
|
| 1625 |
+
rgba(129, 199, 132, 0.1) 0%,
|
| 1626 |
+
rgba(45, 55, 72, 0.95) 30%);
|
| 1627 |
+
}
|
| 1628 |
+
|
| 1629 |
+
.feature-card.accent:hover {
|
| 1630 |
+
transform: translateY(-8px) scale(1.02);
|
| 1631 |
+
border-color: var(--neon-green);
|
| 1632 |
+
box-shadow:
|
| 1633 |
+
0 20px 60px rgba(129, 199, 132, 0.3),
|
| 1634 |
+
0 0 50px rgba(129, 199, 132, 0.2);
|
| 1635 |
+
}
|
| 1636 |
+
|
| 1637 |
+
.feature-card.tech {
|
| 1638 |
+
border-color: var(--neon-blue);
|
| 1639 |
+
background: linear-gradient(135deg,
|
| 1640 |
+
rgba(100, 181, 246, 0.1) 0%,
|
| 1641 |
+
rgba(45, 55, 72, 0.95) 30%);
|
| 1642 |
+
}
|
| 1643 |
+
|
| 1644 |
+
.feature-card.tech:hover {
|
| 1645 |
+
transform: translateY(-8px) scale(1.02);
|
| 1646 |
+
border-color: var(--neon-blue);
|
| 1647 |
+
box-shadow:
|
| 1648 |
+
0 20px 60px rgba(100, 181, 246, 0.3),
|
| 1649 |
+
0 0 50px rgba(100, 181, 246, 0.2);
|
| 1650 |
+
}
|
| 1651 |
+
|
| 1652 |
+
/* Feature icons */
|
| 1653 |
+
.feature-icon {
|
| 1654 |
+
font-size: 3rem;
|
| 1655 |
+
margin-bottom: 1rem;
|
| 1656 |
+
text-align: center;
|
| 1657 |
+
filter: drop-shadow(0 0 10px rgba(77, 208, 225, 0.5));
|
| 1658 |
+
flex-shrink: 0;
|
| 1659 |
+
}
|
| 1660 |
+
|
| 1661 |
+
/* Feature titles */
|
| 1662 |
+
.feature-title {
|
| 1663 |
+
font-family: 'Inter', sans-serif !important;
|
| 1664 |
+
color: var(--text-primary) !important;
|
| 1665 |
+
font-size: 1.3rem !important;
|
| 1666 |
+
font-weight: 700 !important;
|
| 1667 |
+
margin-bottom: 1rem !important;
|
| 1668 |
+
text-align: center;
|
| 1669 |
+
text-shadow: 0 0 15px rgba(255, 255, 255, 0.3);
|
| 1670 |
+
flex-shrink: 0;
|
| 1671 |
+
}
|
| 1672 |
+
|
| 1673 |
+
/* Feature descriptions */
|
| 1674 |
+
.feature-description {
|
| 1675 |
+
color: var(--text-secondary) !important;
|
| 1676 |
+
line-height: 1.6 !important;
|
| 1677 |
+
font-weight: 500 !important;
|
| 1678 |
+
flex: 1;
|
| 1679 |
+
display: flex;
|
| 1680 |
+
flex-direction: column;
|
| 1681 |
+
justify-content: space-between;
|
| 1682 |
+
}
|
| 1683 |
+
|
| 1684 |
+
/* Typing animation effect */
|
| 1685 |
+
.typing-text {
|
| 1686 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1687 |
+
font-size: 0.95rem !important;
|
| 1688 |
+
margin-bottom: 1.5rem;
|
| 1689 |
+
border-right: 2px solid var(--neon-cyan);
|
| 1690 |
+
white-space: nowrap;
|
| 1691 |
+
overflow: hidden;
|
| 1692 |
+
animation: typing 3s steps(60, end), blink 1s infinite;
|
| 1693 |
+
}
|
| 1694 |
+
|
| 1695 |
+
@keyframes typing {
|
| 1696 |
+
from { width: 0; }
|
| 1697 |
+
to { width: 100%; }
|
| 1698 |
+
}
|
| 1699 |
+
|
| 1700 |
+
@keyframes blink {
|
| 1701 |
+
0%, 50% { border-color: var(--neon-cyan); }
|
| 1702 |
+
51%, 100% { border-color: transparent; }
|
| 1703 |
+
}
|
| 1704 |
+
|
| 1705 |
+
/* Technology tags */
|
| 1706 |
+
.tech-specs {
|
| 1707 |
+
display: flex;
|
| 1708 |
+
flex-wrap: wrap;
|
| 1709 |
+
gap: 0.5rem;
|
| 1710 |
+
margin-top: 1rem;
|
| 1711 |
+
}
|
| 1712 |
+
|
| 1713 |
+
.spec-tag {
|
| 1714 |
+
background: linear-gradient(135deg, var(--neon-cyan), var(--neon-blue));
|
| 1715 |
+
color: #000 !important;
|
| 1716 |
+
padding: 0.3rem 0.8rem;
|
| 1717 |
+
border-radius: 15px;
|
| 1718 |
+
font-size: 0.8rem;
|
| 1719 |
+
font-weight: 600;
|
| 1720 |
+
letter-spacing: 0.5px;
|
| 1721 |
+
box-shadow: 0 2px 10px rgba(77, 208, 225, 0.3);
|
| 1722 |
+
}
|
| 1723 |
+
|
| 1724 |
+
/* Progress bar animation */
|
| 1725 |
+
.progress-bar {
|
| 1726 |
+
width: 100%;
|
| 1727 |
+
height: 6px;
|
| 1728 |
+
background: rgba(255, 255, 255, 0.1);
|
| 1729 |
+
border-radius: 3px;
|
| 1730 |
+
overflow: hidden;
|
| 1731 |
+
margin-top: 1rem;
|
| 1732 |
+
}
|
| 1733 |
+
|
| 1734 |
+
.progress-fill {
|
| 1735 |
+
height: 100%;
|
| 1736 |
+
background: linear-gradient(90deg, var(--neon-purple), var(--neon-cyan), var(--neon-green));
|
| 1737 |
+
background-size: 200% 100%;
|
| 1738 |
+
border-radius: 3px;
|
| 1739 |
+
animation: progressMove 2s ease-in-out infinite;
|
| 1740 |
+
width: 75%;
|
| 1741 |
+
}
|
| 1742 |
+
|
| 1743 |
+
@keyframes progressMove {
|
| 1744 |
+
0% { background-position: -200% 0; }
|
| 1745 |
+
100% { background-position: 200% 0; }
|
| 1746 |
+
}
|
| 1747 |
+
|
| 1748 |
+
/* Code preview area */
|
| 1749 |
+
.code-preview {
|
| 1750 |
+
background: rgba(0, 0, 0, 0.4);
|
| 1751 |
+
border: 1px solid var(--neon-green);
|
| 1752 |
+
border-radius: 10px;
|
| 1753 |
+
padding: 1rem;
|
| 1754 |
+
margin-top: 1rem;
|
| 1755 |
+
font-family: 'JetBrains Mono', monospace;
|
| 1756 |
+
}
|
| 1757 |
+
|
| 1758 |
+
.code-line {
|
| 1759 |
+
font-size: 0.85rem;
|
| 1760 |
+
line-height: 1.6;
|
| 1761 |
+
margin-bottom: 0.5rem;
|
| 1762 |
+
color: var(--neon-green) !important;
|
| 1763 |
+
}
|
| 1764 |
+
|
| 1765 |
+
.code-line.generating {
|
| 1766 |
+
color: var(--neon-cyan) !important;
|
| 1767 |
+
animation: textGlow 2s ease-in-out infinite;
|
| 1768 |
+
}
|
| 1769 |
+
|
| 1770 |
+
@keyframes textGlow {
|
| 1771 |
+
0%, 100% { text-shadow: 0 0 5px var(--neon-cyan); }
|
| 1772 |
+
50% { text-shadow: 0 0 15px var(--neon-cyan), 0 0 25px var(--neon-cyan); }
|
| 1773 |
+
}
|
| 1774 |
+
|
| 1775 |
+
/* Progress dots */
|
| 1776 |
+
.code-progress {
|
| 1777 |
+
margin-top: 1rem;
|
| 1778 |
+
}
|
| 1779 |
+
|
| 1780 |
+
.progress-dots {
|
| 1781 |
+
display: flex;
|
| 1782 |
+
gap: 0.5rem;
|
| 1783 |
+
justify-content: center;
|
| 1784 |
+
}
|
| 1785 |
+
|
| 1786 |
+
.dot {
|
| 1787 |
+
width: 8px;
|
| 1788 |
+
height: 8px;
|
| 1789 |
+
border-radius: 50%;
|
| 1790 |
+
background: rgba(255, 255, 255, 0.3);
|
| 1791 |
+
transition: all 0.3s ease;
|
| 1792 |
+
}
|
| 1793 |
+
|
| 1794 |
+
.dot.active {
|
| 1795 |
+
background: var(--neon-green);
|
| 1796 |
+
box-shadow: 0 0 10px var(--neon-green);
|
| 1797 |
+
animation: dotPulse 1.5s ease-in-out infinite;
|
| 1798 |
+
}
|
| 1799 |
+
|
| 1800 |
+
@keyframes dotPulse {
|
| 1801 |
+
0%, 100% { transform: scale(1); }
|
| 1802 |
+
50% { transform: scale(1.3); }
|
| 1803 |
+
}
|
| 1804 |
+
|
| 1805 |
+
/* Technology stack display */
|
| 1806 |
+
.tech-stack {
|
| 1807 |
+
display: flex;
|
| 1808 |
+
flex-direction: column;
|
| 1809 |
+
gap: 0.8rem;
|
| 1810 |
+
}
|
| 1811 |
+
|
| 1812 |
+
.stack-item {
|
| 1813 |
+
display: flex;
|
| 1814 |
+
align-items: center;
|
| 1815 |
+
gap: 0.8rem;
|
| 1816 |
+
padding: 0.8rem;
|
| 1817 |
+
background: rgba(255, 255, 255, 0.05);
|
| 1818 |
+
border: 1px solid rgba(255, 255, 255, 0.1);
|
| 1819 |
+
border-radius: 10px;
|
| 1820 |
+
transition: all 0.3s ease;
|
| 1821 |
+
}
|
| 1822 |
+
|
| 1823 |
+
.stack-item:hover {
|
| 1824 |
+
background: rgba(255, 255, 255, 0.1);
|
| 1825 |
+
border-color: var(--neon-blue);
|
| 1826 |
+
transform: translateX(5px);
|
| 1827 |
+
}
|
| 1828 |
+
|
| 1829 |
+
.stack-icon {
|
| 1830 |
+
font-size: 1.2rem;
|
| 1831 |
+
filter: drop-shadow(0 0 8px rgba(100, 181, 246, 0.6));
|
| 1832 |
+
}
|
| 1833 |
+
|
| 1834 |
+
.stack-name {
|
| 1835 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 1836 |
+
color: var(--text-primary) !important;
|
| 1837 |
+
font-weight: 600 !important;
|
| 1838 |
+
font-size: 0.9rem;
|
| 1839 |
+
}
|
| 1840 |
+
|
| 1841 |
+
/* Responsive design */
|
| 1842 |
+
@media (max-width: 768px) {
|
| 1843 |
+
.main-header {
|
| 1844 |
+
padding: 2.5rem 1.5rem;
|
| 1845 |
+
margin-bottom: 2rem;
|
| 1846 |
+
border-radius: 20px;
|
| 1847 |
+
}
|
| 1848 |
+
|
| 1849 |
+
.main-header h1 {
|
| 1850 |
+
font-size: 2.5rem !important;
|
| 1851 |
+
}
|
| 1852 |
+
|
| 1853 |
+
.main-header h3 {
|
| 1854 |
+
font-size: 1rem !important;
|
| 1855 |
+
letter-spacing: 1.5px !important;
|
| 1856 |
+
}
|
| 1857 |
+
|
| 1858 |
+
.main-header p {
|
| 1859 |
+
font-size: 0.8rem !important;
|
| 1860 |
+
letter-spacing: 0.5px !important;
|
| 1861 |
+
}
|
| 1862 |
+
|
| 1863 |
+
.ai-capabilities-section {
|
| 1864 |
+
padding: 1.5rem 1rem;
|
| 1865 |
+
margin: 1.5rem 0;
|
| 1866 |
+
border-radius: 15px;
|
| 1867 |
+
}
|
| 1868 |
+
|
| 1869 |
+
.capabilities-title {
|
| 1870 |
+
font-size: 1.6rem !important;
|
| 1871 |
+
}
|
| 1872 |
+
|
| 1873 |
+
.capabilities-subtitle {
|
| 1874 |
+
font-size: 0.8rem !important;
|
| 1875 |
+
letter-spacing: 1px !important;
|
| 1876 |
+
}
|
| 1877 |
+
|
| 1878 |
+
.feature-card {
|
| 1879 |
+
padding: 1.5rem;
|
| 1880 |
+
margin: 1rem 0;
|
| 1881 |
+
height: auto;
|
| 1882 |
+
min-height: 350px;
|
| 1883 |
+
border-radius: 15px;
|
| 1884 |
+
}
|
| 1885 |
+
|
| 1886 |
+
.neural-network {
|
| 1887 |
+
top: 0.5rem;
|
| 1888 |
+
right: 1rem;
|
| 1889 |
+
}
|
| 1890 |
+
|
| 1891 |
+
.typing-text {
|
| 1892 |
+
white-space: normal;
|
| 1893 |
+
border-right: none;
|
| 1894 |
+
animation: none;
|
| 1895 |
+
font-size: 0.85rem !important;
|
| 1896 |
+
}
|
| 1897 |
+
|
| 1898 |
+
.feature-icon {
|
| 1899 |
+
font-size: 2.5rem;
|
| 1900 |
+
}
|
| 1901 |
+
|
| 1902 |
+
.feature-title {
|
| 1903 |
+
font-size: 1.1rem !important;
|
| 1904 |
+
}
|
| 1905 |
+
}
|
| 1906 |
+
/* ================================
|
| 1907 |
+
CONTENT COMPONENT STYLES
|
| 1908 |
+
================================ */
|
| 1909 |
+
|
| 1910 |
+
/* Feature Flow Animation */
|
| 1911 |
+
.feature-flow {
|
| 1912 |
+
display: flex;
|
| 1913 |
+
align-items: center;
|
| 1914 |
+
gap: 1rem;
|
| 1915 |
+
flex-wrap: wrap;
|
| 1916 |
+
margin-top: 1.5rem;
|
| 1917 |
+
}
|
| 1918 |
+
|
| 1919 |
+
.flow-step {
|
| 1920 |
+
display: flex;
|
| 1921 |
+
align-items: center;
|
| 1922 |
+
gap: 0.5rem;
|
| 1923 |
+
padding: 0.8rem 1.2rem;
|
| 1924 |
+
background: rgba(77, 208, 225, 0.1);
|
| 1925 |
+
border: 1px solid rgba(77, 208, 225, 0.3);
|
| 1926 |
+
border-radius: 25px;
|
| 1927 |
+
transition: all 0.3s ease;
|
| 1928 |
+
}
|
| 1929 |
+
|
| 1930 |
+
.flow-step.active {
|
| 1931 |
+
background: rgba(77, 208, 225, 0.2);
|
| 1932 |
+
border-color: var(--neon-cyan);
|
| 1933 |
+
box-shadow: 0 0 15px rgba(77, 208, 225, 0.3);
|
| 1934 |
+
}
|
| 1935 |
+
|
| 1936 |
+
.flow-icon {
|
| 1937 |
+
font-size: 1.2rem;
|
| 1938 |
+
}
|
| 1939 |
+
|
| 1940 |
+
.flow-arrow {
|
| 1941 |
+
color: var(--neon-cyan);
|
| 1942 |
+
font-size: 1.2rem;
|
| 1943 |
+
animation: arrowFlow 2s ease-in-out infinite;
|
| 1944 |
+
}
|
| 1945 |
+
|
| 1946 |
+
@keyframes arrowFlow {
|
| 1947 |
+
0%, 100% { transform: translateX(0); opacity: 0.7; }
|
| 1948 |
+
50% { transform: translateX(5px); opacity: 1; }
|
| 1949 |
+
}
|
| 1950 |
+
|
| 1951 |
+
/* Code Simulation */
|
| 1952 |
+
.code-simulation {
|
| 1953 |
+
background: rgba(0, 0, 0, 0.6);
|
| 1954 |
+
border: 1px solid var(--neon-cyan);
|
| 1955 |
+
border-radius: 12px;
|
| 1956 |
+
padding: 1.5rem;
|
| 1957 |
+
font-family: 'JetBrains Mono', monospace;
|
| 1958 |
+
width: 100%;
|
| 1959 |
+
max-width: 400px;
|
| 1960 |
+
}
|
| 1961 |
+
|
| 1962 |
+
.code-header {
|
| 1963 |
+
display: flex;
|
| 1964 |
+
justify-content: space-between;
|
| 1965 |
+
align-items: center;
|
| 1966 |
+
margin-bottom: 1rem;
|
| 1967 |
+
padding-bottom: 0.5rem;
|
| 1968 |
+
border-bottom: 1px solid rgba(77, 208, 225, 0.3);
|
| 1969 |
+
}
|
| 1970 |
+
|
| 1971 |
+
.code-lang {
|
| 1972 |
+
color: var(--neon-cyan);
|
| 1973 |
+
font-weight: 600;
|
| 1974 |
+
font-size: 0.9rem;
|
| 1975 |
+
}
|
| 1976 |
+
|
| 1977 |
+
.code-status.generating {
|
| 1978 |
+
color: var(--neon-green);
|
| 1979 |
+
font-size: 0.8rem;
|
| 1980 |
+
animation: statusPulse 2s ease-in-out infinite;
|
| 1981 |
+
}
|
| 1982 |
+
|
| 1983 |
+
@keyframes statusPulse {
|
| 1984 |
+
0%, 100% { opacity: 0.7; }
|
| 1985 |
+
50% { opacity: 1; }
|
| 1986 |
+
}
|
| 1987 |
+
|
| 1988 |
+
.code-lines {
|
| 1989 |
+
display: flex;
|
| 1990 |
+
flex-direction: column;
|
| 1991 |
+
gap: 0.3rem;
|
| 1992 |
+
}
|
| 1993 |
+
|
| 1994 |
+
.code-line {
|
| 1995 |
+
color: var(--text-secondary);
|
| 1996 |
+
font-size: 0.85rem;
|
| 1997 |
+
line-height: 1.4;
|
| 1998 |
+
opacity: 0;
|
| 1999 |
+
}
|
| 2000 |
+
|
| 2001 |
+
.code-line.typing {
|
| 2002 |
+
animation: typeIn 0.8s ease-out forwards;
|
| 2003 |
+
}
|
| 2004 |
+
|
| 2005 |
+
.code-line.delay-1 { animation-delay: 0.8s; }
|
| 2006 |
+
.code-line.delay-2 { animation-delay: 1.6s; }
|
| 2007 |
+
.code-line.delay-3 { animation-delay: 2.4s; }
|
| 2008 |
+
.code-line.delay-4 { animation-delay: 3.2s; }
|
| 2009 |
+
|
| 2010 |
+
@keyframes typeIn {
|
| 2011 |
+
0% {
|
| 2012 |
+
opacity: 0;
|
| 2013 |
+
transform: translateX(-10px);
|
| 2014 |
+
}
|
| 2015 |
+
100% {
|
| 2016 |
+
opacity: 1;
|
| 2017 |
+
transform: translateX(0);
|
| 2018 |
+
}
|
| 2019 |
+
}
|
| 2020 |
+
|
| 2021 |
+
/* Agent Grid */
|
| 2022 |
+
.agent-grid {
|
| 2023 |
+
display: grid;
|
| 2024 |
+
grid-template-columns: repeat(2, 1fr);
|
| 2025 |
+
gap: 1rem;
|
| 2026 |
+
margin-top: 1.5rem;
|
| 2027 |
+
}
|
| 2028 |
+
|
| 2029 |
+
.agent-card {
|
| 2030 |
+
padding: 1rem;
|
| 2031 |
+
background: rgba(186, 104, 200, 0.1);
|
| 2032 |
+
border: 1px solid rgba(186, 104, 200, 0.3);
|
| 2033 |
+
border-radius: 12px;
|
| 2034 |
+
text-align: center;
|
| 2035 |
+
transition: all 0.3s ease;
|
| 2036 |
+
}
|
| 2037 |
+
|
| 2038 |
+
.agent-card.active {
|
| 2039 |
+
background: rgba(186, 104, 200, 0.2);
|
| 2040 |
+
border-color: var(--neon-purple);
|
| 2041 |
+
box-shadow: 0 0 15px rgba(186, 104, 200, 0.3);
|
| 2042 |
+
}
|
| 2043 |
+
|
| 2044 |
+
.agent-avatar {
|
| 2045 |
+
font-size: 1.5rem;
|
| 2046 |
+
margin-bottom: 0.5rem;
|
| 2047 |
+
}
|
| 2048 |
+
|
| 2049 |
+
.agent-card h4 {
|
| 2050 |
+
color: var(--text-primary) !important;
|
| 2051 |
+
font-size: 0.9rem !important;
|
| 2052 |
+
font-weight: 600 !important;
|
| 2053 |
+
margin-bottom: 0.3rem !important;
|
| 2054 |
+
}
|
| 2055 |
+
|
| 2056 |
+
.agent-card p {
|
| 2057 |
+
color: var(--text-secondary) !important;
|
| 2058 |
+
font-size: 0.75rem !important;
|
| 2059 |
+
margin: 0 !important;
|
| 2060 |
+
}
|
| 2061 |
+
|
| 2062 |
+
/* Collaboration Visualization */
|
| 2063 |
+
.collaboration-viz {
|
| 2064 |
+
position: relative;
|
| 2065 |
+
width: 300px;
|
| 2066 |
+
height: 300px;
|
| 2067 |
+
margin: 0 auto;
|
| 2068 |
+
}
|
| 2069 |
+
|
| 2070 |
+
.collaboration-center {
|
| 2071 |
+
position: absolute;
|
| 2072 |
+
top: 50%;
|
| 2073 |
+
left: 50%;
|
| 2074 |
+
transform: translate(-50%, -50%);
|
| 2075 |
+
text-align: center;
|
| 2076 |
+
z-index: 2;
|
| 2077 |
+
}
|
| 2078 |
+
|
| 2079 |
+
.center-node {
|
| 2080 |
+
width: 60px;
|
| 2081 |
+
height: 60px;
|
| 2082 |
+
background: linear-gradient(135deg, var(--neon-purple), var(--neon-cyan));
|
| 2083 |
+
border-radius: 50%;
|
| 2084 |
+
display: flex;
|
| 2085 |
+
align-items: center;
|
| 2086 |
+
justify-content: center;
|
| 2087 |
+
font-size: 1.5rem;
|
| 2088 |
+
margin: 0 auto 0.5rem;
|
| 2089 |
+
animation: centerRotate 4s linear infinite;
|
| 2090 |
+
}
|
| 2091 |
+
|
| 2092 |
+
.collaboration-center span {
|
| 2093 |
+
color: var(--text-primary) !important;
|
| 2094 |
+
font-size: 0.8rem !important;
|
| 2095 |
+
font-weight: 600 !important;
|
| 2096 |
+
}
|
| 2097 |
+
|
| 2098 |
+
.collaboration-agents {
|
| 2099 |
+
position: relative;
|
| 2100 |
+
width: 100%;
|
| 2101 |
+
height: 100%;
|
| 2102 |
+
}
|
| 2103 |
+
|
| 2104 |
+
.collab-agent {
|
| 2105 |
+
position: absolute;
|
| 2106 |
+
width: 50px;
|
| 2107 |
+
height: 50px;
|
| 2108 |
+
background: rgba(77, 208, 225, 0.2);
|
| 2109 |
+
border: 2px solid var(--neon-cyan);
|
| 2110 |
+
border-radius: 50%;
|
| 2111 |
+
display: flex;
|
| 2112 |
+
align-items: center;
|
| 2113 |
+
justify-content: center;
|
| 2114 |
+
font-size: 1.2rem;
|
| 2115 |
+
}
|
| 2116 |
+
|
| 2117 |
+
.pulse-ring {
|
| 2118 |
+
position: absolute;
|
| 2119 |
+
width: 60px;
|
| 2120 |
+
height: 60px;
|
| 2121 |
+
border: 2px solid var(--neon-cyan);
|
| 2122 |
+
border-radius: 50%;
|
| 2123 |
+
top: -5px;
|
| 2124 |
+
left: -5px;
|
| 2125 |
+
animation: pulseRing 2s ease-out infinite;
|
| 2126 |
+
}
|
| 2127 |
+
|
| 2128 |
+
.agent-pos-1 {
|
| 2129 |
+
top: 20px;
|
| 2130 |
+
left: 50%;
|
| 2131 |
+
transform: translateX(-50%);
|
| 2132 |
+
animation-delay: 0s;
|
| 2133 |
+
}
|
| 2134 |
+
|
| 2135 |
+
.agent-pos-2 {
|
| 2136 |
+
top: 50%;
|
| 2137 |
+
right: 20px;
|
| 2138 |
+
transform: translateY(-50%);
|
| 2139 |
+
animation-delay: 0.5s;
|
| 2140 |
+
}
|
| 2141 |
+
|
| 2142 |
+
.agent-pos-3 {
|
| 2143 |
+
bottom: 20px;
|
| 2144 |
+
left: 50%;
|
| 2145 |
+
transform: translateX(-50%);
|
| 2146 |
+
animation-delay: 1s;
|
| 2147 |
+
}
|
| 2148 |
+
|
| 2149 |
+
.agent-pos-4 {
|
| 2150 |
+
top: 50%;
|
| 2151 |
+
left: 20px;
|
| 2152 |
+
transform: translateY(-50%);
|
| 2153 |
+
animation-delay: 1.5s;
|
| 2154 |
+
}
|
| 2155 |
+
|
| 2156 |
+
@keyframes centerRotate {
|
| 2157 |
+
0% { transform: translate(-50%, -50%) rotate(0deg); }
|
| 2158 |
+
100% { transform: translate(-50%, -50%) rotate(360deg); }
|
| 2159 |
+
}
|
| 2160 |
+
|
| 2161 |
+
@keyframes pulseRing {
|
| 2162 |
+
0% {
|
| 2163 |
+
transform: scale(0.8);
|
| 2164 |
+
opacity: 1;
|
| 2165 |
+
}
|
| 2166 |
+
100% {
|
| 2167 |
+
transform: scale(1.4);
|
| 2168 |
+
opacity: 0;
|
| 2169 |
+
}
|
| 2170 |
+
}
|
| 2171 |
+
|
| 2172 |
+
/* Vision Demo */
|
| 2173 |
+
.vision-demo {
|
| 2174 |
+
margin-top: 1.5rem;
|
| 2175 |
+
padding: 1.5rem;
|
| 2176 |
+
background: rgba(129, 199, 132, 0.1);
|
| 2177 |
+
border: 1px solid rgba(129, 199, 132, 0.3);
|
| 2178 |
+
border-radius: 15px;
|
| 2179 |
+
}
|
| 2180 |
+
|
| 2181 |
+
.demo-input {
|
| 2182 |
+
display: flex;
|
| 2183 |
+
align-items: center;
|
| 2184 |
+
gap: 1rem;
|
| 2185 |
+
padding: 1rem;
|
| 2186 |
+
background: rgba(0, 0, 0, 0.3);
|
| 2187 |
+
border-radius: 10px;
|
| 2188 |
+
margin-bottom: 1rem;
|
| 2189 |
+
}
|
| 2190 |
+
|
| 2191 |
+
.input-icon {
|
| 2192 |
+
font-size: 1.5rem;
|
| 2193 |
+
}
|
| 2194 |
+
|
| 2195 |
+
.input-text {
|
| 2196 |
+
flex: 1;
|
| 2197 |
+
color: var(--neon-green) !important;
|
| 2198 |
+
font-family: 'JetBrains Mono', monospace !important;
|
| 2199 |
+
font-size: 0.9rem !important;
|
| 2200 |
+
}
|
| 2201 |
+
|
| 2202 |
+
.input-text.typing {
|
| 2203 |
+
border-right: 2px solid var(--neon-green);
|
| 2204 |
+
animation: inputTyping 4s steps(60, end), inputBlink 1s infinite;
|
| 2205 |
+
}
|
| 2206 |
+
|
| 2207 |
+
@keyframes inputTyping {
|
| 2208 |
+
from { width: 0; }
|
| 2209 |
+
to { width: 100%; }
|
| 2210 |
+
}
|
| 2211 |
+
|
| 2212 |
+
@keyframes inputBlink {
|
| 2213 |
+
0%, 50% { border-color: var(--neon-green); }
|
| 2214 |
+
51%, 100% { border-color: transparent; }
|
| 2215 |
+
}
|
| 2216 |
+
|
| 2217 |
+
.demo-arrow {
|
| 2218 |
+
text-align: center;
|
| 2219 |
+
font-size: 1.5rem;
|
| 2220 |
+
color: var(--neon-green);
|
| 2221 |
+
margin: 1rem 0;
|
| 2222 |
+
animation: arrowBounce 2s ease-in-out infinite;
|
| 2223 |
+
}
|
| 2224 |
+
|
| 2225 |
+
@keyframes arrowBounce {
|
| 2226 |
+
0%, 100% { transform: translateY(0); }
|
| 2227 |
+
50% { transform: translateY(5px); }
|
| 2228 |
+
}
|
| 2229 |
+
|
| 2230 |
+
.demo-output {
|
| 2231 |
+
padding: 1rem;
|
| 2232 |
+
background: rgba(0, 0, 0, 0.3);
|
| 2233 |
+
border-radius: 10px;
|
| 2234 |
+
}
|
| 2235 |
+
|
| 2236 |
+
.output-items {
|
| 2237 |
+
display: flex;
|
| 2238 |
+
flex-direction: column;
|
| 2239 |
+
gap: 0.5rem;
|
| 2240 |
+
}
|
| 2241 |
+
|
| 2242 |
+
.output-item {
|
| 2243 |
+
padding: 0.5rem 1rem;
|
| 2244 |
+
background: rgba(129, 199, 132, 0.2);
|
| 2245 |
+
border: 1px solid rgba(129, 199, 132, 0.4);
|
| 2246 |
+
border-radius: 8px;
|
| 2247 |
+
color: var(--neon-green) !important;
|
| 2248 |
+
font-size: 0.85rem !important;
|
| 2249 |
+
animation: itemAppear 0.8s ease-out forwards;
|
| 2250 |
+
opacity: 0;
|
| 2251 |
+
}
|
| 2252 |
+
|
| 2253 |
+
.output-item:nth-child(1) { animation-delay: 0.5s; }
|
| 2254 |
+
.output-item:nth-child(2) { animation-delay: 1s; }
|
| 2255 |
+
.output-item:nth-child(3) { animation-delay: 1.5s; }
|
| 2256 |
+
.output-item:nth-child(4) { animation-delay: 2s; }
|
| 2257 |
+
|
| 2258 |
+
@keyframes itemAppear {
|
| 2259 |
+
0% {
|
| 2260 |
+
opacity: 0;
|
| 2261 |
+
transform: translateX(-20px);
|
| 2262 |
+
}
|
| 2263 |
+
100% {
|
| 2264 |
+
opacity: 1;
|
| 2265 |
+
transform: translateX(0);
|
| 2266 |
+
}
|
| 2267 |
+
}
|
| 2268 |
+
|
| 2269 |
+
/* Future Timeline */
|
| 2270 |
+
.future-timeline {
|
| 2271 |
+
display: flex;
|
| 2272 |
+
flex-direction: column;
|
| 2273 |
+
gap: 1.5rem;
|
| 2274 |
+
padding: 1rem;
|
| 2275 |
+
max-width: 300px;
|
| 2276 |
+
}
|
| 2277 |
+
|
| 2278 |
+
.timeline-item {
|
| 2279 |
+
display: flex;
|
| 2280 |
+
align-items: center;
|
| 2281 |
+
gap: 1rem;
|
| 2282 |
+
padding: 1rem;
|
| 2283 |
+
border-radius: 12px;
|
| 2284 |
+
transition: all 0.3s ease;
|
| 2285 |
+
}
|
| 2286 |
+
|
| 2287 |
+
.timeline-item.completed {
|
| 2288 |
+
background: rgba(77, 208, 225, 0.1);
|
| 2289 |
+
border: 1px solid rgba(77, 208, 225, 0.3);
|
| 2290 |
+
}
|
| 2291 |
+
|
| 2292 |
+
.timeline-item.active {
|
| 2293 |
+
background: rgba(129, 199, 132, 0.1);
|
| 2294 |
+
border: 1px solid rgba(129, 199, 132, 0.3);
|
| 2295 |
+
box-shadow: 0 0 15px rgba(129, 199, 132, 0.3);
|
| 2296 |
+
}
|
| 2297 |
+
|
| 2298 |
+
.timeline-item.future {
|
| 2299 |
+
background: rgba(186, 104, 200, 0.1);
|
| 2300 |
+
border: 1px solid rgba(186, 104, 200, 0.3);
|
| 2301 |
+
opacity: 0.7;
|
| 2302 |
+
}
|
| 2303 |
+
|
| 2304 |
+
.timeline-marker {
|
| 2305 |
+
width: 40px;
|
| 2306 |
+
height: 40px;
|
| 2307 |
+
background: var(--card-bg);
|
| 2308 |
+
border-radius: 50%;
|
| 2309 |
+
display: flex;
|
| 2310 |
+
align-items: center;
|
| 2311 |
+
justify-content: center;
|
| 2312 |
+
font-size: 1.2rem;
|
| 2313 |
+
flex-shrink: 0;
|
| 2314 |
+
}
|
| 2315 |
+
|
| 2316 |
+
.timeline-content h4 {
|
| 2317 |
+
color: var(--text-primary) !important;
|
| 2318 |
+
font-size: 1rem !important;
|
| 2319 |
+
font-weight: 600 !important;
|
| 2320 |
+
margin-bottom: 0.2rem !important;
|
| 2321 |
+
}
|
| 2322 |
+
|
| 2323 |
+
.timeline-content p {
|
| 2324 |
+
color: var(--text-secondary) !important;
|
| 2325 |
+
font-size: 0.8rem !important;
|
| 2326 |
+
margin: 0 !important;
|
| 2327 |
+
}
|
| 2328 |
+
|
| 2329 |
+
/* Community Features */
|
| 2330 |
+
.community-features {
|
| 2331 |
+
display: flex;
|
| 2332 |
+
flex-direction: column;
|
| 2333 |
+
gap: 1rem;
|
| 2334 |
+
margin-top: 1.5rem;
|
| 2335 |
+
}
|
| 2336 |
+
|
| 2337 |
+
.community-feature {
|
| 2338 |
+
display: flex;
|
| 2339 |
+
align-items: flex-start;
|
| 2340 |
+
gap: 1rem;
|
| 2341 |
+
padding: 1rem;
|
| 2342 |
+
background: rgba(100, 181, 246, 0.1);
|
| 2343 |
+
border: 1px solid rgba(100, 181, 246, 0.3);
|
| 2344 |
+
border-radius: 12px;
|
| 2345 |
+
transition: all 0.3s ease;
|
| 2346 |
+
}
|
| 2347 |
+
|
| 2348 |
+
.community-feature:hover {
|
| 2349 |
+
background: rgba(100, 181, 246, 0.15);
|
| 2350 |
+
border-color: var(--neon-blue);
|
| 2351 |
+
}
|
| 2352 |
+
|
| 2353 |
+
.feature-icon-small {
|
| 2354 |
+
font-size: 1.5rem;
|
| 2355 |
+
flex-shrink: 0;
|
| 2356 |
+
margin-top: 0.2rem;
|
| 2357 |
+
}
|
| 2358 |
+
|
| 2359 |
+
.feature-text h4 {
|
| 2360 |
+
color: var(--text-primary) !important;
|
| 2361 |
+
font-size: 1rem !important;
|
| 2362 |
+
font-weight: 600 !important;
|
| 2363 |
+
margin-bottom: 0.3rem !important;
|
| 2364 |
+
}
|
| 2365 |
+
|
| 2366 |
+
.feature-text p {
|
| 2367 |
+
color: var(--text-secondary) !important;
|
| 2368 |
+
font-size: 0.85rem !important;
|
| 2369 |
+
line-height: 1.4 !important;
|
| 2370 |
+
margin: 0 !important;
|
| 2371 |
+
}
|
| 2372 |
+
|
| 2373 |
+
/* Tech Ecosystem */
|
| 2374 |
+
.tech-ecosystem {
|
| 2375 |
+
position: relative;
|
| 2376 |
+
width: 300px;
|
| 2377 |
+
height: 300px;
|
| 2378 |
+
margin: 0 auto;
|
| 2379 |
+
}
|
| 2380 |
+
|
| 2381 |
+
.ecosystem-center {
|
| 2382 |
+
position: absolute;
|
| 2383 |
+
top: 50%;
|
| 2384 |
+
left: 50%;
|
| 2385 |
+
transform: translate(-50%, -50%);
|
| 2386 |
+
text-align: center;
|
| 2387 |
+
z-index: 2;
|
| 2388 |
+
}
|
| 2389 |
+
|
| 2390 |
+
.center-logo {
|
| 2391 |
+
width: 60px;
|
| 2392 |
+
height: 60px;
|
| 2393 |
+
background: linear-gradient(135deg, var(--neon-blue), var(--neon-cyan));
|
| 2394 |
+
border-radius: 50%;
|
| 2395 |
+
display: flex;
|
| 2396 |
+
align-items: center;
|
| 2397 |
+
justify-content: center;
|
| 2398 |
+
font-size: 1.5rem;
|
| 2399 |
+
margin: 0 auto 0.5rem;
|
| 2400 |
+
animation: logoFloat 3s ease-in-out infinite;
|
| 2401 |
+
}
|
| 2402 |
+
|
| 2403 |
+
.ecosystem-center span {
|
| 2404 |
+
color: var(--text-primary) !important;
|
| 2405 |
+
font-size: 0.9rem !important;
|
| 2406 |
+
font-weight: 600 !important;
|
| 2407 |
+
}
|
| 2408 |
+
|
| 2409 |
+
.ecosystem-ring {
|
| 2410 |
+
position: relative;
|
| 2411 |
+
width: 100%;
|
| 2412 |
+
height: 100%;
|
| 2413 |
+
}
|
| 2414 |
+
|
| 2415 |
+
.ecosystem-item {
|
| 2416 |
+
position: absolute;
|
| 2417 |
+
display: flex;
|
| 2418 |
+
flex-direction: column;
|
| 2419 |
+
align-items: center;
|
| 2420 |
+
gap: 0.3rem;
|
| 2421 |
+
padding: 0.8rem;
|
| 2422 |
+
background: rgba(100, 181, 246, 0.1);
|
| 2423 |
+
border: 1px solid rgba(100, 181, 246, 0.3);
|
| 2424 |
+
border-radius: 12px;
|
| 2425 |
+
animation: ecosystemOrbit 8s linear infinite;
|
| 2426 |
+
}
|
| 2427 |
+
|
| 2428 |
+
.ecosystem-item.item-1 {
|
| 2429 |
+
top: 10px;
|
| 2430 |
+
left: 50%;
|
| 2431 |
+
transform: translateX(-50%);
|
| 2432 |
+
}
|
| 2433 |
+
|
| 2434 |
+
.ecosystem-item.item-2 {
|
| 2435 |
+
top: 50%;
|
| 2436 |
+
right: 10px;
|
| 2437 |
+
transform: translateY(-50%);
|
| 2438 |
+
}
|
| 2439 |
+
|
| 2440 |
+
.ecosystem-item.item-3 {
|
| 2441 |
+
bottom: 10px;
|
| 2442 |
+
left: 50%;
|
| 2443 |
+
transform: translateX(-50%);
|
| 2444 |
+
}
|
| 2445 |
+
|
| 2446 |
+
.ecosystem-item.item-4 {
|
| 2447 |
+
top: 50%;
|
| 2448 |
+
left: 10px;
|
| 2449 |
+
transform: translateY(-50%);
|
| 2450 |
+
}
|
| 2451 |
+
|
| 2452 |
+
.item-icon {
|
| 2453 |
+
font-size: 1.2rem;
|
| 2454 |
+
}
|
| 2455 |
+
|
| 2456 |
+
.ecosystem-item span {
|
| 2457 |
+
color: var(--text-primary) !important;
|
| 2458 |
+
font-size: 0.7rem !important;
|
| 2459 |
+
font-weight: 600 !important;
|
| 2460 |
+
text-align: center;
|
| 2461 |
+
}
|
| 2462 |
+
|
| 2463 |
+
@keyframes logoFloat {
|
| 2464 |
+
0%, 100% {
|
| 2465 |
+
transform: translate(-50%, -50%) scale(1);
|
| 2466 |
+
}
|
| 2467 |
+
50% {
|
| 2468 |
+
transform: translate(-50%, -50%) scale(1.05);
|
| 2469 |
+
}
|
| 2470 |
+
}
|
| 2471 |
+
|
| 2472 |
+
@keyframes ecosystemOrbit {
|
| 2473 |
+
0% {
|
| 2474 |
+
box-shadow: 0 0 10px rgba(100, 181, 246, 0.3);
|
| 2475 |
+
}
|
| 2476 |
+
50% {
|
| 2477 |
+
box-shadow: 0 0 20px rgba(100, 181, 246, 0.5);
|
| 2478 |
+
}
|
| 2479 |
+
100% {
|
| 2480 |
+
box-shadow: 0 0 10px rgba(100, 181, 246, 0.3);
|
| 2481 |
+
}
|
| 2482 |
+
}
|
| 2483 |
+
|
| 2484 |
+
/* Responsive adjustments for vertical cards */
|
| 2485 |
+
@media (max-width: 768px) {
|
| 2486 |
+
.feature-content {
|
| 2487 |
+
flex-direction: column;
|
| 2488 |
+
gap: 2rem;
|
| 2489 |
+
}
|
| 2490 |
+
|
| 2491 |
+
.feature-header {
|
| 2492 |
+
flex-direction: column;
|
| 2493 |
+
text-align: center;
|
| 2494 |
+
gap: 1rem;
|
| 2495 |
+
}
|
| 2496 |
+
|
| 2497 |
+
.feature-stats {
|
| 2498 |
+
flex-direction: row;
|
| 2499 |
+
justify-content: center;
|
| 2500 |
+
}
|
| 2501 |
+
|
| 2502 |
+
.collaboration-viz,
|
| 2503 |
+
.tech-ecosystem {
|
| 2504 |
+
width: 250px;
|
| 2505 |
+
height: 250px;
|
| 2506 |
+
}
|
| 2507 |
+
}
|
| 2508 |
+
|
| 2509 |
+
/* Additional sidebar styles for newer Streamlit versions */
|
| 2510 |
+
/* Cover any new sidebar containers or data-testid selectors */
|
| 2511 |
+
[data-testid="stSidebar"],
|
| 2512 |
+
[data-testid="stSidebarNav"],
|
| 2513 |
+
[data-testid="stSidebarNavItems"],
|
| 2514 |
+
.stSidebar,
|
| 2515 |
+
.sidebar,
|
| 2516 |
+
.sidebar-content,
|
| 2517 |
+
section[data-testid="stSidebar"] {
|
| 2518 |
+
background: linear-gradient(180deg, #0d1117 0%, #161b22 50%, #21262d 100%) !important;
|
| 2519 |
+
border-right: 2px solid var(--neon-cyan) !important;
|
| 2520 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.3) !important;
|
| 2521 |
+
color: var(--text-primary) !important;
|
| 2522 |
+
}
|
| 2523 |
+
|
| 2524 |
+
/* Light mode override for new sidebar selectors */
|
| 2525 |
+
@media (prefers-color-scheme: light) {
|
| 2526 |
+
[data-testid="stSidebar"],
|
| 2527 |
+
[data-testid="stSidebarNav"],
|
| 2528 |
+
[data-testid="stSidebarNavItems"],
|
| 2529 |
+
.stSidebar,
|
| 2530 |
+
.sidebar,
|
| 2531 |
+
.sidebar-content,
|
| 2532 |
+
section[data-testid="stSidebar"] {
|
| 2533 |
+
background: linear-gradient(180deg, #0d1117 0%, #161b22 50%, #21262d 100%) !important;
|
| 2534 |
+
border-right: 2px solid var(--neon-cyan) !important;
|
| 2535 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.3) !important;
|
| 2536 |
+
color: var(--text-primary) !important;
|
| 2537 |
+
}
|
| 2538 |
+
}
|
| 2539 |
+
|
| 2540 |
+
/* Alternative light theme detection for new sidebar selectors */
|
| 2541 |
+
[data-theme="light"] [data-testid="stSidebar"],
|
| 2542 |
+
[data-theme="light"] [data-testid="stSidebarNav"],
|
| 2543 |
+
[data-theme="light"] [data-testid="stSidebarNavItems"],
|
| 2544 |
+
[data-theme="light"] .stSidebar,
|
| 2545 |
+
[data-theme="light"] .sidebar,
|
| 2546 |
+
[data-theme="light"] .sidebar-content,
|
| 2547 |
+
[data-theme="light"] section[data-testid="stSidebar"] {
|
| 2548 |
+
background: linear-gradient(180deg, #0d1117 0%, #161b22 50%, #21262d 100%) !important;
|
| 2549 |
+
border-right: 2px solid var(--neon-cyan) !important;
|
| 2550 |
+
box-shadow: 0 0 20px rgba(77, 208, 225, 0.3) !important;
|
| 2551 |
+
color: var(--text-primary) !important;
|
| 2552 |
+
}
|
| 2553 |
+
|
| 2554 |
+
/* Force all text in sidebar containers to use dark theme colors */
|
| 2555 |
+
[data-testid="stSidebar"] *,
|
| 2556 |
+
[data-testid="stSidebarNav"] *,
|
| 2557 |
+
[data-testid="stSidebarNavItems"] *,
|
| 2558 |
+
.stSidebar *,
|
| 2559 |
+
.sidebar *,
|
| 2560 |
+
.sidebar-content *,
|
| 2561 |
+
section[data-testid="stSidebar"] * {
|
| 2562 |
+
color: var(--text-primary) !important;
|
| 2563 |
+
}
|
| 2564 |
+
|
| 2565 |
+
/* Light mode: Force all text in sidebar containers to use dark theme colors */
|
| 2566 |
+
@media (prefers-color-scheme: light) {
|
| 2567 |
+
[data-testid="stSidebar"] *,
|
| 2568 |
+
[data-testid="stSidebarNav"] *,
|
| 2569 |
+
[data-testid="stSidebarNavItems"] *,
|
| 2570 |
+
.stSidebar *,
|
| 2571 |
+
.sidebar *,
|
| 2572 |
+
.sidebar-content *,
|
| 2573 |
+
section[data-testid="stSidebar"] * {
|
| 2574 |
+
color: var(--text-primary) !important;
|
| 2575 |
+
}
|
| 2576 |
+
}
|
| 2577 |
+
|
| 2578 |
+
/* Alternative light theme detection for sidebar text */
|
| 2579 |
+
[data-theme="light"] [data-testid="stSidebar"] *,
|
| 2580 |
+
[data-theme="light"] [data-testid="stSidebarNav"] *,
|
| 2581 |
+
[data-theme="light"] [data-testid="stSidebarNavItems"] *,
|
| 2582 |
+
[data-theme="light"] .stSidebar *,
|
| 2583 |
+
[data-theme="light"] .sidebar *,
|
| 2584 |
+
[data-theme="light"] .sidebar-content *,
|
| 2585 |
+
[data-theme="light"] section[data-testid="stSidebar"] * {
|
| 2586 |
+
color: var(--text-primary) !important;
|
| 2587 |
+
}
|
| 2588 |
+
|
| 2589 |
+
</style>
|
| 2590 |
+
"""
|
projects/ui/DeepCode/utils/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utils package for paper processing tools.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from .file_processor import FileProcessor
|
| 6 |
+
from .dialogue_logger import (
|
| 7 |
+
DialogueLogger,
|
| 8 |
+
create_dialogue_logger,
|
| 9 |
+
extract_paper_id_from_path,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"FileProcessor",
|
| 14 |
+
"DialogueLogger",
|
| 15 |
+
"create_dialogue_logger",
|
| 16 |
+
"extract_paper_id_from_path",
|
| 17 |
+
]
|
projects/ui/DeepCode/utils/cli_interface.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Professional CLI Interface Module

Professional CLI interface module - contains the ASCII logo, terminal
color definitions and interactive interface components.
"""

import os
import platform
import time
from pathlib import Path
from typing import Optional

# tkinter is optional: headless servers and minimal Python builds may lack it
# entirely, in which case importing it raises ImportError. CLIInterface already
# degrades gracefully (tkinter_available / _get_manual_file_path), so guard the
# import instead of letting the whole module fail to load.
try:
    import tkinter as tk
    from tkinter import filedialog
except ImportError:
    tk = None
    filedialog = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Colors:
    """ANSI escape sequences used to style terminal output.

    Each attribute is a raw escape string; concatenate with text and finish
    with ``ENDC`` to reset the terminal back to its default style.
    """

    # Text attributes
    ENDC = "\033[0m"  # reset all styling
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"

    # Bright status colors (90s range)
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"

    # Gradient colors
    PURPLE = "\033[35m"
    MAGENTA = "\033[95m"
    BLUE = "\033[34m"
    CYAN = "\033[36m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class CLIInterface:
|
| 39 |
+
"""Professional CLI interface with modern styling"""
|
| 40 |
+
|
| 41 |
+
def __init__(self):
    """Set up interface state and probe whether tkinter GUI dialogs work."""
    self.uploaded_file = None
    self.is_running = True

    # Optimistically assume GUI support, then verify with a real probe.
    self.tkinter_available = True
    try:
        import tkinter as tk

        # Some systems ship tkinter without a usable display; creating and
        # immediately discarding a hidden root window detects that case.
        probe = tk.Tk()
        probe.withdraw()
        probe.destroy()
    except Exception:
        self.tkinter_available = False
|
| 56 |
+
|
| 57 |
+
def clear_screen(self):
    """Clear the terminal window using the platform's native command."""
    if os.name == "nt":
        os.system("cls")
    else:
        os.system("clear")
|
| 60 |
+
|
| 61 |
+
def print_logo(self):
    """Print a beautiful ASCII logo with gradient colors and tech elements."""
    # Every row is padded to 79 visible characters (color escape codes
    # excluded) so the box-drawing border stays perfectly aligned.
    logo = f"""
{Colors.CYAN}╔═══════════════════════════════════════════════════════════════════════════════╗
║ ║
║ {Colors.BOLD}{Colors.MAGENTA}██████╗ ███████╗██████╗ ██████╗ ██████╗ █████╗ ██╗{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.PURPLE}██╔══██╗ ██╔════╝██╔══██╗██╔══██╗██╔═══██╗ ██╔══██╗██║{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.BLUE}██████╔╝ █████╗ ██████╔╝██████╔╝██║ ██║ ███████║██║{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.OKBLUE}██╔══██╗ ██╔══╝ ██╔═══╝ ██╔══██╗██║ ██║ ██╔══██║██║{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.OKCYAN}██║ ██║ ███████╗██║ ██║ ██║╚██████╔╝ ██║ ██║██║{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.GREEN}╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝{Colors.CYAN} ║
║ ║
║ {Colors.BOLD}{Colors.YELLOW}┌─────────────────────────────────────────────────────────────────────────┐{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.YELLOW}│ 🤖 AI-POWERED RESEARCH PAPER REPRODUCTION ENGINE 🚀 │{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.YELLOW}│ ⚡ INTELLIGENT • AUTOMATED • CUTTING-EDGE ⚡ │{Colors.CYAN} ║
║ {Colors.BOLD}{Colors.YELLOW}└─────────────────────────────────────────────────────────────────────────┘{Colors.CYAN} ║
║ ║
║ {Colors.BOLD}{Colors.GREEN}💎 CORE CAPABILITIES:{Colors.ENDC} {Colors.CYAN}║
║ {Colors.BOLD}{Colors.OKCYAN}▶ Neural PDF Analysis & Code Extraction {Colors.CYAN}║
║ {Colors.BOLD}{Colors.OKCYAN}▶ Advanced Document Processing Engine {Colors.CYAN}║
║ {Colors.BOLD}{Colors.OKCYAN}▶ Multi-Format Support (PDF•DOCX•PPTX•HTML) {Colors.CYAN}║
║ {Colors.BOLD}{Colors.OKCYAN}▶ Smart File Upload Interface {Colors.CYAN}║
║ {Colors.BOLD}{Colors.OKCYAN}▶ Automated Repository Management {Colors.CYAN}║
║ ║
║ {Colors.BOLD}{Colors.PURPLE}🔬 TECH STACK: Python•AI•MCP•Docling•LLM {Colors.CYAN}║
║ ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
"""
    print(logo)
|
| 91 |
+
|
| 92 |
+
def print_welcome_banner(self):
    """Print welcome banner with version info."""
    # Static banner; version/build strings are hard-coded here rather than
    # read from package metadata.
    banner = f"""
{Colors.BOLD}{Colors.CYAN}╔═══════════════════════════════════════════════════════════════════════════════╗
║ WELCOME TO ReproAI ║
╠═══════════════════════════════════════════════════════════════════════════════╣
║ ║
║ {Colors.YELLOW}Version: 2.0.0 | Build: Professional Edition {Colors.CYAN}║
║ {Colors.GREEN}Status: Ready | Engine: Initialized {Colors.CYAN}║
║ {Colors.PURPLE}Author: AI Research Team | License: MIT {Colors.CYAN}║
║ ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
"""
    print(banner)
|
| 106 |
+
|
| 107 |
+
def print_separator(self, char="═", length=79, color=Colors.CYAN):
|
| 108 |
+
"""Print a styled separator line"""
|
| 109 |
+
print(f"{color}{char * length}{Colors.ENDC}")
|
| 110 |
+
|
| 111 |
+
def print_status(self, message: str, status_type: str = "info"):
    """Print a one-line status message prefixed with a colored icon.

    Unknown ``status_type`` values fall back to the "info" styling.
    """
    icons = {
        "success": f"{Colors.OKGREEN}✅",
        "error": f"{Colors.FAIL}❌",
        "warning": f"{Colors.WARNING}⚠️ ",
        "info": f"{Colors.OKBLUE}ℹ️ ",
        "processing": f"{Colors.YELLOW}⏳",
        "upload": f"{Colors.PURPLE}📁",
        "download": f"{Colors.CYAN}📥",
        "analysis": f"{Colors.MAGENTA}🔍",
    }

    prefix = icons.get(status_type, icons["info"])
    print(f"{prefix} {Colors.BOLD}{message}{Colors.ENDC}")
|
| 126 |
+
|
| 127 |
+
def create_menu(self):
    """Render the interactive main menu (choices: U = URL, F = file, Q = quit)."""
    # Display-only; the actual choice is read separately by get_user_input().
    menu = f"""
{Colors.BOLD}{Colors.CYAN}╔═══════════════════════════════════════════════════════════════════════════════╗
║ MAIN MENU ║
╠═══════════════════════════════════════════════════════════════════════════════╣
║ ║
║ {Colors.OKGREEN}🌐 [U] Process URL {Colors.CYAN}│ {Colors.PURPLE}📁 [F] Upload File {Colors.CYAN}│ {Colors.FAIL}❌ [Q] Quit{Colors.CYAN} ║
║ ║
║ {Colors.YELLOW}📝 Enter a research paper URL (arXiv, IEEE, ACM, etc.) {Colors.CYAN}║
║ {Colors.YELLOW} or upload a PDF/DOC file for intelligent analysis {Colors.CYAN}║
║ ║
║ {Colors.OKCYAN}💡 Tip: Press 'F' to open file browser or 'U' to enter URL manually {Colors.CYAN}║
║ ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
"""
    print(menu)
|
| 144 |
+
|
| 145 |
+
def get_user_input(self):
    """Prompt the user and return the entered choice, trimmed and lowercased."""
    prompt = f"\n{Colors.BOLD}{Colors.OKCYAN}➤ Your choice: {Colors.ENDC}"
    print(prompt, end="")
    choice = input()
    return choice.strip().lower()
|
| 149 |
+
|
| 150 |
+
def upload_file_gui(self) -> Optional[str]:
    """Modern file upload interface using tkinter with cross-platform compatibility.

    Returns the absolute path of the selected file, or None when no file was
    chosen / the chosen path does not exist. Falls back to manual text input
    when tkinter is unavailable or the dialog fails.
    """
    # Check if tkinter is available (probed once in __init__).
    if not self.tkinter_available:
        self.print_status("GUI file dialog not available on this system", "warning")
        self.print_status("Using manual file path input instead", "info")
        return self._get_manual_file_path()

    def select_file():
        # Inner helper: open the native dialog and return the chosen path
        # (None on cancel/error). Kept local so all tkinter state stays here.
        try:
            # Create a hidden root window
            root = tk.Tk()
            root.withdraw()  # Hide the main window

            # Platform-specific configurations
            system = platform.system()

            if system == "Darwin":  # macOS
                # macOS specific settings
                try:
                    root.call("wm", "attributes", ".", "-topmost", True)
                except Exception:
                    pass

                # macOS compatible file types (no '*' wildcard prefix —
                # the Aqua dialog rejects the Windows-style patterns).
                file_types = [
                    ("PDF Files", ".pdf"),
                    ("Word Documents", ".docx .doc"),
                    ("PowerPoint Files", ".pptx .ppt"),
                    ("HTML Files", ".html .htm"),
                    ("Text Files", ".txt .md"),
                    ("All Files", ".*"),
                ]
            else:
                # Windows and Linux
                root.attributes("-topmost", True)

                # Windows/Linux compatible file types
                file_types = [
                    ("PDF Files", "*.pdf"),
                    ("Word Documents", "*.docx;*.doc"),
                    ("PowerPoint Files", "*.pptx;*.ppt"),
                    ("HTML Files", "*.html;*.htm"),
                    ("Text Files", "*.txt;*.md"),
                    ("All Files", "*.*"),
                ]

            # Set window title
            root.title("Repro-AI - File Selector")

            try:
                # Open file dialog with platform-appropriate settings
                file_path = filedialog.askopenfilename(
                    title="Select Research Paper File",
                    filetypes=file_types,
                    initialdir=os.getcwd(),
                )
            except Exception as e:
                self.print_status(f"File dialog error: {str(e)}", "error")
                return None
            finally:
                # Clean up the hidden root window in every exit path.
                try:
                    root.destroy()
                except Exception:
                    pass

            return file_path

        except Exception as e:
            # Fallback: destroy root if it exists (setup may have failed
            # before the inner try/finally was reached).
            try:
                if "root" in locals():
                    root.destroy()
            except Exception:
                pass

            # Print error and suggest alternative
            self.print_status(f"GUI file dialog failed: {str(e)}", "error")
            self.print_status(
                "Please use manual file path input instead", "warning"
            )
            return self._get_manual_file_path()

    self.print_status("Opening file browser dialog...", "upload")
    file_path = select_file()

    if file_path:
        # Validate file (dialog can return a stale path in rare cases).
        if not os.path.exists(file_path):
            self.print_status("File not found!", "error")
            return None

        file_size = os.path.getsize(file_path) / (1024 * 1024)  # Size in MB
        file_ext = Path(file_path).suffix.lower()

        # Display file info with beautiful formatting
        file_name = Path(file_path).name
        directory = str(Path(file_path).parent)

        # Truncate long paths for display so the banner columns stay aligned.
        if len(file_name) > 50:
            display_name = file_name[:47] + "..."
        else:
            display_name = file_name

        if len(directory) > 49:
            display_dir = "..." + directory[-46:]
        else:
            display_dir = directory

        print(f"""
{Colors.OKGREEN}╔═══════════════════════════════════════════════════════════════════════════════╗
║ FILE SELECTED ║
╠═══════════════════════════════════════════════════════════════════════════════╣
║ ║
║ {Colors.BOLD}📄 File Name:{Colors.ENDC} {Colors.CYAN}{display_name:<50}{Colors.OKGREEN}║
║ {Colors.BOLD}📁 Directory:{Colors.ENDC} {Colors.YELLOW}{display_dir:<49}{Colors.OKGREEN}║
║ {Colors.BOLD}📊 File Size:{Colors.ENDC} {Colors.PURPLE}{file_size:.2f} MB{Colors.OKGREEN} ║
║ {Colors.BOLD}🔖 File Type:{Colors.ENDC} {Colors.MAGENTA}{file_ext.upper():<50}{Colors.OKGREEN}║
║ ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
""")

        self.print_status(f"File successfully selected: {file_name}", "success")
        return file_path
    else:
        self.print_status("No file selected", "warning")
        return None
|
| 279 |
+
|
| 280 |
+
def _get_manual_file_path(self) -> Optional[str]:
    """Fallback method for manual file path input when GUI fails.

    Returns the entered path (with quotes stripped and ``~`` expanded) when
    it exists on disk, otherwise None.
    """
    print(
        f"\n{Colors.BOLD}{Colors.CYAN}╔═══════════════════════════════════════════════════════════════════════════════╗"
    )
    print(
        "║ MANUAL FILE INPUT ║"
    )
    print(
        f"╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}"
    )

    print(f"\n{Colors.YELLOW}📝 Supported file types:{Colors.ENDC}")
    print(f" {Colors.CYAN}• PDF files (.pdf)")
    print(f" {Colors.CYAN}• Word documents (.docx, .doc)")
    print(f" {Colors.CYAN}• PowerPoint files (.pptx, .ppt)")
    print(f" {Colors.CYAN}• HTML files (.html, .htm)")
    print(f" {Colors.CYAN}• Text files (.txt, .md){Colors.ENDC}")

    print(
        f"\n{Colors.BOLD}{Colors.OKCYAN}📁 Enter file path (or drag & drop): {Colors.ENDC}",
        end="",
    )
    file_path = input().strip()

    # Clean up the path (remove quotes if present — drag & drop on some
    # terminals wraps the path in quotes).
    file_path = file_path.strip("\"'")

    if file_path:
        # Expand user directory if needed (e.g. "~/papers/x.pdf")
        file_path = os.path.expanduser(file_path)

        # Check if file exists
        if os.path.exists(file_path):
            self.print_status(
                f"File found: {os.path.basename(file_path)}", "success"
            )
            return file_path
        else:
            self.print_status("File not found at the specified path", "error")
            return None
    else:
        self.print_status("No file path provided", "warning")
        return None
|
| 324 |
+
|
| 325 |
+
def get_url_input(self) -> str:
    """Get URL input with validation and examples.

    Returns the entered URL string, or "" when nothing was provided.
    Note: the domain check below only changes the status message — any
    non-empty URL is accepted and returned either way.
    """
    print(
        f"\n{Colors.BOLD}{Colors.CYAN}╔═══════════════════════════════════════════════════════════════════════════════╗"
    )
    print(
        "║ URL INPUT ║"
    )
    print(
        f"╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}"
    )

    print(f"\n{Colors.YELLOW}📝 Supported URL Examples:{Colors.ENDC}")
    print(f" {Colors.CYAN}• arXiv: https://arxiv.org/pdf/2403.00813")
    print(f" {Colors.CYAN}• arXiv: @https://arxiv.org/pdf/2403.00813")
    print(f" {Colors.CYAN}• IEEE: https://ieeexplore.ieee.org/document/...")
    print(f" {Colors.CYAN}• ACM: https://dl.acm.org/doi/...")
    print(
        f" {Colors.CYAN}• Direct PDF: https://example.com/paper.pdf{Colors.ENDC}"
    )

    print(
        f"\n{Colors.BOLD}{Colors.OKCYAN}🌐 Enter paper URL: {Colors.ENDC}", end=""
    )
    url = input().strip()

    if url:
        # Basic URL validation — recognised academic domains get a
        # "received" confirmation, everything else a softer notice.
        if any(
            domain in url.lower()
            for domain in ["arxiv.org", "ieee", "acm.org", ".pdf", "researchgate"]
        ):
            self.print_status(f"URL received: {url}", "success")
            return url
        else:
            self.print_status("URL appears valid, proceeding...", "info")
            return url
    else:
        self.print_status("No URL provided", "warning")
        return ""
|
| 365 |
+
|
| 366 |
+
def show_progress_bar(self, message: str, duration: float = 2.0):
    """Animate a 50-step progress bar over roughly ``duration`` seconds."""
    print(f"\n{Colors.YELLOW}{message}{Colors.ENDC}")

    total_steps = 50
    for step in range(total_steps + 1):
        percent = (step / total_steps) * 100
        filled = "█" * step
        empty = "░" * (total_steps - step)

        # Color shifts red -> yellow -> green as the bar advances.
        if percent < 33:
            color = Colors.FAIL
        elif percent < 66:
            color = Colors.WARNING
        else:
            color = Colors.OKGREEN

        print(
            f"\r{color}[{filled}{empty}] {percent:6.1f}%{Colors.ENDC}",
            end="",
            flush=True,
        )
        time.sleep(duration / total_steps)

    print(f"\n{Colors.OKGREEN}✅ {message} completed!{Colors.ENDC}\n")
|
| 393 |
+
|
| 394 |
+
def show_spinner(self, message: str, duration: float = 1.0):
    """Animate a braille spinner next to ``message`` for ~``duration`` seconds."""
    frames = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
    deadline = time.time() + duration

    while time.time() < deadline:
        for frame in frames:
            print(
                f"\r{Colors.CYAN}{frame} {Colors.BOLD}{message}{Colors.ENDC}",
                end="",
                flush=True,
            )
            time.sleep(0.1)
            # Stop mid-cycle so we never overrun the deadline by a full cycle.
            if time.time() >= deadline:
                break

    print(f"\r{Colors.OKGREEN}✅ {Colors.BOLD}{message} - Done!{Colors.ENDC}")
|
| 411 |
+
|
| 412 |
+
def print_results_header(self):
    """Print the banner that introduces the processing-results section."""
    # Emit the banner directly; no need for an intermediate variable.
    print(f"""
{Colors.OKGREEN}╔═══════════════════════════════════════════════════════════════════════════════╗
║                               PROCESSING RESULTS                              ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
""")
|
| 420 |
+
|
| 421 |
+
def print_error_box(self, title: str, error_msg: str):
    """Render *title* and *error_msg* inside a red ASCII-art error frame.

    Args:
        title: Short error title; padded to 66 columns inside the box.
        error_msg: Error description; padded to 66 columns inside the box.
    """
    # Build the frame first, then print it in one call.
    frame = f"""
{Colors.FAIL}╔═══════════════════════════════════════════════════════════════════════════════╗
║                                     ERROR                                     ║
╠═══════════════════════════════════════════════════════════════════════════════╣
║                                                                               ║
║  {Colors.BOLD}Title: {title:<66}{Colors.FAIL}║
║  {Colors.BOLD}Error: {error_msg:<66}{Colors.FAIL}║
║                                                                               ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
"""
    print(frame)
|
| 433 |
+
|
| 434 |
+
def print_goodbye(self):
    """Print the farewell banner shown when the user exits the tool."""
    # Print the banner in one shot instead of staging it in a local first.
    print(f"""
{Colors.BOLD}{Colors.YELLOW}╔═══════════════════════════════════════════════════════════════════════════════╗
║                                   GOODBYE!                                    ║
╠═══════════════════════════════════════════════════════════════════════════════╣
║                                                                               ║
║  {Colors.CYAN}Thank you for using ReproAI!                                                 {Colors.YELLOW}║
║  {Colors.GREEN}🌟 Star us on GitHub: https://github.com/your-repo                           {Colors.YELLOW}║
║  {Colors.PURPLE}📧 Contact: support@reproai.com                                              {Colors.YELLOW}║
║  {Colors.MAGENTA}🐛 Report issues: https://github.com/your-repo/issues                        {Colors.YELLOW}║
║                                                                               ║
║  {Colors.OKGREEN}✨ Happy coding! See you next time! ✨                                       {Colors.YELLOW}║
║                                                                               ║
╚═══════════════════════════════════════════════════════════════════════════════╝{Colors.ENDC}
""")
|
| 451 |
+
|
| 452 |
+
def ask_continue(self) -> bool:
    """Prompt the user to continue or quit.

    Returns:
        False when the user types "q", "quit", or "exit" (case-insensitive);
        True for anything else, including a bare Enter.
    """
    prompt = f"\n{Colors.BOLD}{Colors.CYAN}Press Enter to continue or 'q' to quit: {Colors.ENDC}"
    print(prompt, end="")
    answer = input().strip().lower()
    return answer not in ["q", "quit", "exit"]
|
projects/ui/DeepCode/utils/dialogue_logger.py
ADDED
|
@@ -0,0 +1,671 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
"""
|
| 4 |
+
Comprehensive Dialogue Logger for Code Implementation Workflow
|
| 5 |
+
Logs complete conversation rounds with detailed formatting and paper-specific organization
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
from typing import Dict, Any, List
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class DialogueLogger:
    """
    Comprehensive dialogue logger for code implementation workflow
    Captures complete conversation rounds with proper formatting and organization

    Rounds are accumulated in ``self.current_round_data`` and flushed to a
    timestamped markdown file under ``<base_path>/papers/<paper_id>/logs/``
    when :meth:`complete_round` is called.
    """

    def __init__(self, paper_id: str, base_path: str = None):
        """
        Initialize dialogue logger for a specific paper

        Args:
            paper_id: Paper identifier (e.g., "1", "2", etc.)
            base_path: Base path for logs (defaults to agent_folders structure)
        """
        self.paper_id = paper_id
        # NOTE(review): the fallback is a machine-specific absolute path —
        # consider making it configurable (env var / config) instead.
        self.base_path = (
            base_path
            or "/data2/bjdwhzzh/project-hku/Code-Agent2.0/Code-Agent/deepcode-mcp/agent_folders"
        )
        self.log_directory = os.path.join(
            self.base_path, "papers", str(paper_id), "logs"
        )

        # Create log directory if it doesn't exist
        Path(self.log_directory).mkdir(parents=True, exist_ok=True)

        # Session tracking (initialize before log file creation)
        self.round_counter = 0  # number of the most recent round (1-based)
        self.session_start_time = datetime.now()
        self.current_round_data = {}  # in-progress round; empty dict = no active round

        # Generate log filename with timestamp
        timestamp = self.session_start_time.strftime("%Y%m%d_%H%M%S")
        self.log_filename = f"dialogue_log_{timestamp}.md"
        self.log_filepath = os.path.join(self.log_directory, self.log_filename)

        # Initialize log file with header
        self._initialize_log_file()

        print(f"📝 Dialogue Logger initialized for Paper {paper_id}")
        print(f"📁 Log file: {self.log_filepath}")

    def _initialize_log_file(self) -> None:
        """Initialize the log file with header information"""
        header = f"""# Code Implementation Dialogue Log

**Paper ID:** {self.paper_id}
**Session Start:** {self.session_start_time.strftime('%Y-%m-%d %H:%M:%S')}
**Log File:** {self.log_filename}

---

## Session Overview

This log contains the complete conversation rounds between the user and assistant during the code implementation workflow. Each round includes:

- System prompts and user messages
- Assistant responses with tool calls
- Tool execution results
- Implementation progress markers

---

"""
        # Logging must never break the workflow, so swallow and report errors.
        try:
            with open(self.log_filepath, "w", encoding="utf-8") as f:
                f.write(header)
        except Exception as e:
            print(f"⚠️ Failed to initialize log file: {e}")

    def start_new_round(
        self, round_type: str = "implementation", context: Dict[str, Any] = None
    ) -> None:
        """
        Start a new dialogue round

        Args:
            round_type: Type of round (implementation, summary, error_handling, etc.)
            context: Additional context information (may include 'iteration' to sync with workflow)
        """
        # Use iteration from context if provided, otherwise increment round_counter
        if context and "iteration" in context:
            self.round_counter = context["iteration"]
        else:
            self.round_counter += 1

        # Fresh accumulator; any previous unfinished round data is discarded.
        self.current_round_data = {
            "round_number": self.round_counter,
            "round_type": round_type,
            "start_time": datetime.now(),
            "context": context or {},
            "messages": [],
            "tool_calls": [],
            "results": [],
            "metadata": {},
        }

        print(f"🔄 Starting Round {self.round_counter}: {round_type}")

    def log_system_prompt(self, prompt: str, prompt_type: str = "system") -> None:
        """
        Log system prompt or instructions

        Args:
            prompt: System prompt content
            prompt_type: Type of prompt (system, instruction, etc.)
        """
        # Auto-open a round so callers can log without explicit start_new_round.
        if not self.current_round_data:
            self.start_new_round("system_setup")

        self.current_round_data["messages"].append(
            {
                "role": "system",
                "type": prompt_type,
                "content": prompt,
                "timestamp": datetime.now().isoformat(),
            }
        )

    def log_user_message(self, message: str, message_type: str = "user_input") -> None:
        """
        Log user message

        Args:
            message: User message content
            message_type: Type of message (user_input, feedback, guidance, etc.)
        """
        if not self.current_round_data:
            self.start_new_round("user_interaction")

        self.current_round_data["messages"].append(
            {
                "role": "user",
                "type": message_type,
                "content": message,
                "timestamp": datetime.now().isoformat(),
            }
        )

    def log_assistant_response(
        self, response: str, response_type: str = "assistant_response"
    ) -> None:
        """
        Log assistant response

        Args:
            response: Assistant response content
            response_type: Type of response (assistant_response, analysis, etc.)
        """
        if not self.current_round_data:
            self.start_new_round("assistant_interaction")

        self.current_round_data["messages"].append(
            {
                "role": "assistant",
                "type": response_type,
                "content": response,
                "timestamp": datetime.now().isoformat(),
            }
        )

    def log_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> None:
        """
        Log tool calls made by the assistant

        Args:
            tool_calls: List of tool calls with id, name, and input
        """
        if not self.current_round_data:
            self.start_new_round("tool_execution")

        for tool_call in tool_calls:
            self.current_round_data["tool_calls"].append(
                {
                    "id": tool_call.get("id", ""),
                    "name": tool_call.get("name", ""),
                    "input": tool_call.get("input", {}),
                    "timestamp": datetime.now().isoformat(),
                }
            )

    def log_tool_results(self, tool_results: List[Dict[str, Any]]) -> None:
        """
        Log tool execution results

        Args:
            tool_results: List of tool results with tool_name and result
        """
        if not self.current_round_data:
            self.start_new_round("tool_results")

        for result in tool_results:
            self.current_round_data["results"].append(
                {
                    "tool_name": result.get("tool_name", ""),
                    "result": result.get("result", ""),
                    "timestamp": datetime.now().isoformat(),
                }
            )

    def log_metadata(self, key: str, value: Any) -> None:
        """
        Log metadata information

        Args:
            key: Metadata key
            value: Metadata value
        """
        if not self.current_round_data:
            self.start_new_round("metadata")

        self.current_round_data["metadata"][key] = value

    def log_memory_optimization(
        self,
        messages_before: List[Dict],
        messages_after: List[Dict],
        optimization_stats: Dict[str, Any],
        approach: str = "memory_optimization",
    ) -> None:
        """
        Log memory optimization details including before/after message content

        Args:
            messages_before: Messages before optimization
            messages_after: Messages after optimization
            optimization_stats: Statistics about the optimization
            approach: Optimization approach used
        """
        if not self.current_round_data:
            self.start_new_round("memory_optimization")

        # Calculate what was removed/kept
        removed_count = len(messages_before) - len(messages_after)
        compression_ratio = (
            (removed_count / len(messages_before) * 100) if messages_before else 0
        )

        # Log the optimization details
        optimization_data = {
            "approach": approach,
            "messages_before_count": len(messages_before),
            "messages_after_count": len(messages_after),
            "messages_removed_count": removed_count,
            "compression_ratio": f"{compression_ratio:.1f}%",
            "optimization_stats": optimization_stats,
            "timestamp": datetime.now().isoformat(),
        }

        # Store the optimization data
        if "memory_optimizations" not in self.current_round_data:
            self.current_round_data["memory_optimizations"] = []

        self.current_round_data["memory_optimizations"].append(
            {
                "optimization_data": optimization_data,
                "messages_before": messages_before,
                "messages_after": messages_after,
            }
        )

        # Log metadata
        self.log_metadata("memory_optimization", optimization_data)

        print(
            f"🧹 Memory optimization logged: {len(messages_before)} → {len(messages_after)} messages ({compression_ratio:.1f}% compression)"
        )

    def complete_round(self, summary: str = "", status: str = "completed") -> None:
        """
        Complete the current round and write to log file

        Args:
            summary: Round summary
            status: Round completion status
        """
        if not self.current_round_data:
            print("⚠️ No active round to complete")
            return

        self.current_round_data["end_time"] = datetime.now()
        self.current_round_data["duration"] = (
            self.current_round_data["end_time"] - self.current_round_data["start_time"]
        ).total_seconds()
        self.current_round_data["summary"] = summary
        self.current_round_data["status"] = status

        # Write round to log file
        self._write_round_to_log()

        print(f"✅ Round {self.round_counter} completed: {status}")

        # Clear current round data
        self.current_round_data = {}

    def _write_round_to_log(self) -> None:
        """Write the current round data to the log file in markdown format"""
        try:
            with open(self.log_filepath, "a", encoding="utf-8") as f:
                round_data = self.current_round_data

                # Round header
                f.write(
                    f"\n## Round {round_data['round_number']}: {round_data['round_type'].title()}\n\n"
                )
                f.write(
                    f"**Start Time:** {round_data['start_time'].strftime('%Y-%m-%d %H:%M:%S')}\n"
                )
                f.write(
                    f"**End Time:** {round_data['end_time'].strftime('%Y-%m-%d %H:%M:%S')}\n"
                )
                f.write(f"**Duration:** {round_data['duration']:.2f} seconds\n")
                f.write(f"**Status:** {round_data['status']}\n\n")

                # Context information
                if round_data.get("context"):
                    f.write("### Context\n\n")
                    for key, value in round_data["context"].items():
                        f.write(f"- **{key}:** {value}\n")
                    f.write("\n")

                # Messages
                if round_data.get("messages"):
                    f.write("### Messages\n\n")
                    for i, msg in enumerate(round_data["messages"], 1):
                        role_emoji = {
                            "system": "🔧",
                            "user": "👤",
                            "assistant": "🤖",
                        }.get(msg["role"], "📝")
                        f.write(
                            f"#### {role_emoji} {msg['role'].title()} Message {i}\n\n"
                        )
                        f.write(f"**Type:** {msg['type']}\n")
                        f.write(f"**Timestamp:** {msg['timestamp']}\n\n")
                        f.write("```\n")
                        f.write(msg["content"])
                        f.write("\n```\n\n")

                # Tool calls
                if round_data.get("tool_calls"):
                    f.write("### Tool Calls\n\n")
                    for i, tool_call in enumerate(round_data["tool_calls"], 1):
                        f.write(f"#### 🛠️ Tool Call {i}: {tool_call['name']}\n\n")
                        f.write(f"**ID:** {tool_call['id']}\n")
                        f.write(f"**Timestamp:** {tool_call['timestamp']}\n\n")
                        f.write("**Input:**\n")
                        f.write("```json\n")
                        f.write(
                            json.dumps(tool_call["input"], indent=2, ensure_ascii=False)
                        )
                        f.write("\n```\n\n")

                # Tool results
                if round_data.get("results"):
                    f.write("### Tool Results\n\n")
                    for i, result in enumerate(round_data["results"], 1):
                        f.write(f"#### 📊 Result {i}: {result['tool_name']}\n\n")
                        f.write(f"**Timestamp:** {result['timestamp']}\n\n")
                        f.write("**Result:**\n")
                        f.write("```\n")
                        f.write(str(result["result"]))
                        f.write("\n```\n\n")

                # Memory Optimizations
                if round_data.get("memory_optimizations"):
                    f.write("### Memory Optimizations\n\n")
                    for i, opt in enumerate(round_data["memory_optimizations"], 1):
                        opt_data = opt["optimization_data"]
                        messages_before = opt["messages_before"]
                        messages_after = opt["messages_after"]

                        f.write(f"#### 🧹 Memory Optimization {i}\n\n")
                        f.write(f"**Approach:** {opt_data['approach']}\n")
                        f.write(
                            f"**Messages Before:** {opt_data['messages_before_count']}\n"
                        )
                        f.write(
                            f"**Messages After:** {opt_data['messages_after_count']}\n"
                        )
                        f.write(
                            f"**Messages Removed:** {opt_data['messages_removed_count']}\n"
                        )
                        f.write(
                            f"**Compression Ratio:** {opt_data['compression_ratio']}\n"
                        )
                        f.write(f"**Timestamp:** {opt_data['timestamp']}\n\n")

                        # Show optimization stats
                        if opt_data.get("optimization_stats"):
                            f.write("**Optimization Statistics:**\n")
                            f.write("```json\n")
                            f.write(
                                json.dumps(
                                    opt_data["optimization_stats"],
                                    indent=2,
                                    ensure_ascii=False,
                                )
                            )
                            f.write("\n```\n\n")

                        # Show messages before optimization (limited to last 5 for readability)
                        if messages_before:
                            f.write("**Messages Before Optimization (last 5):**\n\n")
                            for j, msg in enumerate(messages_before[-5:], 1):
                                role = msg.get("role", "unknown")
                                content = msg.get("content", "")
                                # Truncate very long messages
                                if len(content) > 3000:
                                    content = content[:3000] + "...[truncated]"
                                # NOTE(review): the ellipsis threshold (100) does not
                                # match the 3000-char truncation above — looks like a
                                # leftover from an earlier limit; confirm intent.
                                f.write(
                                    f"- **{role} {j}:** {content[:3000]}{'...' if len(content) > 100 else ''}\n"
                                )
                            f.write("\n")

                        # Show messages after optimization
                        if messages_after:
                            f.write("**Messages After Optimization:**\n\n")
                            for j, msg in enumerate(messages_after, 1):
                                role = msg.get("role", "unknown")
                                content = msg.get("content", "")
                                # Truncate very long messages
                                if len(content) > 3000:
                                    content = content[:3000] + "...[truncated]"
                                f.write(
                                    f"- **{role} {j}:** {content[:3000]}{'...' if len(content) > 100 else ''}\n"
                                )
                            f.write("\n")

                        # Show what was removed
                        if len(messages_before) > len(messages_after):
                            # NOTE(review): assumes the optimizer keeps a suffix of the
                            # original list, so the removed prefix is everything before
                            # the last len(messages_after) entries — verify against the
                            # actual optimization strategy.
                            removed_messages = (
                                messages_before[: -len(messages_after)]
                                if messages_after
                                else messages_before
                            )
                            f.write(
                                f"**Messages Removed ({len(removed_messages)}):**\n\n"
                            )
                            for j, msg in enumerate(
                                removed_messages[-3:], 1
                            ):  # Show last 3 removed
                                role = msg.get("role", "unknown")
                                content = msg.get("content", "")
                                if len(content) > 3000:
                                    content = content[:3000] + "...[truncated]"
                                f.write(f"- **{role} {j}:** {content}\n")
                            f.write("\n")

                        f.write("\n")

                # Metadata
                if round_data.get("metadata"):
                    f.write("### Metadata\n\n")
                    for key, value in round_data["metadata"].items():
                        if (
                            key != "memory_optimization"
                        ):  # Skip memory optimization metadata as it's shown above
                            f.write(f"- **{key}:** {value}\n")
                    f.write("\n")

                # Summary
                if round_data.get("summary"):
                    f.write("### Summary\n\n")
                    f.write(round_data["summary"])
                    f.write("\n\n")

                # Separator
                f.write("---\n\n")

        except Exception as e:
            print(f"⚠️ Failed to write round to log: {e}")

    def log_complete_exchange(
        self,
        system_prompt: str = "",
        user_message: str = "",
        assistant_response: str = "",
        tool_calls: List[Dict] = None,
        tool_results: List[Dict] = None,
        round_type: str = "exchange",
        context: Dict = None,
        summary: str = "",
    ) -> None:
        """
        Log a complete exchange in a single call

        Args:
            system_prompt: System prompt (optional)
            user_message: User message
            assistant_response: Assistant response
            tool_calls: Tool calls made
            tool_results: Tool execution results
            round_type: Type of round
            context: Additional context
            summary: Round summary
        """
        self.start_new_round(round_type, context)

        if system_prompt:
            self.log_system_prompt(system_prompt)

        if user_message:
            self.log_user_message(user_message)

        if assistant_response:
            self.log_assistant_response(assistant_response)

        if tool_calls:
            self.log_tool_calls(tool_calls)

        if tool_results:
            self.log_tool_results(tool_results)

        self.complete_round(summary)

    def get_session_stats(self) -> Dict[str, Any]:
        """Get session statistics"""
        return {
            "paper_id": self.paper_id,
            "session_start": self.session_start_time.isoformat(),
            "total_rounds": self.round_counter,
            "log_file": self.log_filepath,
            "session_duration": (
                datetime.now() - self.session_start_time
            ).total_seconds(),
        }

    def finalize_session(self, final_summary: str = "") -> None:
        """
        Finalize the logging session

        Args:
            final_summary: Final session summary
        """
        try:
            with open(self.log_filepath, "a", encoding="utf-8") as f:
                f.write("\n## Session Summary\n\n")
                f.write(f"**Total Rounds:** {self.round_counter}\n")
                f.write(
                    f"**Session Duration:** {(datetime.now() - self.session_start_time).total_seconds():.2f} seconds\n"
                )
                f.write(
                    f"**End Time:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
                )

                if final_summary:
                    f.write("### Final Summary\n\n")
                    f.write(final_summary)
                    f.write("\n\n")

                f.write("---\n\n")
                f.write("*End of Session*\n")

        except Exception as e:
            print(f"⚠️ Failed to finalize session: {e}")

        print(f"🎯 Session finalized: {self.round_counter} rounds logged")
|
| 573 |
+
|
| 574 |
+
|
| 575 |
+
# Utility functions for easy integration
|
| 576 |
+
def create_dialogue_logger(paper_id: str, base_path: str = None) -> DialogueLogger:
    """Factory helper that constructs a :class:`DialogueLogger`.

    Args:
        paper_id: Paper identifier
        base_path: Base path for logs

    Returns:
        DialogueLogger instance
    """
    return DialogueLogger(paper_id=paper_id, base_path=base_path)
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def extract_paper_id_from_path(path: str) -> str:
    """
    Extract paper ID from a file path

    Scans the "/"-separated components for a "papers" segment and returns
    the component that immediately follows it.

    Args:
        path: File path containing paper information
              (e.g. "/data2/.../papers/1/initial_plan.txt")

    Returns:
        Paper ID string, or "unknown" when no non-empty ID follows "papers"
    """
    parts = path.split("/")
    for i, part in enumerate(parts):
        # Require a non-empty following segment: a trailing "papers/" would
        # otherwise yield "" instead of the "unknown" sentinel.
        if part == "papers" and i + 1 < len(parts) and parts[i + 1]:
            return parts[i + 1]
    return "unknown"
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
# Example usage
|
| 609 |
+
if __name__ == "__main__":
    # Smoke-test the dialogue logger end to end.
    demo_logger = DialogueLogger("1")

    # One full exchange: prompt, response, tool call and result.
    demo_logger.log_complete_exchange(
        system_prompt="You are a code implementation assistant.",
        user_message="Implement the transformer model",
        assistant_response="I'll implement the transformer model step by step.",
        tool_calls=[
            {"id": "1", "name": "write_file", "input": {"filename": "transformer.py"}}
        ],
        tool_results=[
            {"tool_name": "write_file", "result": "File created successfully"}
        ],
        round_type="implementation",
        context={"files_implemented": 1},
        summary="Successfully implemented transformer model",
    )

    # Exercise the memory-optimization logging path.
    demo_logger.start_new_round(
        "memory_optimization", {"trigger_reason": "write_file_detected"}
    )

    before_msgs = [
        {"role": "user", "content": "Original message 1"},
        {"role": "assistant", "content": "Original response 1"},
        {"role": "user", "content": "Original message 2"},
        {"role": "assistant", "content": "Original response 2"},
        {"role": "user", "content": "Original message 3"},
    ]
    after_msgs = [
        {"role": "user", "content": "Original message 1"},
        {"role": "assistant", "content": "Original response 1"},
        {"role": "user", "content": "Original message 3"},
    ]
    stats = {
        "implemented_files_tracked": 2,
        "current_round": 5,
        "concise_mode_active": True,
    }

    demo_logger.log_memory_optimization(
        messages_before=before_msgs,
        messages_after=after_msgs,
        optimization_stats=stats,
        approach="clear_after_write_file",
    )
    demo_logger.complete_round("Memory optimization test completed")

    demo_logger.finalize_session(
        "Test session with memory optimization logging completed successfully"
    )

    print("✅ Dialogue logger test completed with memory optimization")
|
projects/ui/DeepCode/utils/file_processor.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
File processing utilities for handling paper files and related operations.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
from typing import Dict, List, Optional, Union
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class FileProcessor:
    """
    A class to handle file processing operations including path extraction and file reading.

    All methods are static or class methods; the class serves as a namespace for
    paper-file helpers: directory extraction, markdown discovery/parsing, and
    standardized text output.
    """

    @staticmethod
    def extract_file_path(file_info: Union[str, Dict]) -> Optional[str]:
        """
        Extract paper directory path from the input information.

        Args:
            file_info: Either a JSON string, a dictionary containing file
                information, or a direct file/directory path string.

        Returns:
            Optional[str]: The extracted paper directory path or None if not found

        Raises:
            ValueError: If the input is neither a valid file path nor JSON, or
                if the parsed dictionary has no "paper_path" entry.
        """
        try:
            # Handle direct file path input
            if isinstance(file_info, str):
                # Treat anything ending in a known document extension as a file
                # path, even if the file does not exist yet.
                if file_info.endswith(
                    (".md", ".pdf", ".txt", ".docx", ".doc", ".html", ".htm")
                ):
                    # It's a file path, return the directory
                    return os.path.dirname(os.path.abspath(file_info))
                elif os.path.exists(file_info):
                    if os.path.isfile(file_info):
                        return os.path.dirname(os.path.abspath(file_info))
                    elif os.path.isdir(file_info):
                        return os.path.abspath(file_info)

                # Try to parse as JSON
                try:
                    info_dict = json.loads(file_info)
                except json.JSONDecodeError:
                    # Try to extract an embedded JSON object from surrounding text
                    info_dict = FileProcessor.extract_json_from_text(file_info)
                    if not info_dict:
                        # If not JSON and doesn't look like a file path, raise error
                        raise ValueError(
                            f"Input is neither a valid file path nor JSON: {file_info}"
                        )
            else:
                info_dict = file_info

            # Extract paper path from dictionary
            paper_path = info_dict.get("paper_path")
            if not paper_path:
                raise ValueError("No paper_path found in input dictionary")

            # Get the directory path instead of the file path
            paper_dir = os.path.dirname(paper_path)

            # Convert to absolute path if relative
            if not os.path.isabs(paper_dir):
                paper_dir = os.path.abspath(paper_dir)

            return paper_dir

        except (AttributeError, TypeError) as e:
            # Chain the original exception so the root cause stays visible.
            raise ValueError(f"Invalid input format: {str(e)}") from e

    @staticmethod
    def find_markdown_file(directory: str) -> Optional[str]:
        """
        Find the first markdown file in the given directory.

        Args:
            directory: Directory path to search

        Returns:
            Optional[str]: Path to the markdown file or None if not found
        """
        if not os.path.isdir(directory):
            return None

        # Sort the listing: os.listdir returns entries in arbitrary order, so
        # without sorting, "first" would be non-deterministic across platforms.
        for file in sorted(os.listdir(directory)):
            if file.endswith(".md"):
                return os.path.join(directory, file)
        return None

    @staticmethod
    def parse_markdown_sections(content: str) -> List[Dict[str, Union[str, int, List]]]:
        """
        Parse markdown content and organize it by sections based on headers.

        Args:
            content: The markdown content to parse

        Returns:
            List[Dict]: A list of sections, each containing:
                - level: The header level (1-6)
                - title: The section title
                - content: The section content
                - subsections: List of subsections
        """
        # Split content into lines
        lines = content.split("\n")
        sections = []
        current_section = None
        current_content = []

        for line in lines:
            # Check if line is an ATX header (1-6 leading '#' plus a title)
            header_match = re.match(r"^(#{1,6})\s+(.+)$", line)

            if header_match:
                # If we were building a section, save its content
                if current_section is not None:
                    current_section["content"] = "\n".join(current_content).strip()
                    sections.append(current_section)

                # Start a new section
                level = len(header_match.group(1))
                title = header_match.group(2).strip()
                current_section = {
                    "level": level,
                    "title": title,
                    "content": "",
                    "subsections": [],
                }
                current_content = []
            elif current_section is not None:
                # Text before the first header is intentionally discarded.
                current_content.append(line)

        # Don't forget to save the last section
        if current_section is not None:
            current_section["content"] = "\n".join(current_content).strip()
            sections.append(current_section)

        return FileProcessor._organize_sections(sections)

    @staticmethod
    def _organize_sections(sections: List[Dict]) -> List[Dict]:
        """
        Organize sections into a hierarchical structure based on their levels.

        Args:
            sections: Flat list of sections with their levels, in document order

        Returns:
            List[Dict]: Organized hierarchical structure of sections
        """
        result = []
        # Stack of open ancestor sections; top is the current parent candidate.
        section_stack = []

        for section in sections:
            # Pop ancestors at the same or deeper level: they cannot parent this one.
            while section_stack and section_stack[-1]["level"] >= section["level"]:
                section_stack.pop()

            if section_stack:
                section_stack[-1]["subsections"].append(section)
            else:
                result.append(section)

            section_stack.append(section)

        return result

    @staticmethod
    async def read_file_content(file_path: str) -> str:
        """
        Read the content of a file asynchronously.

        Args:
            file_path: Path to the file to read

        Returns:
            str: The content of the file

        Raises:
            FileNotFoundError: If the file doesn't exist
            IOError: If there's an error reading the file
        """
        try:
            # Ensure the file exists
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"File not found: {file_path}")

            # Check if file is actually a PDF by reading the first few bytes
            with open(file_path, "rb") as f:
                header = f.read(8)
                if header.startswith(b"%PDF"):
                    raise IOError(
                        f"File {file_path} is a PDF file, not a text file. Please convert it to markdown format or use PDF processing tools."
                    )

            # Read file content
            # Note: Using async with would be better for large files
            # but for simplicity and compatibility, using regular file reading
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()

            return content

        except UnicodeDecodeError as e:
            raise IOError(
                f"Error reading file {file_path}: File encoding is not UTF-8. Original error: {str(e)}"
            ) from e
        except (FileNotFoundError, IOError):
            # Propagate our own explicit errors unchanged. Previously these were
            # swallowed by the generic handler below and re-wrapped, so callers
            # never saw the documented FileNotFoundError.
            raise
        except Exception as e:
            raise IOError(f"Error reading file {file_path}: {str(e)}") from e

    @staticmethod
    def format_section_content(section: Dict) -> str:
        """
        Format a section's content with standardized spacing and structure.

        Args:
            section: Dictionary containing section information
                (keys: level, title, content, subsections)

        Returns:
            str: Formatted section content
        """
        # Start with section title, reconstructing the markdown header
        formatted = f"\n{'#' * section['level']} {section['title']}\n"

        # Add section content if it exists
        if section["content"]:
            formatted += f"\n{section['content'].strip()}\n"

        # Process subsections recursively
        if section["subsections"]:
            # Add a separator before subsections if there's content
            if section["content"]:
                formatted += "\n---\n"

            # Process each subsection
            for subsection in section["subsections"]:
                formatted += FileProcessor.format_section_content(subsection)

        # Add section separator
        formatted += "\n" + "=" * 80 + "\n"

        return formatted

    @staticmethod
    def standardize_output(sections: List[Dict]) -> str:
        """
        Convert structured sections into a standardized string format.

        Args:
            sections: List of section dictionaries

        Returns:
            str: Standardized string output
        """
        output = []

        # Process each top-level section
        for section in sections:
            output.append(FileProcessor.format_section_content(section))

        # Join all sections with clear separation
        return "\n".join(output)

    @classmethod
    async def process_file_input(
        cls, file_input: Union[str, Dict], base_dir: Optional[str] = None
    ) -> Dict:
        """
        Process file input information and return the structured content.

        Args:
            file_input: File input information (JSON string, dict, or direct file path)
            base_dir: Optional base directory to use for creating paper directories (for sync support)

        Returns:
            Dict: The structured content with keys "paper_dir", "file_path",
                "sections", and "standardized_text"

        Raises:
            ValueError: If the input cannot be resolved to a readable markdown file.
        """
        try:
            # First, try to extract a backtick-quoted markdown path from the string
            if isinstance(file_input, str):
                file_path_match = re.search(r"`([^`]+\.md)`", file_input)
                if file_path_match:
                    paper_path = file_path_match.group(1)
                    file_input = {"paper_path": paper_path}

            # Extract paper directory path
            paper_dir = cls.extract_file_path(file_input)

            # If base_dir is provided, adjust paper_dir to be relative to base_dir
            if base_dir and paper_dir:
                # If paper_dir is using default location, move it to base_dir
                if paper_dir.endswith(("deepcode_lab", "agent_folders")):
                    paper_dir = base_dir
                else:
                    # Extract the relative part and combine with base_dir,
                    # keeping the original directory name unchanged.
                    paper_name = os.path.basename(paper_dir)
                    paper_dir = os.path.join(base_dir, "papers", paper_name)

                # Ensure the directory exists
                os.makedirs(paper_dir, exist_ok=True)

            if not paper_dir:
                raise ValueError("Could not determine paper directory path")

            # Get the actual file path
            file_path = None
            if isinstance(file_input, str):
                # Try to parse as JSON (handles downloader results)
                try:
                    parsed_json = json.loads(file_input)
                    if isinstance(parsed_json, dict) and "paper_path" in parsed_json:
                        file_path = parsed_json.get("paper_path")
                        # If the file is missing, fall back to any markdown file
                        # in its directory.
                        if file_path and not os.path.exists(file_path):
                            paper_dir = os.path.dirname(file_path)
                            if os.path.isdir(paper_dir):
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                    else:
                        raise ValueError("Invalid JSON format: missing paper_path")
                except json.JSONDecodeError:
                    # Try to extract JSON embedded in surrounding text
                    # (downloader results with extra prose).
                    extracted_json = cls.extract_json_from_text(file_input)
                    if extracted_json and "paper_path" in extracted_json:
                        file_path = extracted_json.get("paper_path")
                        # Same fallback as above: missing file -> search its directory.
                        if file_path and not os.path.exists(file_path):
                            paper_dir = os.path.dirname(file_path)
                            if os.path.isdir(paper_dir):
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                    else:
                        # Not JSON, treat the string as a file path
                        # Check if it's a file path (existing or not)
                        if file_input.endswith(
                            (".md", ".pdf", ".txt", ".docx", ".doc", ".html", ".htm")
                        ):
                            if os.path.exists(file_input):
                                file_path = file_input
                            else:
                                # File doesn't exist, try to find markdown in the directory
                                file_path = cls.find_markdown_file(paper_dir)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {paper_dir}"
                                    )
                        elif os.path.exists(file_input):
                            if os.path.isfile(file_input):
                                file_path = file_input
                            elif os.path.isdir(file_input):
                                # If it's a directory, find the markdown file
                                file_path = cls.find_markdown_file(file_input)
                                if not file_path:
                                    raise ValueError(
                                        f"No markdown file found in directory: {file_input}"
                                    )
                        else:
                            raise ValueError(f"Invalid input: {file_input}")
            else:
                # Dictionary input
                file_path = file_input.get("paper_path")
                # If the file doesn't exist, try to find markdown in the directory
                if file_path and not os.path.exists(file_path):
                    paper_dir = os.path.dirname(file_path)
                    if os.path.isdir(paper_dir):
                        file_path = cls.find_markdown_file(paper_dir)
                        if not file_path:
                            raise ValueError(
                                f"No markdown file found in directory: {paper_dir}"
                            )

            if not file_path:
                raise ValueError("No valid file path found")

            # Read file content
            content = await cls.read_file_content(file_path)

            # Parse and structure the content
            structured_content = cls.parse_markdown_sections(content)

            # Generate standardized text output
            standardized_text = cls.standardize_output(structured_content)

            return {
                "paper_dir": paper_dir,
                "file_path": file_path,
                "sections": structured_content,
                "standardized_text": standardized_text,
            }

        except Exception as e:
            # Normalize all failures to ValueError, preserving the cause chain.
            raise ValueError(f"Error processing file input: {str(e)}") from e

    @staticmethod
    def extract_json_from_text(text: str) -> Optional[Dict]:
        """
        Extract JSON from text that may contain markdown code blocks or other content.

        Args:
            text: Text that may contain JSON

        Returns:
            Optional[Dict]: Extracted JSON as dictionary or None if not found
        """
        # Try to find JSON in markdown ```json fenced code blocks first
        json_pattern = r"```json\s*(\{.*?\})\s*```"
        match = re.search(json_pattern, text, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass

        # Try to find standalone JSON objects (supports one level of nesting)
        json_pattern = r"(\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\})"
        matches = re.findall(json_pattern, text, re.DOTALL)
        for match in matches:
            try:
                parsed = json.loads(match)
                # Only accept objects that carry the key we actually need.
                if isinstance(parsed, dict) and "paper_path" in parsed:
                    return parsed
            except json.JSONDecodeError:
                continue

        return None
|
projects/ui/DeepCode/utils/llm_utils.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
LLM utility functions for DeepCode project.
|
| 3 |
+
|
| 4 |
+
This module provides common LLM-related utilities to avoid circular imports
|
| 5 |
+
and reduce code duplication across the project.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import yaml
|
| 10 |
+
from typing import Any, Type, Dict, Tuple
|
| 11 |
+
|
| 12 |
+
# Import LLM classes
|
| 13 |
+
from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
|
| 14 |
+
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def get_preferred_llm_class(config_path: str = "mcp_agent.secrets.yaml") -> Type[Any]:
    """
    Automatically select the LLM class based on API key availability in configuration.

    Reads from YAML config file and returns AnthropicAugmentedLLM if anthropic.api_key
    is a non-blank string, otherwise returns OpenAIAugmentedLLM.

    Args:
        config_path: Path to the YAML configuration file

    Returns:
        class: The preferred LLM class
    """
    try:
        # Try to read the configuration file
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; fall back to {} so
                # the .get() chain below cannot raise AttributeError.
                config = yaml.safe_load(f) or {}

            # Check for anthropic API key in config; the section itself may be null.
            anthropic_key = (config.get("anthropic") or {}).get("api_key", "")

            # A key counts only if it is a non-blank string.
            if anthropic_key and anthropic_key.strip():
                return AnthropicAugmentedLLM
            return OpenAIAugmentedLLM
        else:
            print(f"🤖 Config file {config_path} not found, using OpenAIAugmentedLLM")
            return OpenAIAugmentedLLM

    except Exception as e:
        print(f"🤖 Error reading config file {config_path}: {e}")
        print("🤖 Falling back to OpenAIAugmentedLLM")
        return OpenAIAugmentedLLM
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def get_default_models(config_path: str = "mcp_agent.config.yaml"):
    """
    Get default models from configuration file.

    Args:
        config_path: Path to the configuration file

    Returns:
        dict: Dictionary with 'anthropic' and 'openai' default models
    """
    fallback = {"anthropic": "claude-sonnet-4-20250514", "openai": "o3-mini"}
    try:
        if not os.path.exists(config_path):
            print(f"Config file {config_path} not found, using default models")
            return dict(fallback)

        with open(config_path, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f)

        # A section may be present but null in the YAML; coerce to {}.
        anthropic_section = config.get("anthropic") or {}
        openai_section = config.get("openai") or {}

        return {
            "anthropic": anthropic_section.get("default_model", fallback["anthropic"]),
            "openai": openai_section.get("default_model", fallback["openai"]),
        }

    except Exception as e:
        print(f"❌Error reading config file {config_path}: {e}")
        return dict(fallback)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_document_segmentation_config(
    config_path: str = "mcp_agent.config.yaml",
) -> Dict[str, Any]:
    """
    Get document segmentation configuration from config file.

    Args:
        config_path: Path to the main configuration file

    Returns:
        Dict containing segmentation configuration with default values
    """
    defaults = {"enabled": True, "size_threshold_chars": 50000}
    try:
        if not os.path.exists(config_path):
            print(
                f"📄 Config file {config_path} not found, using default segmentation settings"
            )
            return dict(defaults)

        with open(config_path, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f)

        # Missing keys fall back to the shipped defaults.
        seg_config = config.get("document_segmentation", {})
        return {
            "enabled": seg_config.get("enabled", True),
            "size_threshold_chars": seg_config.get("size_threshold_chars", 50000),
        }

    except Exception as e:
        print(f"📄 Error reading segmentation config from {config_path}: {e}")
        print("📄 Using default segmentation settings")
        return dict(defaults)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def should_use_document_segmentation(
    document_content: str, config_path: str = "mcp_agent.config.yaml"
) -> Tuple[bool, str]:
    """
    Determine whether to use document segmentation based on configuration and document size.

    Args:
        document_content: The content of the document to analyze
        config_path: Path to the configuration file

    Returns:
        Tuple of (should_segment, reason) where:
        - should_segment: Boolean indicating whether to use segmentation
        - reason: String explaining the decision
    """
    settings = get_document_segmentation_config(config_path)

    # The feature toggle takes precedence over any size comparison.
    if not settings["enabled"]:
        return False, "Document segmentation disabled in configuration"

    size = len(document_content)
    limit = settings["size_threshold_chars"]

    if size > limit:
        return (
            True,
            f"Document size ({size:,} chars) exceeds threshold ({limit:,} chars)",
        )
    return (
        False,
        f"Document size ({size:,} chars) below threshold ({limit:,} chars)",
    )
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def get_adaptive_agent_config(
    use_segmentation: bool, search_server_names: list = None
) -> Dict[str, list]:
    """
    Get adaptive agent configuration based on whether to use document segmentation.

    Args:
        use_segmentation: Whether to include document-segmentation server
        search_server_names: Base search server names (from get_search_server_names)

    Returns:
        Dict containing server configurations for different agents
    """
    base_servers = list(search_server_names) if search_server_names else []

    config = {
        "concept_analysis": [],
        "algorithm_analysis": list(base_servers),
        "code_planner": list(base_servers),
    }

    # Concept analysis gets exactly one document-access server; the other two
    # agents have it appended to their search servers without duplicates.
    doc_server = "document-segmentation" if use_segmentation else "filesystem"
    config["concept_analysis"] = [doc_server]
    for agent in ("algorithm_analysis", "code_planner"):
        if doc_server not in config[agent]:
            config[agent].append(doc_server)

    return config
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def get_adaptive_prompts(use_segmentation: bool) -> Dict[str, str]:
    """
    Get appropriate prompt versions based on segmentation usage.

    Args:
        use_segmentation: Whether to use segmented reading prompts

    Returns:
        Dict containing prompt configurations
    """
    # Import here to avoid circular imports
    from prompts.code_prompts import (
        PAPER_CONCEPT_ANALYSIS_PROMPT,
        PAPER_ALGORITHM_ANALYSIS_PROMPT,
        CODE_PLANNING_PROMPT,
        PAPER_CONCEPT_ANALYSIS_PROMPT_TRADITIONAL,
        PAPER_ALGORITHM_ANALYSIS_PROMPT_TRADITIONAL,
        CODE_PLANNING_PROMPT_TRADITIONAL,
    )

    if use_segmentation:
        concept, algorithm, planning = (
            PAPER_CONCEPT_ANALYSIS_PROMPT,
            PAPER_ALGORITHM_ANALYSIS_PROMPT,
            CODE_PLANNING_PROMPT,
        )
    else:
        concept, algorithm, planning = (
            PAPER_CONCEPT_ANALYSIS_PROMPT_TRADITIONAL,
            PAPER_ALGORITHM_ANALYSIS_PROMPT_TRADITIONAL,
            CODE_PLANNING_PROMPT_TRADITIONAL,
        )

    return {
        "concept_analysis": concept,
        "algorithm_analysis": algorithm,
        "code_planning": planning,
    }
|
projects/ui/DeepCode/utils/simple_llm_logger.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# -*- coding: utf-8 -*-
|
| 3 |
+
"""
|
| 4 |
+
超简化LLM响应日志记录器
|
| 5 |
+
专注于记录LLM回复的核心内容,配置简单易用
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
import yaml
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from typing import Dict, Any
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class SimpleLLMLogger:
    """Minimal LLM response logger.

    Records the core content of LLM replies to a timestamped log file in
    JSON-lines, plain-text, or markdown format. Behaviour is driven by the
    ``llm_logger`` section of the MCP agent YAML configuration file.
    """

    def __init__(self, config_path: str = "mcp_agent.config.yaml"):
        """Initialize the logger.

        Args:
            config_path: Path to the YAML configuration file.
        """
        self.config = self._load_config(config_path)
        self.llm_config = self.config.get("llm_logger", {})

        # Short-circuit when logging is disabled: no log file is created.
        if not self.llm_config.get("enabled", True):
            self.enabled = False
            return

        self.enabled = True
        self._setup_logger()

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load the YAML configuration, falling back to defaults on any failure."""
        try:
            with open(config_path, "r", encoding="utf-8") as f:
                data = yaml.safe_load(f)
            # yaml.safe_load returns None for an empty file (and may return a
            # non-dict for malformed content). Guard here so the .get() calls
            # in __init__ never raise AttributeError.
            return data if isinstance(data, dict) else self._get_default_config()
        except Exception as e:
            print(f"⚠️ 配置文件加载失败: {e},使用默认配置")
            return self._get_default_config()

    def _get_default_config(self) -> Dict[str, Any]:
        """Return the built-in default configuration."""
        return {
            "llm_logger": {
                "enabled": True,
                "output_format": "json",
                "log_level": "basic",
                "log_directory": "logs/llm_responses",
                "filename_pattern": "llm_responses_{timestamp}.jsonl",
                "include_models": ["claude-sonnet-4", "gpt-4", "o3-mini"],
                "min_response_length": 50,
            }
        }

    def _setup_logger(self):
        """Create the log directory and compute the timestamped log file path."""
        log_dir = self.llm_config.get("log_directory", "logs/llm_responses")

        # Create the log directory (including parents) if it does not exist.
        Path(log_dir).mkdir(parents=True, exist_ok=True)

        # Build a unique log file name from the current timestamp.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename_pattern = self.llm_config.get(
            "filename_pattern", "llm_responses_{timestamp}.jsonl"
        )
        self.log_file = os.path.join(
            log_dir, filename_pattern.format(timestamp=timestamp)
        )

        print(f"📝 LLM响应日志: {self.log_file}")

    def log_response(self, content: str, model: str = "", agent: str = "", **kwargs):
        """Record an LLM response.

        Args:
            content: LLM response text.
            model: Name of the model that produced the response.
            agent: Name of the agent that requested the response.
            **kwargs: Optional extras (e.g. ``token_usage``, ``session_id``);
                only used at the "detailed" log level.
        """
        if not self.enabled:
            return

        # Apply the length / model filters before doing any work.
        if not self._should_log(content, model):
            return

        # Build the log record according to the configured log level.
        log_entry = self._build_entry(content, model, agent, kwargs)

        # Persist the record to the log file.
        self._write_log(log_entry)

        # Echo a short preview to the console.
        self._console_log(content, model, agent)

    def _should_log(self, content: str, model: str) -> bool:
        """Return True when the response passes the length and model filters."""
        # Skip very short responses.
        min_length = self.llm_config.get("min_response_length", 50)
        if len(content) < min_length:
            return False

        # When a model allow-list is configured, require a substring match.
        include_models = self.llm_config.get("include_models", [])
        if include_models and not any(m in model for m in include_models):
            return False

        return True

    def _build_entry(self, content: str, model: str, agent: str, extra: Dict) -> Dict:
        """Build a log entry dict according to the configured log level."""
        log_level = self.llm_config.get("log_level", "basic")

        if log_level == "basic":
            # Basic level: only the core content.
            return {
                "timestamp": datetime.now().isoformat(),
                "content": content,
                "model": model,
            }
        else:
            # Detailed level: include the agent name and selected extras.
            entry = {
                "timestamp": datetime.now().isoformat(),
                "content": content,
                "model": model,
                "agent": agent,
            }
            # Attach optional metadata when supplied by the caller.
            if "token_usage" in extra:
                entry["tokens"] = extra["token_usage"]
            if "session_id" in extra:
                entry["session"] = extra["session_id"]
            return entry

    def _write_log(self, entry: Dict):
        """Append one entry to the log file in the configured output format."""
        output_format = self.llm_config.get("output_format", "json")

        try:
            with open(self.log_file, "a", encoding="utf-8") as f:
                if output_format == "json":
                    f.write(json.dumps(entry, ensure_ascii=False) + "\n")
                elif output_format == "text":
                    timestamp = entry.get("timestamp", "")
                    model = entry.get("model", "")
                    content = entry.get("content", "")
                    f.write(f"[{timestamp}] {model}: {content}\n\n")
                elif output_format == "markdown":
                    timestamp = entry.get("timestamp", "")
                    model = entry.get("model", "")
                    content = entry.get("content", "")
                    f.write(f"**{timestamp}** | {model}\n\n{content}\n\n---\n\n")
        except Exception as e:
            print(f"⚠️ 写入日志失败: {e}")

    def _console_log(self, content: str, model: str, agent: str):
        """Print a one-line preview of the response to the console."""
        preview = content[:80] + "..." if len(content) > 80 else content
        print(f"🤖 {model} ({agent}): {preview}")
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# Module-level singleton instance, created lazily by get_llm_logger().
_global_logger = None
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
def get_llm_logger() -> SimpleLLMLogger:
    """Return the process-wide SimpleLLMLogger, constructing it on first call."""
    global _global_logger
    if _global_logger is None:
        _global_logger = SimpleLLMLogger()
    return _global_logger
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def log_llm_response(content: str, model: str = "", agent: str = "", **kwargs):
    """Convenience wrapper: forward an LLM response to the global logger."""
    get_llm_logger().log_response(content, model, agent, **kwargs)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# Example usage / smoke test when run as a script.
if __name__ == "__main__":
    # Exercise the convenience logging function end-to-end.
    log_llm_response(
        content="这是一个测试的LLM响应内容,用于验证简化日志记录器的功能是否正常工作。",
        model="claude-sonnet-4-20250514",
        agent="TestAgent",
    )

    print("✅ 简化LLM日志测试完成")
|
projects/ui/DeepCode/workflows/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Intelligent Agent Orchestration Workflows for Research-to-Code Automation.
|
| 3 |
+
|
| 4 |
+
This package provides advanced AI-driven workflow orchestration capabilities
|
| 5 |
+
for automated research analysis and code implementation synthesis.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .agent_orchestration_engine import (
|
| 9 |
+
run_research_analyzer,
|
| 10 |
+
run_resource_processor,
|
| 11 |
+
run_code_analyzer,
|
| 12 |
+
github_repo_download,
|
| 13 |
+
paper_reference_analyzer,
|
| 14 |
+
execute_multi_agent_research_pipeline,
|
| 15 |
+
paper_code_preparation, # Deprecated, for backward compatibility
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
from .code_implementation_workflow import CodeImplementationWorkflow
|
| 19 |
+
|
| 20 |
+
__all__ = [
|
| 21 |
+
# Initial workflows
|
| 22 |
+
"run_research_analyzer",
|
| 23 |
+
"run_resource_processor",
|
| 24 |
+
"run_code_analyzer",
|
| 25 |
+
"github_repo_download",
|
| 26 |
+
"paper_reference_analyzer",
|
| 27 |
+
"execute_multi_agent_research_pipeline", # Main multi-agent pipeline function
|
| 28 |
+
"paper_code_preparation", # Deprecated, for backward compatibility
|
| 29 |
+
# Code implementation workflows
|
| 30 |
+
"CodeImplementationWorkflow",
|
| 31 |
+
]
|
projects/ui/DeepCode/workflows/agent_orchestration_engine.py
ADDED
|
@@ -0,0 +1,1572 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Intelligent Agent Orchestration Engine for Research-to-Code Automation
|
| 3 |
+
|
| 4 |
+
This module serves as the core orchestration engine that coordinates multiple specialized
|
| 5 |
+
AI agents to automate the complete research-to-code transformation pipeline:
|
| 6 |
+
|
| 7 |
+
1. Research Analysis Agent - Intelligent content processing and extraction
|
| 8 |
+
2. Workspace Infrastructure Agent - Automated environment synthesis
|
| 9 |
+
3. Code Architecture Agent - AI-driven design and planning
|
| 10 |
+
4. Reference Intelligence Agent - Automated knowledge discovery
|
| 11 |
+
5. Repository Acquisition Agent - Intelligent code repository management
|
| 12 |
+
6. Codebase Intelligence Agent - Advanced relationship analysis
|
| 13 |
+
7. Code Implementation Agent - AI-powered code synthesis
|
| 14 |
+
|
| 15 |
+
Core Features:
|
| 16 |
+
- Multi-agent coordination with intelligent task distribution
|
| 17 |
+
- Local environment automation for seamless deployment
|
| 18 |
+
- Real-time progress monitoring with comprehensive error handling
|
| 19 |
+
- Adaptive workflow optimization based on processing requirements
|
| 20 |
+
- Advanced intelligence analysis with configurable performance modes
|
| 21 |
+
|
| 22 |
+
Architecture:
|
| 23 |
+
- Async/await based high-performance agent coordination
|
| 24 |
+
- Modular agent design with specialized role separation
|
| 25 |
+
- Intelligent resource management and optimization
|
| 26 |
+
- Comprehensive logging and monitoring infrastructure
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
import asyncio
|
| 30 |
+
import json
|
| 31 |
+
import os
|
| 32 |
+
import re
|
| 33 |
+
import yaml
|
| 34 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple
|
| 35 |
+
|
| 36 |
+
# MCP Agent imports
|
| 37 |
+
from mcp_agent.agents.agent import Agent
|
| 38 |
+
from mcp_agent.workflows.llm.augmented_llm import RequestParams
|
| 39 |
+
from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
|
| 40 |
+
|
| 41 |
+
# Local imports
|
| 42 |
+
from prompts.code_prompts import (
|
| 43 |
+
PAPER_INPUT_ANALYZER_PROMPT,
|
| 44 |
+
PAPER_DOWNLOADER_PROMPT,
|
| 45 |
+
PAPER_REFERENCE_ANALYZER_PROMPT,
|
| 46 |
+
CHAT_AGENT_PLANNING_PROMPT,
|
| 47 |
+
)
|
| 48 |
+
from utils.file_processor import FileProcessor
|
| 49 |
+
from workflows.code_implementation_workflow import CodeImplementationWorkflow
|
| 50 |
+
from workflows.code_implementation_workflow_index import (
|
| 51 |
+
CodeImplementationWorkflowWithIndex,
|
| 52 |
+
)
|
| 53 |
+
from utils.llm_utils import (
|
| 54 |
+
get_preferred_llm_class,
|
| 55 |
+
should_use_document_segmentation,
|
| 56 |
+
get_adaptive_agent_config,
|
| 57 |
+
get_adaptive_prompts,
|
| 58 |
+
)
|
| 59 |
+
from workflows.agents.document_segmentation_agent import prepare_document_segments
|
| 60 |
+
|
| 61 |
+
# Environment configuration
|
| 62 |
+
os.environ["PYTHONDONTWRITEBYTECODE"] = "1" # Prevent .pyc file generation
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def get_default_search_server(config_path: str = "mcp_agent.config.yaml"):
    """
    Get the default search server from configuration.

    Args:
        config_path: Path to the main configuration file

    Returns:
        str: The default search server name ("brave" or "bocha-mcp")
    """
    try:
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                # yaml.safe_load returns None for an empty file; coalesce to an
                # empty dict so config.get() below cannot raise AttributeError
                # (which would be caught and reported as a misleading read error).
                config = yaml.safe_load(f) or {}

            default_server = config.get("default_search_server", "brave")
            print(f"🔍 Using search server: {default_server}")
            return default_server
        else:
            print(f"⚠️ Config file {config_path} not found, using default: brave")
            return "brave"
    except Exception as e:
        # Any parse/IO failure degrades gracefully to the default server.
        print(f"⚠️ Error reading config file {config_path}: {e}")
        print("🔍 Falling back to default search server: brave")
        return "brave"
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def get_search_server_names(
    additional_servers: Optional[List[str]] = None,
) -> List[str]:
    """
    Build the server-name list, starting with the configured default search server.

    Args:
        additional_servers: Optional extra servers to append (duplicates skipped)

    Returns:
        List[str]: Server names with the default search server first
    """
    names = [get_default_search_server()]

    # Append extras in order, skipping anything already present.
    for extra in additional_servers or []:
        if extra not in names:
            names.append(extra)

    return names
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def extract_clean_json(llm_output: str) -> str:
    """
    Extract clean JSON from LLM output, removing all extra text and formatting.

    Strategies, in order: (1) parse the whole output, (2) a ```json fenced
    block, (3) a brace-balanced line scan, (4) a regex sweep. Falls back to
    returning the original output unchanged.

    Args:
        llm_output: Raw LLM output

    Returns:
        str: Clean JSON string (or the original output if no JSON was found)
    """
    try:
        # Try to parse the entire output as JSON first
        json.loads(llm_output.strip())
        return llm_output.strip()
    except json.JSONDecodeError:
        pass

    # Remove markdown code blocks
    if "```json" in llm_output:
        pattern = r"```json\s*(.*?)\s*```"
        match = re.search(pattern, llm_output, re.DOTALL)
        if match:
            json_text = match.group(1).strip()
            try:
                json.loads(json_text)
                return json_text
            except json.JSONDecodeError:
                pass

    # Find a JSON object via a brace-balanced line scan
    lines = llm_output.split("\n")
    json_lines = []
    in_json = False
    brace_count = 0

    for line in lines:
        stripped = line.strip()
        if not in_json and stripped.startswith("{"):
            in_json = True
            json_lines = [line]
            brace_count = stripped.count("{") - stripped.count("}")
            # Bug fix: a single-line object is already balanced here; without
            # this break the scan kept appending trailing non-JSON lines, and
            # for nested objects the regex fallback then returned only an
            # inner fragment.
            if brace_count == 0:
                break
        elif in_json:
            json_lines.append(line)
            brace_count += stripped.count("{") - stripped.count("}")
            if brace_count == 0:
                break

    if json_lines:
        json_text = "\n".join(json_lines).strip()
        try:
            json.loads(json_text)
            return json_text
        except json.JSONDecodeError:
            pass

    # Last attempt: use regex to find JSON (handles one nesting level only)
    pattern = r"\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}"
    matches = re.findall(pattern, llm_output, re.DOTALL)
    for match in matches:
        try:
            json.loads(match)
            return match
        except json.JSONDecodeError:
            continue

    # If all methods fail, return original output
    return llm_output
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
async def run_research_analyzer(prompt_text: str, logger) -> str:
    """
    Run the research analysis workflow using ResearchAnalyzerAgent.

    Connects the agent to the configured search server, attaches the preferred
    LLM, generates an analysis of the prompt, and returns the cleaned JSON
    extracted from the raw LLM output. Diagnostics are printed at every stage;
    all failures are re-raised after printing.

    Args:
        prompt_text: Input prompt text containing research information
        logger: Logger instance for logging information

    Returns:
        str: Analysis result from the agent

    Raises:
        ValueError: If the prompt is empty, the LLM returns nothing, or JSON
            extraction yields an empty string.
    """
    try:
        # Log input information for debugging
        print("📊 Starting research analysis...")
        print(f"Input prompt length: {len(prompt_text) if prompt_text else 0}")
        print(f"Input preview: {prompt_text[:200] if prompt_text else 'None'}...")

        # Fail fast on an empty prompt before any agent setup.
        if not prompt_text or prompt_text.strip() == "":
            raise ValueError(
                "Empty or None prompt_text provided to run_research_analyzer"
            )

        analyzer_agent = Agent(
            name="ResearchAnalyzerAgent",
            instruction=PAPER_INPUT_ANALYZER_PROMPT,
            server_names=get_search_server_names(),
        )

        async with analyzer_agent:
            print("analyzer: Connected to server, calling list_tools...")
            # Tool listing is best-effort: a failure here is logged but does
            # not abort the analysis.
            try:
                tools = await analyzer_agent.list_tools()
                print(
                    "Tools available:",
                    tools.model_dump() if hasattr(tools, "model_dump") else str(tools),
                )
            except Exception as e:
                print(f"Failed to list tools: {e}")

            # Attaching the LLM is mandatory; failure aborts the workflow.
            try:
                analyzer = await analyzer_agent.attach_llm(get_preferred_llm_class())
                print("✅ LLM attached successfully")
            except Exception as e:
                print(f"❌ Failed to attach LLM: {e}")
                raise

            # Set higher token output for research analysis
            analysis_params = RequestParams(
                max_tokens=6144,
                temperature=0.3,
            )

            print(
                f"🔄 Making LLM request with params: max_tokens={analysis_params.max_tokens}, temperature={analysis_params.temperature}"
            )

            try:
                raw_result = await analyzer.generate_str(
                    message=prompt_text, request_params=analysis_params
                )

                print("✅ LLM request completed")
                print(f"Raw result type: {type(raw_result)}")
                print(f"Raw result length: {len(raw_result) if raw_result else 0}")

                # An empty result indicates an upstream failure; list the
                # likely causes before raising.
                if not raw_result:
                    print("❌ CRITICAL: raw_result is empty or None!")
                    print("This could indicate:")
                    print("1. LLM API call failed silently")
                    print("2. API rate limiting or quota exceeded")
                    print("3. Network connectivity issues")
                    print("4. MCP server communication problems")
                    raise ValueError("LLM returned empty result")

            except Exception as e:
                print(f"❌ LLM generation failed: {e}")
                print(f"Exception type: {type(e)}")
                raise

            # Clean LLM output to ensure only pure JSON is returned
            try:
                clean_result = extract_clean_json(raw_result)
                print(f"Raw LLM output: {raw_result}")
                print(f"Cleaned JSON output: {clean_result}")

                # Log to SimpleLLMLogger (only when the provided logger
                # supports the log_response interface).
                if hasattr(logger, "log_response"):
                    logger.log_response(
                        clean_result,
                        model="ResearchAnalyzer",
                        agent="ResearchAnalyzerAgent",
                    )

                if not clean_result or clean_result.strip() == "":
                    print("❌ CRITICAL: clean_result is empty after JSON extraction!")
                    print(f"Original raw_result was: {raw_result}")
                    raise ValueError("JSON extraction resulted in empty output")

                return clean_result

            except Exception as e:
                print(f"❌ JSON extraction failed: {e}")
                print(f"Raw result was: {raw_result}")
                raise

    except Exception as e:
        # Top-level diagnostic wrapper: print details, then propagate.
        print(f"❌ run_research_analyzer failed: {e}")
        print(f"Exception details: {type(e).__name__}: {str(e)}")
        raise
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
async def run_resource_processor(analysis_result: str, logger) -> str:
    """
    Run the resource processing workflow using ResourceProcessorAgent.

    Args:
        analysis_result: Result from the research analyzer
        logger: Logger instance for logging information

    Returns:
        str: Processing result from the agent
    """
    agent = Agent(
        name="ResourceProcessorAgent",
        instruction=PAPER_DOWNLOADER_PROMPT,
        server_names=["filesystem", "file-downloader"],
    )

    async with agent:
        print("processor: Connected to server, calling list_tools...")
        tools = await agent.list_tools()
        if hasattr(tools, "model_dump"):
            tools_repr = tools.model_dump()
        else:
            tools_repr = str(tools)
        print("Tools available:", tools_repr)

        llm = await agent.attach_llm(get_preferred_llm_class())

        # Generous token budget with a low temperature for resource processing.
        params = RequestParams(
            max_tokens=4096,
            temperature=0.2,
        )

        return await llm.generate_str(
            message=analysis_result, request_params=params
        )
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
async def run_code_analyzer(
    paper_dir: str, logger, use_segmentation: bool = True
) -> str:
    """
    Run the adaptive code analysis workflow using multiple agents for comprehensive code planning.

    This function orchestrates three specialized agents with adaptive configuration:
    - ConceptAnalysisAgent: Analyzes system architecture and conceptual framework
    - AlgorithmAnalysisAgent: Extracts algorithms, formulas, and technical details
    - CodePlannerAgent: Integrates outputs into a comprehensive implementation plan

    Args:
        paper_dir: Directory path containing the research paper and related resources
        logger: Logger instance for logging information
        use_segmentation: Whether to use document segmentation capabilities

    Returns:
        str: Comprehensive analysis result from the coordinated agents
    """
    # Get adaptive configuration based on segmentation usage.
    # Prompts and server lists differ between segmented and traditional modes.
    search_server_names = get_search_server_names()
    agent_config = get_adaptive_agent_config(use_segmentation, search_server_names)
    prompts = get_adaptive_prompts(use_segmentation)

    print(
        f"📊 Code analysis mode: {'Segmented' if use_segmentation else 'Traditional'}"
    )
    print(f"   Agent configurations: {agent_config}")

    # Two fan-out analyzers (concepts, algorithms) feed one fan-in planner.
    concept_analysis_agent = Agent(
        name="ConceptAnalysisAgent",
        instruction=prompts["concept_analysis"],
        server_names=agent_config["concept_analysis"],
    )
    algorithm_analysis_agent = Agent(
        name="AlgorithmAnalysisAgent",
        instruction=prompts["algorithm_analysis"],
        server_names=agent_config["algorithm_analysis"],
    )
    code_planner_agent = Agent(
        name="CodePlannerAgent",
        instruction=prompts["code_planning"],
        server_names=agent_config["code_planner"],
    )

    # ParallelLLM runs the fan-out agents concurrently and merges their
    # outputs through the planner agent.
    code_aggregator_agent = ParallelLLM(
        fan_in_agent=code_planner_agent,
        fan_out_agents=[concept_analysis_agent, algorithm_analysis_agent],
        llm_factory=get_preferred_llm_class(),
    )

    # Set appropriate token output limit for Claude models (max 8192)
    enhanced_params = RequestParams(
        max_tokens=8192,  # Adjusted to Claude 3.5 Sonnet's actual limit
        temperature=0.3,
    )

    # Concise message for multi-agent paper analysis and code planning
    message = f"""Analyze the research paper in directory: {paper_dir}

Please locate and analyze the markdown (.md) file containing the research paper. Based on your analysis, generate a comprehensive code reproduction plan that includes:

1. Complete system architecture and component breakdown
2. All algorithms, formulas, and implementation details
3. Detailed file structure and implementation roadmap

The goal is to create a reproduction plan detailed enough for independent implementation."""

    result = await code_aggregator_agent.generate_str(
        message=message, request_params=enhanced_params
    )
    print(f"Code analysis result: {result}")
    return result
async def github_repo_download(search_result: str, paper_dir: str, logger) -> str:
    """Fetch GitHub repositories identified in a reference-search result.

    Args:
        search_result: Result from GitHub repository search
        paper_dir: Directory where the paper and its code will be stored
        logger: Logger instance for logging information

    Returns:
        str: Download result
    """
    # The instruction embeds the concrete target directory for the downloader.
    target_instruction = "Download github repo to the directory {paper_dir}/code_base".format(
        paper_dir=paper_dir
    )
    repo_agent = Agent(
        name="GithubDownloadAgent",
        instruction=target_instruction,
        server_names=["filesystem", "github-downloader"],
    )

    async with repo_agent:
        print("GitHub downloader: Downloading repositories...")
        llm = await repo_agent.attach_llm(get_preferred_llm_class())

        # Generous completion budget, near-deterministic sampling for downloads.
        params = RequestParams(
            max_tokens=4096,
            temperature=0.1,
        )

        return await llm.generate_str(message=search_result, request_params=params)
async def paper_reference_analyzer(paper_dir: str, logger) -> str:
    """
    Run the paper reference analysis and GitHub repository workflow.

    Args:
        paper_dir: Directory containing the research paper (markdown) to analyze
        logger: Logger instance for logging information

    Returns:
        str: Reference analysis result
    """
    # Agent reads the paper from disk ("filesystem") and may fetch reference
    # metadata over HTTP ("fetch").
    reference_analysis_agent = Agent(
        name="ReferenceAnalysisAgent",
        instruction=PAPER_REFERENCE_ANALYZER_PROMPT,
        server_names=["filesystem", "fetch"],
    )
    message = f"""Analyze the research paper in directory: {paper_dir}

Please locate and analyze the markdown (.md) file containing the research paper. **Focus specifically on the References/Bibliography section** to identify and analyze the 5 most relevant references that have GitHub repositories.

Focus on:
1. **References section analysis** - Extract all citations from the References/Bibliography part
2. References with high-quality GitHub implementations
3. Papers cited for methodology, algorithms, or core techniques
4. Related work that shares similar technical approaches
5. Implementation references that could provide code patterns

Goal: Find the most valuable GitHub repositories from the paper's reference list for code implementation reference."""

    async with reference_analysis_agent:
        print("Reference analyzer: Connected to server, analyzing references...")
        analyzer = await reference_analysis_agent.attach_llm(get_preferred_llm_class())

        # No explicit RequestParams here: generation uses the LLM's defaults.
        reference_result = await analyzer.generate_str(message=message)
        return reference_result
async def _process_input_source(input_source: str, logger) -> str:
|
| 482 |
+
"""
|
| 483 |
+
Process and validate input source (file path or URL).
|
| 484 |
+
|
| 485 |
+
Args:
|
| 486 |
+
input_source: Input source (file path or analysis result)
|
| 487 |
+
logger: Logger instance
|
| 488 |
+
|
| 489 |
+
Returns:
|
| 490 |
+
str: Processed input source
|
| 491 |
+
"""
|
| 492 |
+
if input_source.startswith("file://"):
|
| 493 |
+
file_path = input_source[7:]
|
| 494 |
+
if os.name == "nt" and file_path.startswith("/"):
|
| 495 |
+
file_path = file_path.lstrip("/")
|
| 496 |
+
return file_path
|
| 497 |
+
return input_source
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
async def orchestrate_research_analysis_agent(
    input_source: str, logger, progress_callback: Optional[Callable] = None
) -> Tuple[str, str]:
    """Coordinate research analysis followed by resource processing.

    Args:
        input_source: Research input source for analysis
        logger: Logger instance for process tracking
        progress_callback: Progress callback function for workflow monitoring

    Returns:
        tuple: (analysis_result, resource_processing_result)
    """

    def _report(percent, text):
        # Progress reporting is optional; do nothing when no callback is given.
        if progress_callback:
            progress_callback(percent, text)

    # Phase 1: analyze the research input.
    _report(10, "📊 Analyzing research content and extracting key information...")
    analysis_result = await run_research_analyzer(input_source, logger)

    # Short settle time between phases for system stability.
    await asyncio.sleep(5)

    # Phase 2: process downloads derived from the analysis.
    _report(25, "📥 Processing downloads and preparing document structure...")
    download_result = await run_resource_processor(analysis_result, logger)

    return analysis_result, download_result
async def synthesize_workspace_infrastructure_agent(
    download_result: str, logger, workspace_dir: Optional[str] = None
) -> Dict[str, str]:
    """Build the workspace metadata dictionary for a processed paper.

    Resolves the paper directory from the download result and derives the
    canonical paths of every report/artifact file used by downstream agents.

    Args:
        download_result: Resource processing result from the analysis agent
        logger: Logger instance for infrastructure tracking
        workspace_dir: Optional workspace directory path for environment customization

    Returns:
        dict: Workspace infrastructure metadata (paper dir, artifact paths, ...)
    """
    # Parse the download result to locate the paper directory.
    processed = await FileProcessor.process_file_input(
        download_result, base_dir=workspace_dir
    )
    paper_dir = processed["paper_dir"]

    # Log workspace infrastructure synthesis
    print("🏗️ Intelligent workspace infrastructure synthesized:")
    print(f"   Base workspace environment: {workspace_dir or 'auto-detected'}")
    print(f"   Research workspace: {paper_dir}")
    print("   AI-driven path optimization: active")

    dir_info: Dict[str, str] = {
        "paper_dir": paper_dir,
        "standardized_text": processed["standardized_text"],
    }
    # Derive every artifact path from a single filename table; insertion
    # order matches the original literal for callers that iterate the dict.
    for key, filename in (
        ("reference_path", "reference.txt"),
        ("initial_plan_path", "initial_plan.txt"),
        ("download_path", "github_download.txt"),
        ("index_report_path", "codebase_index_report.txt"),
        ("implementation_report_path", "code_implementation_report.txt"),
    ):
        dir_info[key] = os.path.join(paper_dir, filename)
    dir_info["workspace_dir"] = workspace_dir
    return dir_info
async def orchestrate_reference_intelligence_agent(
    dir_info: Dict[str, str], logger, progress_callback: Optional[Callable] = None
) -> str:
    """Run (or reuse a cached) reference analysis for the current paper.

    Args:
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for intelligence tracking
        progress_callback: Progress callback function for monitoring

    Returns:
        str: Comprehensive reference intelligence analysis result
    """
    if progress_callback:
        progress_callback(50, "🧠 Orchestrating reference intelligence discovery...")

    cache_path = dir_info["reference_path"]

    # Reuse a previously saved analysis instead of re-running the agent.
    if os.path.exists(cache_path):
        print(f"Found existing reference analysis at {cache_path}")
        with open(cache_path, "r", encoding="utf-8") as cached:
            return cached.read()

    # No cache yet: run the analyzer and persist its output for next time.
    analysis_text = await paper_reference_analyzer(dir_info["paper_dir"], logger)

    with open(cache_path, "w", encoding="utf-8") as out:
        out.write(analysis_text)
    print(f"Reference analysis saved to {cache_path}")

    return analysis_text
async def orchestrate_document_preprocessing_agent(
    dir_info: Dict[str, str], logger
) -> Dict[str, Any]:
    """
    Orchestrate adaptive document preprocessing with intelligent segmentation control.

    This agent autonomously determines whether to use document segmentation based on
    configuration settings and document size, then applies the appropriate processing strategy.

    Mutates ``dir_info`` in place: always sets ``use_segmentation`` and
    ``segments_ready`` flags; on successful segmentation also sets ``segments_dir``.

    Args:
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for preprocessing tracking

    Returns:
        dict: Document preprocessing result with segmentation metadata
    """

    try:
        print("🔍 Starting adaptive document preprocessing...")
        print(f"   Paper directory: {dir_info['paper_dir']}")

        # Step 1: Check if any markdown files exist
        md_files = []
        try:
            md_files = [
                f for f in os.listdir(dir_info["paper_dir"]) if f.endswith(".md")
            ]
        except Exception as e:
            # Directory unreadable: fall through with md_files == [] (skip path).
            print(f"⚠️ Error reading paper directory: {e}")

        if not md_files:
            print("ℹ️ No markdown files found - skipping document preprocessing")
            dir_info["segments_ready"] = False
            dir_info["use_segmentation"] = False
            return {
                "status": "skipped",
                "reason": "no_markdown_files",
                "paper_dir": dir_info["paper_dir"],
                "segments_ready": False,
                "use_segmentation": False,
            }

        # Step 2: Read document content to determine size.
        # NOTE: only the first .md file found is inspected.
        md_path = os.path.join(dir_info["paper_dir"], md_files[0])
        try:
            # Check if file is actually a PDF by reading the first few bytes
            with open(md_path, "rb") as f:
                header = f.read(8)
                if header.startswith(b"%PDF"):
                    # Deliberately raised so the surrounding except reports it
                    # as a document-read error.
                    raise IOError(
                        f"File {md_path} is a PDF file, not a text file. Please convert it to markdown format or use PDF processing tools."
                    )

            with open(md_path, "r", encoding="utf-8") as f:
                document_content = f.read()
        except Exception as e:
            print(f"⚠️ Error reading document content: {e}")
            dir_info["segments_ready"] = False
            dir_info["use_segmentation"] = False
            return {
                "status": "error",
                "error_message": f"Failed to read document: {str(e)}",
                "paper_dir": dir_info["paper_dir"],
                "segments_ready": False,
                "use_segmentation": False,
            }

        # Step 3: Determine if segmentation should be used
        should_segment, reason = should_use_document_segmentation(document_content)
        print(f"📊 Segmentation decision: {should_segment}")
        print(f"   Reason: {reason}")

        # Store decision in dir_info for downstream agents
        dir_info["use_segmentation"] = should_segment

        if should_segment:
            print("🔧 Using intelligent document segmentation workflow...")

            # Prepare document segments using the segmentation agent
            segmentation_result = await prepare_document_segments(
                paper_dir=dir_info["paper_dir"], logger=logger
            )

            if segmentation_result["status"] == "success":
                print("✅ Document segmentation completed successfully!")
                print(f"   Segments directory: {segmentation_result['segments_dir']}")
                print("   🧠 Intelligent segments ready for planning agents")

                # Add segment information to dir_info for downstream agents
                dir_info["segments_dir"] = segmentation_result["segments_dir"]
                dir_info["segments_ready"] = True

                return segmentation_result

            else:
                print(
                    f"⚠️ Document segmentation failed: {segmentation_result.get('error_message', 'Unknown error')}"
                )
                print("   Falling back to traditional full-document processing...")
                # Segmentation failed: revert the decision flag so downstream
                # agents use the traditional (full-document) path.
                dir_info["segments_ready"] = False
                dir_info["use_segmentation"] = False

                return {
                    "status": "fallback_to_traditional",
                    "original_error": segmentation_result.get(
                        "error_message", "Unknown error"
                    ),
                    "paper_dir": dir_info["paper_dir"],
                    "segments_ready": False,
                    "use_segmentation": False,
                    "fallback_reason": "segmentation_failed",
                }
        else:
            print("📖 Using traditional full-document reading workflow...")
            dir_info["segments_ready"] = False

            return {
                "status": "traditional",
                "reason": reason,
                "paper_dir": dir_info["paper_dir"],
                "segments_ready": False,
                "use_segmentation": False,
                "document_size": len(document_content),
            }

    except Exception as e:
        # Catch-all: preprocessing is best-effort; the pipeline continues
        # with the traditional workflow rather than aborting.
        print(f"❌ Error during document preprocessing: {e}")
        print("   Continuing with traditional full-document processing...")

        # Ensure fallback settings
        dir_info["segments_ready"] = False
        dir_info["use_segmentation"] = False

        return {
            "status": "error",
            "paper_dir": dir_info["paper_dir"],
            "segments_ready": False,
            "use_segmentation": False,
            "error_message": str(e),
        }
async def orchestrate_code_planning_agent(
    dir_info: Dict[str, str], logger, progress_callback: Optional[Callable] = None
):
    """Generate the initial implementation plan unless one already exists.

    Args:
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for planning tracking
        progress_callback: Progress callback function for monitoring
    """
    if progress_callback:
        progress_callback(40, "🏗️ Synthesizing intelligent code architecture...")

    plan_path = dir_info["initial_plan_path"]

    # An existing plan file acts as a cache; nothing to do in that case.
    if os.path.exists(plan_path):
        return

    # Honor the segmentation decision made during preprocessing (default: on).
    segmented = dir_info.get("use_segmentation", True)
    print(f"📊 Planning mode: {'Segmented' if segmented else 'Traditional'}")

    plan_text = await run_code_analyzer(
        dir_info["paper_dir"], logger, use_segmentation=segmented
    )
    with open(plan_path, "w", encoding="utf-8") as f:
        f.write(plan_text)
    print(f"Initial plan saved to {plan_path}")
async def automate_repository_acquisition_agent(
    reference_result: str,
    dir_info: Dict[str, str],
    logger,
    progress_callback: Optional[Callable] = None,
):
    """
    Automate intelligent repository acquisition with AI-guided selection.

    Downloads the repositories identified by the reference analysis into
    ``{paper_dir}/code_base`` and records the outcome in ``download_path``.
    On failure the error text is written to the same file and the exception
    is re-raised for the main pipeline to handle.

    Args:
        reference_result: Reference intelligence analysis result
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for acquisition tracking
        progress_callback: Progress callback function for monitoring

    Raises:
        Exception: Any error raised by the download step, after being logged
            and written to ``download_path``.
    """
    if progress_callback:
        progress_callback(60, "🤖 Automating intelligent repository acquisition...")

    await asyncio.sleep(5)  # Brief pause for stability

    try:
        download_result = await github_repo_download(
            reference_result, dir_info["paper_dir"], logger
        )

        # Save download results
        with open(dir_info["download_path"], "w", encoding="utf-8") as f:
            f.write(download_result)
        print(f"GitHub download results saved to {dir_info['download_path']}")

        # Verify if any repositories were actually downloaded
        code_base_path = os.path.join(dir_info["paper_dir"], "code_base")
        if os.path.exists(code_base_path):
            # Count only real repo directories; skip hidden entries like .git.
            downloaded_repos = [
                d
                for d in os.listdir(code_base_path)
                if os.path.isdir(os.path.join(code_base_path, d))
                and not d.startswith(".")
            ]

            if downloaded_repos:
                print(
                    f"Successfully downloaded {len(downloaded_repos)} repositories: {downloaded_repos}"
                )
            else:
                # The agent reported completion but produced nothing; surface
                # the likely causes for the operator.
                print(
                    "GitHub download phase completed, but no repositories were found in the code_base directory"
                )
                print("This might indicate:")
                print(
                    "1. No relevant repositories were identified in the reference analysis"
                )
                print(
                    "2. Repository downloads failed due to access permissions or network issues"
                )
                print(
                    "3. The download agent encountered errors during the download process"
                )
        else:
            print(f"Code base directory was not created: {code_base_path}")

    except Exception as e:
        print(f"Error during GitHub repository download: {e}")
        # Still save the error information
        error_message = f"GitHub download failed: {str(e)}"
        with open(dir_info["download_path"], "w", encoding="utf-8") as f:
            f.write(error_message)
        print(f"GitHub download error saved to {dir_info['download_path']}")
        raise e  # Re-raise to be handled by the main pipeline
async def orchestrate_codebase_intelligence_agent(
    dir_info: Dict[str, str], logger, progress_callback: Optional[Callable] = None
) -> Dict:
    """
    Orchestrate intelligent codebase analysis with automated knowledge extraction.

    Indexes the repositories under ``{paper_dir}/code_base``. When the
    directory is missing or empty, indexing is skipped and a report is
    written/returned instead of failing — the pipeline continues either way.

    Args:
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for intelligence tracking
        progress_callback: Progress callback function for monitoring

    Returns:
        dict: Indexing result, or a skip/error report dict with a ``status`` key
    """
    if progress_callback:
        progress_callback(70, "🧮 Orchestrating codebase intelligence analysis...")

    print(
        "Initiating intelligent codebase analysis with AI-powered relationship mapping..."
    )
    await asyncio.sleep(2)  # Brief pause before starting indexing

    # Check if code_base directory exists and has content
    code_base_path = os.path.join(dir_info["paper_dir"], "code_base")
    if not os.path.exists(code_base_path):
        print(f"Code base directory not found: {code_base_path}")
        return {
            "status": "skipped",
            "message": "No code base directory found - skipping indexing",
        }

    # Check if there are any repositories in the code_base directory
    try:
        # Hidden entries (e.g. .git) are not counted as repositories.
        repo_dirs = [
            d
            for d in os.listdir(code_base_path)
            if os.path.isdir(os.path.join(code_base_path, d)) and not d.startswith(".")
        ]

        if not repo_dirs:
            print(f"No repositories found in {code_base_path}")
            print("This might be because:")
            print("1. GitHub download phase didn't complete successfully")
            print("2. No relevant repositories were identified for download")
            print("3. Repository download failed due to access issues")
            print("Continuing with code implementation without codebase indexing...")

            # Save a report about the skipped indexing
            skip_report = {
                "status": "skipped",
                "reason": "no_repositories_found",
                "message": f"No repositories found in {code_base_path}",
                "suggestions": [
                    "Check if GitHub download phase completed successfully",
                    "Verify if relevant repositories were identified in reference analysis",
                    "Check network connectivity and GitHub access permissions",
                ],
            }

            # NOTE(review): reports are written as str(dict) (Python repr),
            # not JSON — confirm downstream readers expect that format.
            with open(dir_info["index_report_path"], "w", encoding="utf-8") as f:
                f.write(str(skip_report))
            print(f"Indexing skip report saved to {dir_info['index_report_path']}")

            return skip_report

    except Exception as e:
        print(f"Error checking code base directory: {e}")
        return {
            "status": "error",
            "message": f"Error checking code base directory: {str(e)}",
        }

    try:
        # Imported lazily so the module loads even when the indexing workflow
        # (and its dependencies) are unavailable.
        from workflows.codebase_index_workflow import run_codebase_indexing

        print(f"Found {len(repo_dirs)} repositories to index: {repo_dirs}")

        # Run codebase index workflow
        index_result = await run_codebase_indexing(
            paper_dir=dir_info["paper_dir"],
            initial_plan_path=dir_info["initial_plan_path"],
            config_path="mcp_agent.secrets.yaml",
            logger=logger,
        )

        # Log indexing results
        if index_result["status"] == "success":
            print("Code indexing completed successfully!")
            print(
                f"Indexed {index_result['statistics']['total_repositories'] if index_result.get('statistics') else len(index_result['output_files'])} repositories"
            )
            print(f"Generated {len(index_result['output_files'])} index files")

            # Save indexing results to file
            with open(dir_info["index_report_path"], "w", encoding="utf-8") as f:
                f.write(str(index_result))
            print(f"Indexing report saved to {dir_info['index_report_path']}")

        elif index_result["status"] == "warning":
            print(f"Code indexing completed with warnings: {index_result['message']}")
        else:
            print(f"Code indexing failed: {index_result['message']}")

        return index_result

    except Exception as e:
        # Indexing is optional enrichment; log, persist the error, and let
        # the pipeline proceed to code implementation.
        print(f"Error during codebase indexing workflow: {e}")
        print("Continuing with code implementation despite indexing failure...")

        # Save error report
        error_report = {
            "status": "error",
            "message": str(e),
            "phase": "codebase_indexing",
            "recovery_action": "continuing_with_code_implementation",
        }

        with open(dir_info["index_report_path"], "w", encoding="utf-8") as f:
            f.write(str(error_report))
        print(f"Indexing error report saved to {dir_info['index_report_path']}")

        return error_report
async def synthesize_code_implementation_agent(
    dir_info: Dict[str, str],
    logger,
    progress_callback: Optional[Callable] = None,
    enable_indexing: bool = True,
) -> Dict:
    """
    Synthesize intelligent code implementation with automated development.

    Runs the code implementation workflow against the previously generated
    initial plan. With ``enable_indexing`` the index-aware workflow variant is
    used; otherwise the faster standard workflow. Skips (with a warning
    result) when no initial plan file exists.

    Args:
        dir_info: Workspace infrastructure metadata
        logger: Logger instance for implementation tracking
        progress_callback: Progress callback function for monitoring
        enable_indexing: Whether to enable code reference indexing for enhanced implementation

    Returns:
        dict: Workflow result with a ``status`` key ("success"/"warning"/"error")
    """
    if progress_callback:
        progress_callback(85, "🔬 Synthesizing intelligent code implementation...")

    print(
        "Launching intelligent code synthesis with AI-driven implementation strategies..."
    )
    await asyncio.sleep(3)  # Brief pause before starting implementation

    try:
        # Create code implementation workflow instance based on indexing preference
        if enable_indexing:
            print(
                "🔍 Using enhanced code implementation workflow with reference indexing..."
            )
            code_workflow = CodeImplementationWorkflowWithIndex()
        else:
            print("⚡ Using standard code implementation workflow (fast mode)...")
            code_workflow = CodeImplementationWorkflow()

        # Check if initial plan file exists
        if os.path.exists(dir_info["initial_plan_path"]):
            print(f"Using initial plan from {dir_info['initial_plan_path']}")

            # Run code implementation workflow with pure code mode
            implementation_result = await code_workflow.run_workflow(
                plan_file_path=dir_info["initial_plan_path"],
                target_directory=dir_info["paper_dir"],
                pure_code_mode=True,  # Focus on code implementation, skip testing
            )

            # Log implementation results
            if implementation_result["status"] == "success":
                print("Code implementation completed successfully!")
                print(f"Code directory: {implementation_result['code_directory']}")

                # Save implementation results to file.
                # NOTE(review): written as str(dict), not JSON — confirm
                # downstream readers expect that format.
                with open(
                    dir_info["implementation_report_path"], "w", encoding="utf-8"
                ) as f:
                    f.write(str(implementation_result))
                print(
                    f"Implementation report saved to {dir_info['implementation_report_path']}"
                )

            else:
                print(
                    f"Code implementation failed: {implementation_result.get('message', 'Unknown error')}"
                )

            return implementation_result
        else:
            print(
                f"Initial plan file not found at {dir_info['initial_plan_path']}, skipping code implementation"
            )
            return {
                "status": "warning",
                "message": "Initial plan not found - code implementation skipped",
            }

    except Exception as e:
        # Return an error dict rather than raising so the caller can decide
        # how to proceed.
        print(f"Error during code implementation workflow: {e}")
        return {"status": "error", "message": str(e)}
async def run_chat_planning_agent(user_input: str, logger) -> str:
    """
    Run the chat-based planning agent for user-provided coding requirements.

    Transforms a free-form coding description into a comprehensive
    implementation plan that can be fed directly into code generation,
    adapting to both academic and engineering contexts.

    Args:
        user_input: User's coding requirements and description
        logger: Logger instance for logging information

    Returns:
        str: Comprehensive implementation plan in YAML format

    Raises:
        ValueError: If the input or the generated plan is empty.
    """
    try:
        print("💬 Starting chat-based planning agent...")
        print(f"Input length: {len(user_input) if user_input else 0}")
        print(f"Input preview: {user_input[:200] if user_input else 'None'}...")

        # Reject empty requirements before spending any agent/LLM budget.
        if not user_input or not user_input.strip():
            raise ValueError(
                "Empty or None user_input provided to run_chat_planning_agent"
            )

        agent = Agent(
            name="ChatPlanningAgent",
            instruction=CHAT_AGENT_PLANNING_PROMPT,
            server_names=get_search_server_names(),  # Dynamic search server configuration
        )

        async with agent:
            print("chat_planning: Connected to server, calling list_tools...")

            # Tool discovery is best-effort: a failure is logged but does not
            # abort planning.
            try:
                tools = await agent.list_tools()
                tool_dump = (
                    tools.model_dump() if hasattr(tools, "model_dump") else str(tools)
                )
                print("Tools available:", tool_dump)
            except Exception as e:
                print(f"Failed to list tools: {e}")

            # Attaching the LLM is mandatory — re-raise on failure.
            try:
                planner = await agent.attach_llm(get_preferred_llm_class())
                print("✅ LLM attached successfully")
            except Exception as e:
                print(f"❌ Failed to attach LLM: {e}")
                raise

            # Generous token budget plus low temperature for structured output.
            params = RequestParams(
                max_tokens=8192,  # Higher token limit for detailed plans
                temperature=0.2,  # Lower temperature for more structured output
            )
            print(
                f"🔄 Making LLM request with params: max_tokens={params.max_tokens}, temperature={params.temperature}"
            )

            prompt = f"""Please analyze the following coding requirements and generate a comprehensive implementation plan:

User Requirements:
{user_input}

Please provide a detailed implementation plan that covers all aspects needed for successful development."""

            try:
                plan_text = await planner.generate_str(
                    message=prompt, request_params=params
                )

                print("✅ Planning request completed")
                print(f"Raw result type: {type(plan_text)}")
                print(f"Raw result length: {len(plan_text) if plan_text else 0}")

                if not plan_text:
                    print("❌ CRITICAL: raw_result is empty or None!")
                    raise ValueError("Chat planning agent returned empty result")

            except Exception as e:
                print(f"❌ Planning generation failed: {e}")
                print(f"Exception type: {type(e)}")
                raise

            # Mirror the raw plan into the structured LLM logger when available.
            if hasattr(logger, "log_response"):
                logger.log_response(
                    plan_text, model="ChatPlanningAgent", agent="ChatPlanningAgent"
                )

            if not plan_text or not plan_text.strip():
                print("❌ CRITICAL: Planning result is empty!")
                raise ValueError("Chat planning agent produced empty output")

            print("🎯 Chat planning completed successfully")
            print(f"Planning result preview: {plan_text[:500]}...")

            return plan_text

    except Exception as e:
        print(f"❌ run_chat_planning_agent failed: {e}")
        print(f"Exception details: {type(e).__name__}: {str(e)}")
        raise
|
| 1187 |
+
|
| 1188 |
+
|
| 1189 |
+
async def execute_multi_agent_research_pipeline(
    input_source: str,
    logger,
    progress_callback: Optional[Callable] = None,
    enable_indexing: bool = True,
) -> str:
    """
    Execute the complete intelligent multi-agent research orchestration pipeline.

    This is the main AI orchestration engine that coordinates autonomous research workflow agents:
    - Local workspace automation for seamless environment management
    - Intelligent research analysis with automated content processing
    - AI-driven code architecture synthesis and design automation
    - Reference intelligence discovery with automated knowledge extraction (optional)
    - Codebase intelligence orchestration with automated relationship analysis (optional)
    - Intelligent code implementation synthesis with AI-powered development

    Args:
        input_source: Research input source (file path, URL, or preprocessed analysis)
        logger: Logger instance for comprehensive workflow intelligence tracking
        progress_callback: Progress callback function for real-time monitoring
        enable_indexing: Whether to enable advanced intelligence analysis (default: True)

    Returns:
        str: The comprehensive pipeline execution result with status and outcomes

    Raises:
        Exception: Any failure inside the phases is printed and re-raised.
    """
    try:
        # Phase 0: Workspace Setup — local directory under the current working dir.
        if progress_callback:
            progress_callback(5, "🔄 Setting up workspace for file processing...")

        print("🚀 Initializing intelligent multi-agent research orchestration system")

        # Setup local workspace directory (idempotent: exist_ok=True).
        workspace_dir = os.path.join(os.getcwd(), "deepcode_lab")
        os.makedirs(workspace_dir, exist_ok=True)

        print("📁 Working environment: local")
        print(f"📂 Workspace directory: {workspace_dir}")
        print("✅ Workspace status: ready")

        # Log intelligence functionality status (full vs. fast mode).
        if enable_indexing:
            print("🧠 Advanced intelligence analysis enabled - comprehensive workflow")
        else:
            print("⚡ Optimized mode - advanced intelligence analysis disabled")

        # Phase 1: Input Processing and Validation (may rewrite input_source).
        input_source = await _process_input_source(input_source, logger)

        # Phase 2: Research Analysis and Resource Processing — only for raw
        # documents/URLs; preprocessed input is passed straight through.
        if isinstance(input_source, str) and (
            input_source.endswith((".pdf", ".docx", ".txt", ".html", ".md"))
            or input_source.startswith(("http", "file://"))
        ):
            # analysis_result is currently unused downstream; download_result
            # feeds workspace synthesis.
            (
                analysis_result,
                download_result,
            ) = await orchestrate_research_analysis_agent(
                input_source, logger, progress_callback
            )
        else:
            download_result = input_source  # Use input directly if already processed

        # Phase 3: Workspace Infrastructure Synthesis — produces dir_info with
        # all the paths used by later phases.
        if progress_callback:
            progress_callback(
                40, "🏗️ Synthesizing intelligent workspace infrastructure..."
            )

        dir_info = await synthesize_workspace_infrastructure_agent(
            download_result, logger, workspace_dir
        )
        # NOTE(review): fixed 30s pause — presumably lets file-system/agent side
        # effects settle before segmentation; confirm whether this is required.
        await asyncio.sleep(30)

        # Phase 3.5: Document Segmentation and Preprocessing.

        segmentation_result = await orchestrate_document_preprocessing_agent(
            dir_info, logger
        )

        # Handle segmentation result — non-success outcomes are reported but do
        # not abort the pipeline.
        if segmentation_result["status"] == "success":
            print("✅ Document preprocessing completed successfully!")
            print(
                f" 📊 Using segmentation: {dir_info.get('use_segmentation', False)}"
            )
            if dir_info.get("segments_ready", False):
                print(
                    f" 📁 Segments directory: {segmentation_result.get('segments_dir', 'N/A')}"
                )
        elif segmentation_result["status"] == "fallback_to_traditional":
            print("⚠️ Document segmentation failed, using traditional processing")
            print(
                f" Original error: {segmentation_result.get('original_error', 'Unknown')}"
            )
        else:
            print(
                f"⚠️ Document preprocessing encountered issues: {segmentation_result.get('error_message', 'Unknown')}"
            )

        # Phase 4: Code Planning Orchestration (writes the initial plan files).
        await orchestrate_code_planning_agent(dir_info, logger, progress_callback)

        # Phase 5: Reference Intelligence (only when indexing is enabled).
        if enable_indexing:
            reference_result = await orchestrate_reference_intelligence_agent(
                dir_info, logger, progress_callback
            )
        else:
            print("🔶 Skipping reference intelligence analysis (fast mode enabled)")
            # Create empty reference analysis result to maintain file structure consistency
            reference_result = "Reference intelligence analysis skipped - fast mode enabled for optimized processing"
            with open(dir_info["reference_path"], "w", encoding="utf-8") as f:
                f.write(reference_result)

        # Phase 6: Repository Acquisition Automation (optional).
        if enable_indexing:
            await automate_repository_acquisition_agent(
                reference_result, dir_info, logger, progress_callback
            )
        else:
            print("🔶 Skipping automated repository acquisition (fast mode enabled)")
            # Create empty download result file to maintain file structure consistency
            with open(dir_info["download_path"], "w", encoding="utf-8") as f:
                f.write(
                    "Automated repository acquisition skipped - fast mode enabled for optimized processing"
                )

        # Phase 7: Codebase Intelligence Orchestration (optional).
        if enable_indexing:
            index_result = await orchestrate_codebase_intelligence_agent(
                dir_info, logger, progress_callback
            )
        else:
            print("🔶 Skipping codebase intelligence orchestration (fast mode enabled)")
            # Create a skipped indexing result so the report file still exists.
            index_result = {
                "status": "skipped",
                "reason": "fast_mode_enabled",
                "message": "Codebase intelligence orchestration skipped for optimized processing",
            }
            with open(dir_info["index_report_path"], "w", encoding="utf-8") as f:
                f.write(str(index_result))

        # Phase 8: Code Implementation Synthesis — always runs.
        implementation_result = await synthesize_code_implementation_agent(
            dir_info, logger, progress_callback, enable_indexing
        )

        # Final Status Report — assemble a human-readable summary string.
        if enable_indexing:
            pipeline_summary = (
                f"Multi-agent research pipeline completed for {dir_info['paper_dir']}"
            )
        else:
            pipeline_summary = f"Multi-agent research pipeline completed (fast mode) for {dir_info['paper_dir']}"

        # Add indexing status to summary (fast mode wins over index_result).
        if not enable_indexing:
            pipeline_summary += (
                "\n⚡ Fast mode: GitHub download and codebase indexing skipped"
            )
        elif index_result["status"] == "skipped":
            pipeline_summary += f"\n🔶 Codebase indexing: {index_result['message']}"
        elif index_result["status"] == "error":
            pipeline_summary += (
                f"\n❌ Codebase indexing failed: {index_result['message']}"
            )
        elif index_result["status"] == "success":
            pipeline_summary += "\n✅ Codebase indexing completed successfully"

        # Add implementation status to summary; every branch returns.
        if implementation_result["status"] == "success":
            pipeline_summary += "\n🎉 Code implementation completed successfully!"
            pipeline_summary += (
                f"\n📁 Code generated in: {implementation_result['code_directory']}"
            )
            return pipeline_summary
        elif implementation_result["status"] == "warning":
            pipeline_summary += (
                f"\n⚠️ Code implementation: {implementation_result['message']}"
            )
            return pipeline_summary
        else:
            pipeline_summary += (
                f"\n❌ Code implementation failed: {implementation_result['message']}"
            )
            return pipeline_summary

    except Exception as e:
        print(f"Error in execute_multi_agent_research_pipeline: {e}")
        raise e
|
| 1382 |
+
|
| 1383 |
+
|
| 1384 |
+
# Backward compatibility alias (deprecated)
|
| 1385 |
+
async def paper_code_preparation(
    input_source: str, logger, progress_callback: Optional[Callable] = None
) -> str:
    """
    Deprecated: Use execute_multi_agent_research_pipeline instead.

    Kept as a thin backward-compatibility alias; it emits a real
    DeprecationWarning (visible to tooling and test runners) in addition to
    the original console message, then delegates unchanged.

    Args:
        input_source: Input source
        logger: Logger instance
        progress_callback: Progress callback function

    Returns:
        str: Pipeline result
    """
    # Local import keeps the alias self-contained without touching the
    # module's import block.
    import warnings

    # stacklevel=2 points the warning at the caller, not at this wrapper.
    warnings.warn(
        "paper_code_preparation is deprecated. Use execute_multi_agent_research_pipeline instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    print(
        "paper_code_preparation is deprecated. Use execute_multi_agent_research_pipeline instead."
    )
    return await execute_multi_agent_research_pipeline(
        input_source, logger, progress_callback
    )
|
| 1405 |
+
|
| 1406 |
+
|
| 1407 |
+
async def execute_chat_based_planning_pipeline(
    user_input: str,
    logger,
    progress_callback: Optional[Callable] = None,
    enable_indexing: bool = True,
) -> str:
    """
    Execute the chat-based planning and implementation pipeline.

    This pipeline is designed for users who provide coding requirements directly through chat,
    bypassing the traditional paper analysis phases (Phase 0-7) and jumping directly to
    planning and code implementation.

    Pipeline Flow:
    - Chat Planning: Transform user input into implementation plan
    - Workspace Setup: Create necessary directory structure
    - Code Implementation: Generate code based on the plan

    Args:
        user_input: User's coding requirements and description
        logger: Logger instance for comprehensive workflow tracking
        progress_callback: Progress callback function for real-time monitoring
        enable_indexing: Whether to enable code reference indexing for enhanced implementation

    Returns:
        str: The pipeline execution result with status and outcomes

    Raises:
        Exception: Any failure in the phases is printed and re-raised.
    """
    try:
        print("🚀 Initializing chat-based planning and implementation pipeline")
        print("💬 Chat mode: Direct user requirements to code implementation")

        # Phase 0: Workspace Setup
        if progress_callback:
            progress_callback(5, "🔄 Setting up workspace for file processing...")

        # Setup local workspace directory (idempotent).
        workspace_dir = os.path.join(os.getcwd(), "deepcode_lab")
        os.makedirs(workspace_dir, exist_ok=True)

        print("📁 Working environment: local")
        print(f"📂 Workspace directory: {workspace_dir}")
        print("✅ Workspace status: ready")

        # Phase 1: Chat-Based Planning — raises if user_input or plan is empty.
        if progress_callback:
            progress_callback(
                30,
                "💬 Generating comprehensive implementation plan from user requirements...",
            )

        print("🧠 Running chat-based planning agent...")
        planning_result = await run_chat_planning_agent(user_input, logger)

        # Phase 2: Workspace Infrastructure Synthesis
        if progress_callback:
            progress_callback(
                50, "🏗️ Synthesizing intelligent workspace infrastructure..."
            )

        # Create workspace directory structure for chat mode.
        # First, let's create a temporary directory structure that mimics a paper workspace
        import time

        # Generate a unique paper directory name from the current epoch second.
        # NOTE(review): second-granularity timestamps can collide under
        # concurrent calls — confirm whether that matters here.
        timestamp = str(int(time.time()))
        paper_name = f"chat_project_{timestamp}"

        # Use workspace directory
        chat_paper_dir = os.path.join(workspace_dir, "papers", paper_name)

        os.makedirs(chat_paper_dir, exist_ok=True)

        # Create a synthetic markdown file with user requirements so the
        # downstream paper-oriented agents have a "paper" document to read.
        markdown_content = f"""# User Coding Requirements

## Project Description
This is a coding project generated from user requirements via chat interface.

## User Requirements
{user_input}

## Generated Implementation Plan
The following implementation plan was generated by the AI chat planning agent:

```yaml
{planning_result}
```

## Project Metadata
- **Input Type**: Chat Input
- **Generation Method**: AI Chat Planning Agent
- **Timestamp**: {timestamp}
"""

        # Save the markdown file
        markdown_file_path = os.path.join(chat_paper_dir, f"{paper_name}.md")
        with open(markdown_file_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)

        print(f"💾 Created chat project workspace: {chat_paper_dir}")
        print(f"📄 Saved requirements to: {markdown_file_path}")

        # Create a download result that matches FileProcessor expectations
        # (same JSON shape the paper pipeline would have produced).
        synthetic_download_result = json.dumps(
            {
                "status": "success",
                "paper_path": markdown_file_path,
                "input_type": "chat_input",
                "paper_info": {
                    "title": "User-Provided Coding Requirements",
                    "source": "chat_input",
                    "description": "Implementation plan generated from user requirements",
                },
            }
        )

        dir_info = await synthesize_workspace_infrastructure_agent(
            synthetic_download_result, logger, workspace_dir
        )
        await asyncio.sleep(10)  # Brief pause for file system operations

        # Phase 3: Save Planning Result
        if progress_callback:
            progress_callback(70, "📝 Saving implementation plan...")

        # Save the planning result to the initial_plan.txt file (same location as Phase 4 in original pipeline)
        initial_plan_path = dir_info["initial_plan_path"]
        with open(initial_plan_path, "w", encoding="utf-8") as f:
            f.write(planning_result)
        print(f"💾 Implementation plan saved to {initial_plan_path}")

        # Phase 4: Code Implementation Synthesis (same as Phase 8 in original pipeline)
        if progress_callback:
            progress_callback(85, "🔬 Synthesizing intelligent code implementation...")

        implementation_result = await synthesize_code_implementation_agent(
            dir_info, logger, progress_callback, enable_indexing
        )

        # Final Status Report — assemble a human-readable summary string.
        pipeline_summary = f"Chat-based planning and implementation pipeline completed for {dir_info['paper_dir']}"

        # Add implementation status to summary; every branch returns.
        if implementation_result["status"] == "success":
            pipeline_summary += "\n🎉 Code implementation completed successfully!"
            pipeline_summary += (
                f"\n📁 Code generated in: {implementation_result['code_directory']}"
            )
            pipeline_summary += (
                "\n💬 Generated from user requirements via chat interface"
            )
            return pipeline_summary
        elif implementation_result["status"] == "warning":
            pipeline_summary += (
                f"\n⚠️ Code implementation: {implementation_result['message']}"
            )
            return pipeline_summary
        else:
            pipeline_summary += (
                f"\n❌ Code implementation failed: {implementation_result['message']}"
            )
            return pipeline_summary

    except Exception as e:
        print(f"Error in execute_chat_based_planning_pipeline: {e}")
        raise e
|
projects/ui/DeepCode/workflows/code_implementation_workflow.py
ADDED
|
@@ -0,0 +1,993 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Paper Code Implementation Workflow - MCP-compliant Iterative Development
|
| 3 |
+
|
| 4 |
+
Features:
|
| 5 |
+
1. File Tree Creation
|
| 6 |
+
2. Code Implementation - Based on aisi-basic-agent iterative development
|
| 7 |
+
|
| 8 |
+
MCP Architecture:
|
| 9 |
+
- MCP Server: tools/code_implementation_server.py
|
| 10 |
+
- MCP Client: Called through mcp_agent framework
|
| 11 |
+
- Configuration: mcp_agent.config.yaml
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import asyncio
|
| 15 |
+
import json
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import time
|
| 20 |
+
import yaml
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
from typing import Dict, Any, Optional, List
|
| 23 |
+
|
| 24 |
+
# MCP Agent imports
|
| 25 |
+
from mcp_agent.agents.agent import Agent
|
| 26 |
+
|
| 27 |
+
# Local imports
|
| 28 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 29 |
+
from prompts.code_prompts import STRUCTURE_GENERATOR_PROMPT
|
| 30 |
+
from prompts.code_prompts import (
|
| 31 |
+
GENERAL_CODE_IMPLEMENTATION_SYSTEM_PROMPT,
|
| 32 |
+
)
|
| 33 |
+
from workflows.agents import CodeImplementationAgent
|
| 34 |
+
from workflows.agents.memory_agent_concise import ConciseMemoryAgent
|
| 35 |
+
from config.mcp_tool_definitions import get_mcp_tools
|
| 36 |
+
from utils.llm_utils import get_preferred_llm_class, get_default_models
|
| 37 |
+
# DialogueLogger removed - no longer needed
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class CodeImplementationWorkflow:
|
| 41 |
+
"""
|
| 42 |
+
Paper Code Implementation Workflow Manager
|
| 43 |
+
|
| 44 |
+
Uses standard MCP architecture:
|
| 45 |
+
1. Connect to code-implementation server via MCP client
|
| 46 |
+
2. Use MCP protocol for tool calls
|
| 47 |
+
3. Support workspace management and operation history tracking
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
# ==================== 1. Class Initialization and Configuration (Infrastructure Layer) ====================
|
| 51 |
+
|
| 52 |
+
def __init__(self, config_path: str = "mcp_agent.secrets.yaml"):
    """Initialize workflow with configuration.

    Args:
        config_path: Path to the YAML secrets file holding API credentials.

    Raises:
        Exception: Propagated from ``_load_api_config`` when the config
            file cannot be read or parsed.
    """
    self.config_path = config_path
    # Parsed API configuration, loaded eagerly — construction fails fast
    # when the secrets file is unusable.
    self.api_config = self._load_api_config()
    # Default model names come from the separate mcp_agent.config.yaml file.
    self.default_models = get_default_models("mcp_agent.config.yaml")
    self.logger = self._create_logger()
    # MCP agent handle; None until a session is established later.
    self.mcp_agent = None
    self.enable_read_tools = (
        True  # Default value, will be overridden by run_workflow parameter
    )
|
| 62 |
+
|
| 63 |
+
def _load_api_config(self) -> Dict[str, Any]:
    """Load the API configuration from the YAML file at ``self.config_path``.

    Returns:
        Dict[str, Any]: The parsed YAML mapping.

    Raises:
        Exception: If the file is missing or cannot be parsed. The original
            error is chained as ``__cause__`` so the root cause (missing
            file, malformed YAML, ...) survives in the traceback.
    """
    try:
        with open(self.config_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
    except Exception as e:
        # "from e" preserves exception chaining instead of discarding the
        # underlying error type and traceback.
        raise Exception(f"Failed to load API config: {e}") from e
|
| 70 |
+
|
| 71 |
+
def _create_logger(self) -> logging.Logger:
|
| 72 |
+
"""Create and configure logger"""
|
| 73 |
+
logger = logging.getLogger(__name__)
|
| 74 |
+
# Don't add handlers to child loggers - let them propagate to root
|
| 75 |
+
logger.setLevel(logging.INFO)
|
| 76 |
+
return logger
|
| 77 |
+
|
| 78 |
+
def _read_plan_file(self, plan_file_path: str) -> str:
|
| 79 |
+
"""Read implementation plan file"""
|
| 80 |
+
plan_path = Path(plan_file_path)
|
| 81 |
+
if not plan_path.exists():
|
| 82 |
+
raise FileNotFoundError(
|
| 83 |
+
f"Implementation plan file not found: {plan_file_path}"
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
with open(plan_path, "r", encoding="utf-8") as f:
|
| 87 |
+
return f.read()
|
| 88 |
+
|
| 89 |
+
def _check_file_tree_exists(self, target_directory: str) -> bool:
|
| 90 |
+
"""Check if file tree structure already exists"""
|
| 91 |
+
code_directory = os.path.join(target_directory, "generate_code")
|
| 92 |
+
return os.path.exists(code_directory) and len(os.listdir(code_directory)) > 0
|
| 93 |
+
|
| 94 |
+
# ==================== 2. Public Interface Methods (External API Layer) ====================
|
| 95 |
+
|
| 96 |
+
async def run_workflow(
    self,
    plan_file_path: str,
    target_directory: Optional[str] = None,
    pure_code_mode: bool = False,
    enable_read_tools: bool = True,
):
    """Run the complete workflow - main public interface.

    Reads the implementation plan, ensures the generated-code file tree
    exists (creating it if needed), and optionally runs the pure-code
    implementation pass over it.

    Args:
        plan_file_path: Path to the implementation plan document.
        target_directory: Working directory; defaults to the plan file's
            parent directory when None.
        pure_code_mode: When True, run implement_code_pure(); otherwise
            only the file tree step is performed (no other mode is
            implemented here).
        enable_read_tools: Whether read_file/read_code_mem tools are
            available to the implementation agent.

    Returns:
        A dict with "status" of "success" (plus results and directories)
        or "error" (plus the message). Exceptions are not propagated.
    """
    # Set the read tools configuration
    self.enable_read_tools = enable_read_tools

    try:
        plan_content = self._read_plan_file(plan_file_path)

        if target_directory is None:
            target_directory = str(Path(plan_file_path).parent)

        # Calculate code directory for workspace alignment
        code_directory = os.path.join(target_directory, "generate_code")

        self.logger.info("=" * 80)
        self.logger.info("🚀 STARTING CODE IMPLEMENTATION WORKFLOW")
        self.logger.info("=" * 80)
        self.logger.info(f"📄 Plan file: {plan_file_path}")
        self.logger.info(f"📂 Plan file parent: {target_directory}")
        self.logger.info(f"🎯 Code directory (MCP workspace): {code_directory}")
        self.logger.info(
            f"⚙️ Read tools: {'ENABLED' if self.enable_read_tools else 'DISABLED'}"
        )
        self.logger.info("=" * 80)

        results = {}

        # Check if file tree exists; skip generation when it does so reruns
        # don't clobber an existing tree.
        if self._check_file_tree_exists(target_directory):
            self.logger.info("File tree exists, skipping creation")
            results["file_tree"] = "Already exists, skipped creation"
        else:
            self.logger.info("Creating file tree...")
            results["file_tree"] = await self.create_file_structure(
                plan_content, target_directory
            )

        # Code implementation: only pure_code_mode has an implementation path;
        # the else branch is intentionally a no-op.
        if pure_code_mode:
            self.logger.info("Starting pure code implementation...")
            results["code_implementation"] = await self.implement_code_pure(
                plan_content, target_directory, code_directory
            )
        else:
            pass

        self.logger.info("Workflow execution successful")

        return {
            "status": "success",
            "plan_file": plan_file_path,
            "target_directory": target_directory,
            "code_directory": os.path.join(target_directory, "generate_code"),
            "results": results,
            "mcp_architecture": "standard",
        }

    except Exception as e:
        self.logger.error(f"Workflow execution failed: {e}")

        return {"status": "error", "message": str(e), "plan_file": plan_file_path}
    finally:
        # Always release the MCP agent connection, success or failure.
        await self._cleanup_mcp_agent()
|
| 165 |
+
|
| 166 |
+
async def create_file_structure(
    self, plan_content: str, target_directory: str
) -> str:
    """Create the file tree structure described by the implementation plan.

    Spins up a short-lived structure-generator agent connected to the
    command-executor MCP server, which derives and executes the shell
    commands (mkdir -p / touch) that build the tree under
    ``<target_directory>/generate_code``.

    Args:
        plan_content: Full text of the implementation plan.
        target_directory: Parent directory for the generated tree.

    Returns:
        The agent's textual result describing what was created.
    """
    self.logger.info("Starting file tree creation...")

    structure_agent = Agent(
        name="StructureGeneratorAgent",
        instruction=STRUCTURE_GENERATOR_PROMPT,
        server_names=["command-executor"],
    )

    # Agent lifecycle is scoped to this call; connection closes on exit.
    async with structure_agent:
        creator = await structure_agent.attach_llm(
            get_preferred_llm_class(self.config_path)
        )

        message = f"""Analyze the following implementation plan and generate shell commands to create the file tree structure.

Target Directory: {target_directory}/generate_code

Implementation Plan:
{plan_content}

Tasks:
1. Find the file tree structure in the implementation plan
2. Generate shell commands (mkdir -p, touch) to create that structure
3. Use the execute_commands tool to run the commands and create the file structure

Requirements:
- Use mkdir -p to create directories
- Use touch to create files
- Include __init__.py file for Python packages
- Use relative paths to the target directory
- Execute commands to actually create the file structure"""

        result = await creator.generate_str(message=message)
        self.logger.info("File tree structure creation completed")
        return result
|
| 205 |
+
|
| 206 |
+
async def implement_code_pure(
    self, plan_content: str, target_directory: str, code_directory: str = None
) -> str:
    """Pure code implementation - focus on code writing without testing.

    Initializes the LLM client and the MCP agent (workspace rooted at
    ``code_directory``), seeds the conversation with the reproduction
    plan, and drives _pure_code_implementation_loop() to completion.

    Args:
        plan_content: Full text of the reproduction plan.
        target_directory: Parent directory of the generated code tree.
        code_directory: Working directory for the MCP workspace; derived
            from target_directory when None (backwards compatibility).

    Returns:
        The final report string produced by the implementation loop.

    Raises:
        FileNotFoundError: If the generated file tree does not exist yet.
    """
    self.logger.info("Starting pure code implementation (no testing)...")

    # Use provided code_directory or calculate it (for backwards compatibility)
    if code_directory is None:
        code_directory = os.path.join(target_directory, "generate_code")

    self.logger.info(f"🎯 Using code directory (MCP workspace): {code_directory}")

    # The file tree step must have run first; refuse to implement into nothing.
    if not os.path.exists(code_directory):
        raise FileNotFoundError(
            "File tree structure not found, please run file tree creation first"
        )

    try:
        client, client_type = await self._initialize_llm_client()
        await self._initialize_mcp_agent(code_directory)

        tools = self._prepare_mcp_tool_definitions()
        system_message = GENERAL_CODE_IMPLEMENTATION_SYSTEM_PROMPT
        messages = []

        # NOTE: The commented block below is an alternative, richer
        # paper-reproduction prompt kept for reference; the active prompt
        # follows it.
        # implementation_message = f"""**TASK: Implement Research Paper Reproduction Code**

        # You are implementing a complete, working codebase that reproduces the core algorithms, experiments, and methods described in a research paper. Your goal is to create functional code that can replicate the paper's key results and contributions.

        # **What you need to do:**
        # - Analyze the paper content and reproduction plan to understand requirements
        # - Implement all core algorithms mentioned in the main body of the paper
        # - Create the necessary components following the planned architecture
        # - Test each component to ensure functionality
        # - Integrate components into a cohesive, executable system
        # - Focus on reproducing main contributions rather than appendix-only experiments

        # **RESOURCES:**
        # - **Paper & Reproduction Plan**: `{target_directory}/` (contains .md paper files and initial_plan.txt with detailed implementation guidance)
        # - **Reference Code Indexes**: `{target_directory}/indexes/` (JSON files with implementation patterns from related codebases)
        # - **Implementation Directory**: `{code_directory}/` (your working directory for all code files)

        # **CURRENT OBJECTIVE:**
        # Start by reading the reproduction plan (`{target_directory}/initial_plan.txt`) to understand the implementation strategy, then examine the paper content to identify the first priority component to implement. Use the search_code tool to find relevant reference implementations from the indexes directory (`{target_directory}/indexes/*.json`) before coding.

        # ---
        # **START:** Review the plan above and begin implementation."""
        implementation_message = f"""**Task: Implement code based on the following reproduction plan**

**Code Reproduction Plan:**
{plan_content}

**Working Directory:** {code_directory}

**Current Objective:** Begin implementation by analyzing the plan structure, examining the current project layout, and implementing the first foundation file according to the plan's priority order."""

        messages.append({"role": "user", "content": implementation_message})

        result = await self._pure_code_implementation_loop(
            client,
            client_type,
            system_message,
            messages,
            tools,
            plan_content,
            target_directory,
        )

        return result

    finally:
        # Release the MCP connection even when the loop raises.
        await self._cleanup_mcp_agent()
|
| 278 |
+
|
| 279 |
+
# ==================== 3. Core Business Logic (Implementation Layer) ====================
|
| 280 |
+
|
| 281 |
+
async def _pure_code_implementation_loop(
    self,
    client,
    client_type,
    system_message,
    messages,
    tools,
    plan_content,
    target_directory,
):
    """Pure code implementation loop with memory optimization and phase consistency.

    Drives the LLM in a tool-use loop: each iteration calls the model,
    executes any requested tools via the code agent, records results in
    the memory agent, and feeds back guidance. The loop ends when the
    model declares completion, the iteration cap is hit, or the wall-clock
    budget (40 minutes) is exhausted.

    Returns:
        The final report produced by
        _generate_pure_code_final_report_with_concise_agents().
    """
    max_iterations = 500
    iteration = 0
    start_time = time.time()
    max_time = 2400  # 40 minutes

    # Initialize specialized agents
    code_agent = CodeImplementationAgent(
        self.mcp_agent, self.logger, self.enable_read_tools
    )
    memory_agent = ConciseMemoryAgent(plan_content, self.logger, target_directory)

    # Log read tools configuration
    read_tools_status = "ENABLED" if self.enable_read_tools else "DISABLED"
    self.logger.info(
        f"🔧 Read tools (read_file, read_code_mem): {read_tools_status}"
    )
    if not self.enable_read_tools:
        self.logger.info(
            "🚫 No read mode: read_file and read_code_mem tools will be skipped"
        )

    # Connect code agent with memory agent for summary generation
    # Note: Concise memory agent doesn't need LLM client for summary generation
    code_agent.set_memory_agent(memory_agent, client, client_type)

    # Initialize memory agent with iteration 0
    memory_agent.start_new_round(iteration=0)

    while iteration < max_iterations:
        iteration += 1
        elapsed_time = time.time() - start_time

        # Hard wall-clock cutoff; partial progress is still reported below.
        if elapsed_time > max_time:
            self.logger.warning(f"Time limit reached: {elapsed_time:.2f}s")
            break

        # # Test simplified memory approach if we have files implemented
        # if iteration == 5 and code_agent.get_files_implemented_count() > 0:
        #     self.logger.info("🧪 Testing simplified memory approach...")
        #     test_results = await memory_agent.test_simplified_memory_approach()
        #     self.logger.info(f"Memory test results: {test_results}")

        # self.logger.info(f"Pure code implementation iteration {iteration}: generating code")

        # Drop empty messages before every call; APIs reject blank content.
        messages = self._validate_messages(messages)
        current_system_message = code_agent.get_system_prompt()

        # Round logging removed

        # Call LLM
        response = await self._call_llm_with_tools(
            client, client_type, current_system_message, messages, tools
        )

        response_content = response.get("content", "").strip()
        if not response_content:
            # Keep the transcript valid: assistant turns must be non-empty.
            response_content = "Continue implementing code files..."

        messages.append({"role": "assistant", "content": response_content})

        # Handle tool calls
        if response.get("tool_calls"):
            tool_results = await code_agent.execute_tool_calls(
                response["tool_calls"]
            )

            # Record essential tool results in concise memory agent
            for tool_call, tool_result in zip(response["tool_calls"], tool_results):
                memory_agent.record_tool_result(
                    tool_name=tool_call["name"],
                    tool_input=tool_call["input"],
                    tool_result=tool_result.get("result"),
                )

            # NEW LOGIC: Check if write_file was called and trigger memory optimization immediately

            # Determine guidance based on results
            has_error = self._check_tool_results_for_errors(tool_results)
            files_count = code_agent.get_files_implemented_count()

            if has_error:
                guidance = self._generate_error_guidance()
            else:
                guidance = self._generate_success_guidance(files_count)

            compiled_response = self._compile_user_response(tool_results, guidance)
            messages.append({"role": "user", "content": compiled_response})

            # NEW LOGIC: Apply memory optimization immediately after write_file detection
            if memory_agent.should_trigger_memory_optimization(
                messages, code_agent.get_files_implemented_count()
            ):
                # Memory optimization triggered

                # Apply concise memory optimization
                files_implemented_count = code_agent.get_files_implemented_count()
                current_system_message = code_agent.get_system_prompt()
                messages = memory_agent.apply_memory_optimization(
                    current_system_message, messages, files_implemented_count
                )

                # Memory optimization completed

        else:
            # No tools requested: push the model back toward tool usage.
            files_count = code_agent.get_files_implemented_count()
            no_tools_guidance = self._generate_no_tools_guidance(files_count)
            messages.append({"role": "user", "content": no_tools_guidance})

        # Check for analysis loop and provide corrective guidance
        if code_agent.is_in_analysis_loop():
            analysis_loop_guidance = code_agent.get_analysis_loop_guidance()
            messages.append({"role": "user", "content": analysis_loop_guidance})
            self.logger.warning(
                "Analysis loop detected and corrective guidance provided"
            )

        # Record file implementations in memory agent (for the current round)
        for file_info in code_agent.get_implementation_summary()["completed_files"]:
            memory_agent.record_file_implementation(file_info["file"])

        # REMOVED: Old memory optimization logic - now happens immediately after write_file
        # Memory optimization is now triggered immediately after write_file detection

        # Start new round for next iteration, sync with workflow iteration
        memory_agent.start_new_round(iteration=iteration)

        # Check completion: the model signals done-ness with one of these phrases.
        if any(
            keyword in response_content.lower()
            for keyword in [
                "all files implemented",
                "all phases completed",
                "reproduction plan fully implemented",
                "all code of repo implementation complete",
            ]
        ):
            self.logger.info("Code implementation declared complete")
            break

        # Emergency trim if too long
        if len(messages) > 50:
            self.logger.warning(
                "Emergency message trim - applying concise memory optimization"
            )

            current_system_message = code_agent.get_system_prompt()
            files_implemented_count = code_agent.get_files_implemented_count()
            messages = memory_agent.apply_memory_optimization(
                current_system_message, messages, files_implemented_count
            )

    return await self._generate_pure_code_final_report_with_concise_agents(
        iteration, time.time() - start_time, code_agent, memory_agent
    )
|
| 446 |
+
|
| 447 |
+
# ==================== 4. MCP Agent and LLM Communication Management (Communication Layer) ====================
|
| 448 |
+
|
| 449 |
+
async def _initialize_mcp_agent(self, code_directory: str):
    """Initialize the MCP agent and connect to the code-implementation server.

    Creates the agent, enters its async context manually (it stays open
    across iterations and is closed later by _cleanup_mcp_agent), attaches
    the preferred LLM, and points the server's workspace at
    ``code_directory``.

    Args:
        code_directory: Directory used as the MCP workspace root.

    Returns:
        The attached LLM instance.

    Raises:
        Exception: Re-raised after best-effort teardown if any step fails.
    """
    try:
        self.mcp_agent = Agent(
            name="CodeImplementationAgent",
            instruction="You are a code implementation assistant, using MCP tools to implement paper code replication.",
            server_names=["code-implementation", "code-reference-indexer"],
        )

        # Manual __aenter__: the connection outlives this call on purpose;
        # _cleanup_mcp_agent() performs the matching __aexit__.
        await self.mcp_agent.__aenter__()
        llm = await self.mcp_agent.attach_llm(
            get_preferred_llm_class(self.config_path)
        )

        # Set workspace to the target code directory
        workspace_result = await self.mcp_agent.call_tool(
            "set_workspace", {"workspace_path": code_directory}
        )
        self.logger.info(f"Workspace setup result: {workspace_result}")

        return llm

    except Exception as e:
        self.logger.error(f"Failed to initialize MCP agent: {e}")
        # Best-effort teardown of a half-initialized agent before re-raising.
        if self.mcp_agent:
            try:
                await self.mcp_agent.__aexit__(None, None, None)
            except Exception:
                pass
        self.mcp_agent = None
        raise
|
| 480 |
+
|
| 481 |
+
async def _cleanup_mcp_agent(self):
|
| 482 |
+
"""Clean up MCP agent resources"""
|
| 483 |
+
if self.mcp_agent:
|
| 484 |
+
try:
|
| 485 |
+
await self.mcp_agent.__aexit__(None, None, None)
|
| 486 |
+
self.logger.info("MCP agent connection closed")
|
| 487 |
+
except Exception as e:
|
| 488 |
+
self.logger.warning(f"Error closing MCP agent: {e}")
|
| 489 |
+
finally:
|
| 490 |
+
self.mcp_agent = None
|
| 491 |
+
|
| 492 |
+
async def _initialize_llm_client(self):
    """Initialize an LLM client (Anthropic or OpenAI) based on API key availability.

    Tries Anthropic first, then OpenAI; each candidate is verified with a
    tiny live request before being accepted. OpenAI supports an optional
    custom ``base_url`` and a fallback from ``max_tokens`` to
    ``max_completion_tokens`` for models that require the newer parameter.

    Returns:
        Tuple of (client, provider_name) where provider_name is
        "anthropic" or "openai".

    Raises:
        ValueError: If neither provider has a working API key.
    """
    # Check which API has available key and try that first
    anthropic_key = self.api_config.get("anthropic", {}).get("api_key", "")
    openai_key = self.api_config.get("openai", {}).get("api_key", "")

    # Try Anthropic API first if key is available
    if anthropic_key and anthropic_key.strip():
        try:
            from anthropic import AsyncAnthropic

            client = AsyncAnthropic(api_key=anthropic_key)
            # Test connection with default model from config
            await client.messages.create(
                model=self.default_models["anthropic"],
                max_tokens=20,
                messages=[{"role": "user", "content": "test"}],
            )
            self.logger.info(
                f"Using Anthropic API with model: {self.default_models['anthropic']}"
            )
            return client, "anthropic"
        except Exception as e:
            # Fall through to OpenAI on any failure (bad key, network, model).
            self.logger.warning(f"Anthropic API unavailable: {e}")

    # Try OpenAI API if Anthropic failed or key not available
    if openai_key and openai_key.strip():
        try:
            from openai import AsyncOpenAI

            # Handle custom base_url if specified
            openai_config = self.api_config.get("openai", {})
            base_url = openai_config.get("base_url")

            if base_url:
                client = AsyncOpenAI(api_key=openai_key, base_url=base_url)
            else:
                client = AsyncOpenAI(api_key=openai_key)

            # Test connection with default model from config
            # Try max_tokens first, fallback to max_completion_tokens if unsupported
            try:
                await client.chat.completions.create(
                    model=self.default_models["openai"],
                    max_tokens=20,
                    messages=[{"role": "user", "content": "test"}],
                )
            except Exception as e:
                # Detected by error text: newer models reject max_tokens and
                # name max_completion_tokens in the error message.
                if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
                    # Retry with max_completion_tokens for models that require it
                    await client.chat.completions.create(
                        model=self.default_models["openai"],
                        max_completion_tokens=20,
                        messages=[{"role": "user", "content": "test"}],
                    )
                else:
                    raise
            self.logger.info(
                f"Using OpenAI API with model: {self.default_models['openai']}"
            )
            if base_url:
                self.logger.info(f"Using custom base URL: {base_url}")
            return client, "openai"
        except Exception as e:
            self.logger.warning(f"OpenAI API unavailable: {e}")

    raise ValueError(
        "No available LLM API - please check your API keys in configuration"
    )
|
| 561 |
+
|
| 562 |
+
async def _call_llm_with_tools(
|
| 563 |
+
self, client, client_type, system_message, messages, tools, max_tokens=8192
|
| 564 |
+
):
|
| 565 |
+
"""Call LLM with tools"""
|
| 566 |
+
try:
|
| 567 |
+
if client_type == "anthropic":
|
| 568 |
+
return await self._call_anthropic_with_tools(
|
| 569 |
+
client, system_message, messages, tools, max_tokens
|
| 570 |
+
)
|
| 571 |
+
elif client_type == "openai":
|
| 572 |
+
return await self._call_openai_with_tools(
|
| 573 |
+
client, system_message, messages, tools, max_tokens
|
| 574 |
+
)
|
| 575 |
+
else:
|
| 576 |
+
raise ValueError(f"Unsupported client type: {client_type}")
|
| 577 |
+
except Exception as e:
|
| 578 |
+
self.logger.error(f"LLM call failed: {e}")
|
| 579 |
+
raise
|
| 580 |
+
|
| 581 |
+
async def _call_anthropic_with_tools(
    self, client, system_message, messages, tools, max_tokens
):
    """Call the Anthropic Messages API and normalize the response.

    Returns:
        Dict with "content" (concatenated text blocks) and "tool_calls"
        (list of {"id", "name", "input"} dicts from tool_use blocks).
    """
    # Anthropic rejects empty message lists / blank content, so sanitize
    # first and supply a minimal continuation prompt when nothing survives.
    validated_messages = self._validate_messages(messages)
    if not validated_messages:
        validated_messages = [
            {"role": "user", "content": "Please continue implementing code"}
        ]

    try:
        response = await client.messages.create(
            model=self.default_models["anthropic"],
            system=system_message,
            messages=validated_messages,
            tools=tools,
            max_tokens=max_tokens,
            temperature=0.2,
        )
    except Exception as e:
        self.logger.error(f"Anthropic API call failed: {e}")
        raise

    content = ""
    tool_calls = []

    # Response content is a list of typed blocks: gather text and tool_use.
    for block in response.content:
        if block.type == "text":
            content += block.text
        elif block.type == "tool_use":
            tool_calls.append(
                {"id": block.id, "name": block.name, "input": block.input}
            )

    return {"content": content, "tool_calls": tool_calls}
|
| 616 |
+
|
| 617 |
+
async def _call_openai_with_tools(
    self, client, system_message, messages, tools, max_tokens
):
    """Call the OpenAI chat completions API and normalize the response.

    Converts Anthropic-style tool definitions to OpenAI function-tool
    format, prepends the system message, and retries with
    ``max_completion_tokens`` when the model rejects ``max_tokens``.

    Returns:
        Dict with "content" (str) and "tool_calls" (list of
        {"id", "name", "input"} dicts; arguments JSON-decoded).
    """
    # Translate Anthropic tool schema -> OpenAI function-tool schema.
    openai_tools = []
    for tool in tools:
        openai_tools.append(
            {
                "type": "function",
                "function": {
                    "name": tool["name"],
                    "description": tool["description"],
                    "parameters": tool["input_schema"],
                },
            }
        )

    # OpenAI carries the system prompt as the first message.
    openai_messages = [{"role": "system", "content": system_message}]
    openai_messages.extend(messages)

    # Try max_tokens first, fallback to max_completion_tokens if unsupported
    try:
        response = await client.chat.completions.create(
            model=self.default_models["openai"],
            messages=openai_messages,
            tools=openai_tools if openai_tools else None,
            max_tokens=max_tokens,
            temperature=0.2,
        )
    except Exception as e:
        if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
            # Retry with max_completion_tokens for models that require it
            # NOTE(review): the retry omits temperature — presumably because
            # such models only accept the default; confirm intent.
            response = await client.chat.completions.create(
                model=self.default_models["openai"],
                messages=openai_messages,
                tools=openai_tools if openai_tools else None,
                max_completion_tokens=max_tokens,
            )
        else:
            raise

    message = response.choices[0].message
    content = message.content or ""

    tool_calls = []
    if message.tool_calls:
        for tool_call in message.tool_calls:
            tool_calls.append(
                {
                    "id": tool_call.id,
                    "name": tool_call.function.name,
                    # Arguments arrive as a JSON string; decode to a dict.
                    "input": json.loads(tool_call.function.arguments),
                }
            )

    return {"content": content, "tool_calls": tool_calls}
|
| 673 |
+
|
| 674 |
+
# ==================== 5. Tools and Utility Methods (Utility Layer) ====================
|
| 675 |
+
|
| 676 |
+
def _validate_messages(self, messages: List[Dict]) -> List[Dict]:
|
| 677 |
+
"""Validate and clean message list"""
|
| 678 |
+
valid_messages = []
|
| 679 |
+
for msg in messages:
|
| 680 |
+
content = msg.get("content", "").strip()
|
| 681 |
+
if content:
|
| 682 |
+
valid_messages.append(
|
| 683 |
+
{"role": msg.get("role", "user"), "content": content}
|
| 684 |
+
)
|
| 685 |
+
else:
|
| 686 |
+
self.logger.warning(f"Skipping empty message: {msg}")
|
| 687 |
+
return valid_messages
|
| 688 |
+
|
| 689 |
+
def _prepare_mcp_tool_definitions(self) -> List[Dict[str, Any]]:
    """Prepare tool definitions in Anthropic API standard format.

    Delegates to the shared tool registry; "code_implementation" selects
    the toolset used by this workflow.
    """
    return get_mcp_tools("code_implementation")
|
| 692 |
+
|
| 693 |
+
def _check_tool_results_for_errors(self, tool_results: List[Dict]) -> bool:
|
| 694 |
+
"""Check tool results for errors"""
|
| 695 |
+
for result in tool_results:
|
| 696 |
+
try:
|
| 697 |
+
if hasattr(result["result"], "content") and result["result"].content:
|
| 698 |
+
content_text = result["result"].content[0].text
|
| 699 |
+
parsed_result = json.loads(content_text)
|
| 700 |
+
if parsed_result.get("status") == "error":
|
| 701 |
+
return True
|
| 702 |
+
elif isinstance(result["result"], str):
|
| 703 |
+
if "error" in result["result"].lower():
|
| 704 |
+
return True
|
| 705 |
+
except (json.JSONDecodeError, AttributeError, IndexError):
|
| 706 |
+
result_str = str(result["result"])
|
| 707 |
+
if "error" in result_str.lower():
|
| 708 |
+
return True
|
| 709 |
+
return False
|
| 710 |
+
|
| 711 |
+
# ==================== 6. User Interaction and Feedback (Interaction Layer) ====================
|
| 712 |
+
|
| 713 |
+
def _generate_success_guidance(self, files_count: int) -> str:
    """Build the user-turn guidance sent after a successful tool round.

    Args:
        files_count: Number of files implemented so far; interpolated into
            the progress line.

    Returns:
        A fixed guidance message steering the model to either declare
        completion or continue the read_code_mem -> write_file cycle.
    """
    return f"""✅ File implementation completed successfully!

📊 **Progress Status:** {files_count} files implemented

🎯 **Next Action:** Check if ALL files from the reproduction plan are implemented.

⚡ **Decision Process:**
1. **If ALL files are implemented:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
2. **If MORE files need implementation:** Continue with dependency-aware workflow:
   - **Start with `read_code_mem`** to understand existing implementations and dependencies
   - **Then `write_file`** to implement the new component
   - **Finally: Test** if needed

💡 **Key Point:** Always verify completion status before continuing with new file creation."""
|
| 729 |
+
|
| 730 |
+
def _generate_error_guidance(self) -> str:
    """Build the user-turn guidance sent after a tool round reported an error.

    Returns:
        A fixed recovery message instructing the model to diagnose the
        failure and then resume the normal implementation cycle.
    """
    return """❌ Error detected during file implementation.

🔧 **Action Required:**
1. Review the error details above
2. Fix the identified issue
3. **Check if ALL files from the reproduction plan are implemented:**
   - **If YES:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
   - **If NO:** Continue with proper development cycle for next file:
     - **Start with `read_code_mem`** to understand existing implementations
     - **Then `write_file`** to implement properly
     - **Test** if needed
4. Ensure proper error handling in future implementations

💡 **Remember:** Always verify if all planned files are implemented before continuing with new file creation."""
|
| 746 |
+
|
| 747 |
+
def _generate_no_tools_guidance(self, files_count: int) -> str:
    """Build the corrective user-turn guidance when the model used no tools.

    Args:
        files_count: Number of files implemented so far; interpolated into
            the progress line.

    Returns:
        A fixed message pushing the model back toward tool usage instead
        of plain-text explanations.
    """
    return f"""⚠️ No tool calls detected in your response.

📊 **Current Progress:** {files_count} files implemented

🚨 **Action Required:** You must use tools. **FIRST check if ALL files from the reproduction plan are implemented:**

⚡ **Decision Process:**
1. **If ALL files are implemented:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
2. **If MORE files need implementation:** Follow the development cycle:
   - **Start with `read_code_mem`** to understand existing implementations
   - **Then `write_file`** to implement the new component
   - **Finally: Test** if needed

🚨 **Critical:** Always verify completion status first, then use appropriate tools - not just explanations!"""
|
| 763 |
+
|
| 764 |
+
def _compile_user_response(self, tool_results: List[Dict], guidance: str) -> str:
|
| 765 |
+
"""Compile tool results and guidance into a single user response"""
|
| 766 |
+
response_parts = []
|
| 767 |
+
|
| 768 |
+
if tool_results:
|
| 769 |
+
response_parts.append("🔧 **Tool Execution Results:**")
|
| 770 |
+
for tool_result in tool_results:
|
| 771 |
+
tool_name = tool_result["tool_name"]
|
| 772 |
+
result_content = tool_result["result"]
|
| 773 |
+
response_parts.append(
|
| 774 |
+
f"```\nTool: {tool_name}\nResult: {result_content}\n```"
|
| 775 |
+
)
|
| 776 |
+
|
| 777 |
+
if guidance:
|
| 778 |
+
response_parts.append("\n" + guidance)
|
| 779 |
+
|
| 780 |
+
return "\n\n".join(response_parts)
|
| 781 |
+
|
| 782 |
+
# ==================== 7. Reporting and Output (Output Layer) ====================
|
| 783 |
+
|
| 784 |
+
async def _generate_pure_code_final_report_with_concise_agents(
    self,
    iterations: int,
    elapsed_time: float,
    code_agent: CodeImplementationAgent,
    memory_agent: ConciseMemoryAgent,
) -> str:
    """Assemble the final markdown completion report from agent statistics.

    Args:
        iterations: Number of implementation loop iterations executed.
        elapsed_time: Wall-clock duration of the run, in seconds.
        code_agent: Source of implementation statistics (files, decisions, etc.).
        memory_agent: Source of write-file-based memory statistics.

    Returns:
        A markdown report string, or an error string if report generation fails
        (this method never raises).
    """
    try:
        code_stats = code_agent.get_implementation_statistics()
        memory_stats = memory_agent.get_memory_statistics(
            code_stats["files_implemented_count"]
        )

        # Pull recent operation history from the MCP server when connected;
        # fall back to an empty history so the report still renders.
        if self.mcp_agent:
            history_result = await self.mcp_agent.call_tool(
                "get_operation_history", {"last_n": 30}
            )
            # call_tool may return a JSON string or an already-decoded object.
            history_data = (
                json.loads(history_result)
                if isinstance(history_result, str)
                else history_result
            )
        else:
            history_data = {"total_operations": 0, "history": []}

        # Count write_file operations and collect the paths they touched.
        write_operations = 0
        files_created = []
        if "history" in history_data:
            for item in history_data["history"]:
                if item.get("action") == "write_file":
                    write_operations += 1
                    file_path = item.get("details", {}).get("file_path", "unknown")
                    files_created.append(file_path)

        report = f"""
# Pure Code Implementation Completion Report (Write-File-Based Memory Mode)

## Execution Summary
- Implementation iterations: {iterations}
- Total elapsed time: {elapsed_time:.2f} seconds
- Files implemented: {code_stats['total_files_implemented']}
- File write operations: {write_operations}
- Total MCP operations: {history_data.get('total_operations', 0)}

## Read Tools Configuration
- Read tools enabled: {code_stats['read_tools_status']['read_tools_enabled']}
- Status: {code_stats['read_tools_status']['status']}
- Tools affected: {', '.join(code_stats['read_tools_status']['tools_affected'])}

## Agent Performance
### Code Implementation Agent
- Files tracked: {code_stats['files_implemented_count']}
- Technical decisions: {code_stats['technical_decisions_count']}
- Constraints tracked: {code_stats['constraints_count']}
- Architecture notes: {code_stats['architecture_notes_count']}
- Dependency analysis performed: {code_stats['dependency_analysis_count']}
- Files read for dependencies: {code_stats['files_read_for_dependencies']}
- Last summary triggered at file count: {code_stats['last_summary_file_count']}

### Concise Memory Agent (Write-File-Based)
- Last write_file detected: {memory_stats['last_write_file_detected']}
- Should clear memory next: {memory_stats['should_clear_memory_next']}
- Files implemented count: {memory_stats['implemented_files_tracked']}
- Current round: {memory_stats['current_round']}
- Concise mode active: {memory_stats['concise_mode_active']}
- Current round tool results: {memory_stats['current_round_tool_results']}
- Essential tools recorded: {memory_stats['essential_tools_recorded']}

## Files Created
"""
        # Show at most the last 20 files to keep the report bounded.
        for file_path in files_created[-20:]:
            report += f"- {file_path}\n"

        if len(files_created) > 20:
            report += f"... and {len(files_created) - 20} more files\n"

        report += """
## Architecture Features
✅ WRITE-FILE-BASED Memory Agent - Clear after each file generation
✅ After write_file: Clear history → Keep system prompt + initial plan + tool results
✅ Tool accumulation: read_code_mem, read_file, search_reference_code until next write_file
✅ Clean memory cycle: write_file → clear → accumulate → write_file → clear
✅ Essential tool recording with write_file detection
✅ Specialized agent separation for clean code organization
✅ MCP-compliant tool execution
✅ Production-grade code with comprehensive type hints
✅ Intelligent dependency analysis and file reading
✅ Automated read_file usage for implementation context
✅ Eliminates conversation clutter between file generations
✅ Focused memory for efficient next file generation
"""
        return report

    except Exception as e:
        # Report generation is best-effort; never let it break the workflow.
        self.logger.error(f"Failed to generate final report: {e}")
        return f"Failed to generate final report: {str(e)}"
|
| 881 |
+
|
| 882 |
+
|
| 883 |
+
async def main():
    """Script driver: configure logging, then run the implementation workflow.

    Currently runs in a fixed configuration (pure-code mode, read tools
    enabled) against hard-coded developer paths; the interactive mode
    selection is commented out.
    """
    # Configure root logger carefully to avoid duplicates: only attach a
    # handler when none exists yet (e.g. repeated invocations in one process).
    root_logger = logging.getLogger()
    if not root_logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.INFO)

    workflow = CodeImplementationWorkflow()

    print("=" * 60)
    print("Code Implementation Workflow with UNIFIED Reference Indexer")
    print("=" * 60)
    print("Select mode:")
    print("1. Test Code Reference Indexer Integration")
    print("2. Run Full Implementation Workflow")
    print("3. Run Implementation with Pure Code Mode")
    print("4. Test Read Tools Configuration")

    # mode_choice = input("Enter choice (1-4, default: 3): ").strip()

    # For testing purposes, we'll run the test first
    # if mode_choice == "4":
    #     print("Testing Read Tools Configuration...")

    #     # Create a test workflow normally
    #     test_workflow = CodeImplementationWorkflow()

    #     # Create a mock code agent for testing
    #     print("\n🧪 Testing with read tools DISABLED:")
    #     test_agent_disabled = CodeImplementationAgent(None, enable_read_tools=False)
    #     await test_agent_disabled.test_read_tools_configuration()

    #     print("\n🧪 Testing with read tools ENABLED:")
    #     test_agent_enabled = CodeImplementationAgent(None, enable_read_tools=True)
    #     await test_agent_enabled.test_read_tools_configuration()

    #     print("✅ Read tools configuration testing completed!")
    #     return

    # print("Running Code Reference Indexer Integration Test...")

    # NOTE(review): test_success is hard-coded, so the else branch below is
    # currently unreachable — the real integration test appears disabled.
    test_success = True
    if test_success:
        print("\n" + "=" * 60)
        print("🎉 UNIFIED Code Reference Indexer Integration Test PASSED!")
        print("🔧 Three-step process successfully merged into ONE tool")
        print("=" * 60)

        # Ask if user wants to continue with actual workflow
        print("\nContinuing with workflow execution...")

        # NOTE(review): hard-coded developer-specific paths — parameterize
        # (CLI args or env vars) before reusing outside this machine.
        plan_file = "/Users/lizongwei/Reasearch/DeepCode_Base/DeepCode/deepcode_lab/papers/1/initial_plan.txt"
        # plan_file = "/data2/bjdwhzzh/project-hku/Code-Agent2.0/Code-Agent/deepcode-mcp/agent_folders/papers/1/initial_plan.txt"
        target_directory = (
            "/Users/lizongwei/Reasearch/DeepCode_Base/DeepCode/deepcode_lab/papers/1/"
        )
        print("Implementation Mode Selection:")
        print("1. Pure Code Implementation Mode (Recommended)")
        print("2. Iterative Implementation Mode")

        pure_code_mode = True
        mode_name = "Pure Code Implementation Mode with Memory Agent Architecture + Code Reference Indexer"
        print(f"Using: {mode_name}")

        # Configure read tools - modify this parameter to enable/disable read tools
        enable_read_tools = (
            True  # Set to False to disable read_file and read_code_mem tools
        )
        read_tools_status = "ENABLED" if enable_read_tools else "DISABLED"
        print(f"🔧 Read tools (read_file, read_code_mem): {read_tools_status}")

        # NOTE: To test without read tools, change the line above to:
        # enable_read_tools = False

        result = await workflow.run_workflow(
            plan_file,
            target_directory=target_directory,
            pure_code_mode=pure_code_mode,
            enable_read_tools=enable_read_tools,
        )

        print("=" * 60)
        print("Workflow Execution Results:")
        print(f"Status: {result['status']}")
        print(f"Mode: {mode_name}")

        if result["status"] == "success":
            print(f"Code Directory: {result['code_directory']}")
            print(f"MCP Architecture: {result.get('mcp_architecture', 'unknown')}")
            print("Execution completed!")
        else:
            print(f"Error Message: {result['message']}")

        print("=" * 60)
        print(
            "✅ Using Standard MCP Architecture with Memory Agent + Code Reference Indexer"
        )

    else:
        print("\n" + "=" * 60)
        print("❌ Code Reference Indexer Integration Test FAILED!")
        print("Please check the configuration and try again.")
        print("=" * 60)
|
| 990 |
+
|
| 991 |
+
|
| 992 |
+
# Script entry point: run the async workflow driver.
if __name__ == "__main__":
    asyncio.run(main())
|
projects/ui/DeepCode/workflows/code_implementation_workflow_index.py
ADDED
|
@@ -0,0 +1,997 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Paper Code Implementation Workflow - MCP-compliant Iterative Development
|
| 3 |
+
|
| 4 |
+
Features:
|
| 5 |
+
1. File Tree Creation
|
| 6 |
+
2. Code Implementation - Based on aisi-basic-agent iterative development
|
| 7 |
+
|
| 8 |
+
MCP Architecture:
|
| 9 |
+
- MCP Server: tools/code_implementation_server.py
|
| 10 |
+
- MCP Client: Called through mcp_agent framework
|
| 11 |
+
- Configuration: mcp_agent.config.yaml
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import asyncio
|
| 15 |
+
import json
|
| 16 |
+
import logging
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import time
|
| 20 |
+
import yaml
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
from typing import Dict, Any, Optional, List
|
| 23 |
+
|
| 24 |
+
# MCP Agent imports
|
| 25 |
+
from mcp_agent.agents.agent import Agent
|
| 26 |
+
|
| 27 |
+
# Local imports
|
| 28 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 29 |
+
from prompts.code_prompts import STRUCTURE_GENERATOR_PROMPT
|
| 30 |
+
from prompts.code_prompts import (
|
| 31 |
+
PURE_CODE_IMPLEMENTATION_SYSTEM_PROMPT_INDEX,
|
| 32 |
+
)
|
| 33 |
+
from workflows.agents import CodeImplementationAgent
|
| 34 |
+
from workflows.agents.memory_agent_concise import ConciseMemoryAgent
|
| 35 |
+
from config.mcp_tool_definitions_index import get_mcp_tools
|
| 36 |
+
from utils.llm_utils import get_preferred_llm_class, get_default_models
|
| 37 |
+
# DialogueLogger removed - no longer needed
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class CodeImplementationWorkflowWithIndex:
|
| 41 |
+
"""
|
| 42 |
+
Paper Code Implementation Workflow Manager with Code Reference Indexer
|
| 43 |
+
|
| 44 |
+
Uses standard MCP architecture with enhanced indexing capabilities:
|
| 45 |
+
1. Connect to code-implementation server via MCP client
|
| 46 |
+
2. Use MCP protocol for tool calls
|
| 47 |
+
3. Support workspace management and operation history tracking
|
| 48 |
+
4. Integrated code reference indexer for enhanced code understanding
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
# ==================== 1. Class Initialization and Configuration (Infrastructure Layer) ====================
|
| 52 |
+
|
| 53 |
+
def __init__(self, config_path: str = "mcp_agent.secrets.yaml"):
    """Initialize the workflow from a YAML secrets/config file.

    Args:
        config_path: Path to the API secrets YAML; loaded eagerly, so a
            missing/invalid file raises during construction.
    """
    self.config_path = config_path
    # Loaded immediately — construction fails fast on a bad config file.
    self.api_config = self._load_api_config()
    self.default_models = get_default_models("mcp_agent.config.yaml")
    self.logger = self._create_logger()
    # MCP agent is created lazily by the implementation phase.
    self.mcp_agent = None
    self.enable_read_tools = (
        True  # Default value, will be overridden by run_workflow parameter
    )
|
| 63 |
+
|
| 64 |
+
def _load_api_config(self) -> Dict[str, Any]:
    """Load API configuration from YAML file.

    Returns:
        The parsed configuration mapping from ``self.config_path``.

    Raises:
        Exception: wrapping the underlying I/O or YAML parsing error; the
            original exception is chained as ``__cause__`` so tracebacks
            show the root cause.
    """
    try:
        with open(self.config_path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
    except Exception as e:
        # Fix: chain the original error (`from e`) instead of discarding it,
        # so debugging a bad config shows the real failure, not just its str().
        raise Exception(f"Failed to load API config: {e}") from e
|
| 71 |
+
|
| 72 |
+
def _create_logger(self) -> logging.Logger:
|
| 73 |
+
"""Create and configure logger"""
|
| 74 |
+
logger = logging.getLogger(__name__)
|
| 75 |
+
# Don't add handlers to child loggers - let them propagate to root
|
| 76 |
+
logger.setLevel(logging.INFO)
|
| 77 |
+
return logger
|
| 78 |
+
|
| 79 |
+
def _read_plan_file(self, plan_file_path: str) -> str:
|
| 80 |
+
"""Read implementation plan file"""
|
| 81 |
+
plan_path = Path(plan_file_path)
|
| 82 |
+
if not plan_path.exists():
|
| 83 |
+
raise FileNotFoundError(
|
| 84 |
+
f"Implementation plan file not found: {plan_file_path}"
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
with open(plan_path, "r", encoding="utf-8") as f:
|
| 88 |
+
return f.read()
|
| 89 |
+
|
| 90 |
+
def _check_file_tree_exists(self, target_directory: str) -> bool:
|
| 91 |
+
"""Check if file tree structure already exists"""
|
| 92 |
+
code_directory = os.path.join(target_directory, "generate_code")
|
| 93 |
+
return os.path.exists(code_directory) and len(os.listdir(code_directory)) > 0
|
| 94 |
+
|
| 95 |
+
# ==================== 2. Public Interface Methods (External API Layer) ====================
|
| 96 |
+
|
| 97 |
+
async def run_workflow(
    self,
    plan_file_path: str,
    target_directory: Optional[str] = None,
    pure_code_mode: bool = False,
    enable_read_tools: bool = True,
):
    """Run the complete workflow - main public interface.

    Args:
        plan_file_path: Path to the implementation plan file.
        target_directory: Output root; defaults to the plan file's parent.
        pure_code_mode: When True, runs the pure code implementation phase;
            when False, only the file tree step runs (implementation is a
            no-op here).
        enable_read_tools: Toggles read_file/read_code_mem during the run.

    Returns:
        dict with ``status`` ("success"/"error") plus either result paths
        and per-step ``results`` or an error ``message``. Never raises.
    """
    # Set the read tools configuration
    self.enable_read_tools = enable_read_tools

    try:
        plan_content = self._read_plan_file(plan_file_path)

        if target_directory is None:
            target_directory = str(Path(plan_file_path).parent)

        # Calculate code directory for workspace alignment
        code_directory = os.path.join(target_directory, "generate_code")

        self.logger.info("=" * 80)
        self.logger.info("🚀 STARTING CODE IMPLEMENTATION WORKFLOW")
        self.logger.info("=" * 80)
        self.logger.info(f"📄 Plan file: {plan_file_path}")
        self.logger.info(f"📂 Plan file parent: {target_directory}")
        self.logger.info(f"🎯 Code directory (MCP workspace): {code_directory}")
        self.logger.info(
            f"⚙️ Read tools: {'ENABLED' if self.enable_read_tools else 'DISABLED'}"
        )
        self.logger.info("=" * 80)

        results = {}

        # Step 1: create the file tree only if it isn't already present.
        if self._check_file_tree_exists(target_directory):
            self.logger.info("File tree exists, skipping creation")
            results["file_tree"] = "Already exists, skipped creation"
        else:
            self.logger.info("Creating file tree...")
            results["file_tree"] = await self.create_file_structure(
                plan_content, target_directory
            )

        # Step 2: code implementation (pure mode only; the iterative
        # branch is intentionally a no-op in this workflow variant).
        if pure_code_mode:
            self.logger.info("Starting pure code implementation...")
            results["code_implementation"] = await self.implement_code_pure(
                plan_content, target_directory, code_directory
            )
        else:
            pass

        self.logger.info("Workflow execution successful")

        return {
            "status": "success",
            "plan_file": plan_file_path,
            "target_directory": target_directory,
            "code_directory": os.path.join(target_directory, "generate_code"),
            "results": results,
            "mcp_architecture": "standard",
        }

    except Exception as e:
        # Errors are reported as a status dict rather than raised to callers.
        self.logger.error(f"Workflow execution failed: {e}")

        return {"status": "error", "message": str(e), "plan_file": plan_file_path}
    finally:
        # Always tear down the MCP agent, success or failure.
        await self._cleanup_mcp_agent()
|
| 166 |
+
|
| 167 |
+
async def create_file_structure(
    self, plan_content: str, target_directory: str
) -> str:
    """Create the file tree structure described by the implementation plan.

    Delegates to an LLM-backed structure-generator agent, which emits and
    executes shell commands (mkdir -p / touch) via the command-executor
    MCP server.

    Args:
        plan_content: Full text of the implementation plan.
        target_directory: Root under which ``generate_code/`` is created.

    Returns:
        The agent's textual response describing what was created.
    """
    self.logger.info("Starting file tree creation...")

    structure_agent = Agent(
        name="StructureGeneratorAgent",
        instruction=STRUCTURE_GENERATOR_PROMPT,
        server_names=["command-executor"],
    )

    # The agent context manages the MCP server connection lifecycle.
    async with structure_agent:
        creator = await structure_agent.attach_llm(
            get_preferred_llm_class(self.config_path)
        )

        message = f"""Analyze the following implementation plan and generate shell commands to create the file tree structure.

Target Directory: {target_directory}/generate_code

Implementation Plan:
{plan_content}

Tasks:
1. Find the file tree structure in the implementation plan
2. Generate shell commands (mkdir -p, touch) to create that structure
3. Use the execute_commands tool to run the commands and create the file structure

Requirements:
- Use mkdir -p to create directories
- Use touch to create files
- Include __init__.py file for Python packages
- Use relative paths to the target directory
- Execute commands to actually create the file structure"""

        result = await creator.generate_str(message=message)
        self.logger.info("File tree structure creation completed")
        return result
|
| 206 |
+
|
| 207 |
+
async def implement_code_pure(
    self, plan_content: str, target_directory: str, code_directory: str = None
) -> str:
    """Pure code implementation - focus on code writing without testing.

    Args:
        plan_content: Full text of the reproduction plan.
        target_directory: Root directory containing plan and outputs.
        code_directory: MCP workspace; defaults to
            ``<target_directory>/generate_code`` when None.

    Returns:
        The final result string from the implementation loop.

    Raises:
        FileNotFoundError: if the file tree has not been created yet.
    """
    self.logger.info("Starting pure code implementation (no testing)...")

    # Use provided code_directory or calculate it (for backwards compatibility)
    if code_directory is None:
        code_directory = os.path.join(target_directory, "generate_code")

    self.logger.info(f"🎯 Using code directory (MCP workspace): {code_directory}")

    if not os.path.exists(code_directory):
        raise FileNotFoundError(
            "File tree structure not found, please run file tree creation first"
        )

    try:
        # Set up the LLM client and the MCP agent scoped to the workspace.
        client, client_type = await self._initialize_llm_client()
        await self._initialize_mcp_agent(code_directory)

        tools = self._prepare_mcp_tool_definitions()
        system_message = PURE_CODE_IMPLEMENTATION_SYSTEM_PROMPT_INDEX
        messages = []

        # implementation_message = f"""**TASK: Implement Research Paper Reproduction Code**

        # You are implementing a complete, working codebase that reproduces the core algorithms, experiments, and methods described in a research paper. Your goal is to create functional code that can replicate the paper's key results and contributions.

        # **What you need to do:**
        # - Analyze the paper content and reproduction plan to understand requirements
        # - Implement all core algorithms mentioned in the main body of the paper
        # - Create the necessary components following the planned architecture
        # - Test each component to ensure functionality
        # - Integrate components into a cohesive, executable system
        # - Focus on reproducing main contributions rather than appendix-only experiments

        # **RESOURCES:**
        # - **Paper & Reproduction Plan**: `{target_directory}/` (contains .md paper files and initial_plan.txt with detailed implementation guidance)
        # - **Reference Code Indexes**: `{target_directory}/indexes/` (JSON files with implementation patterns from related codebases)
        # - **Implementation Directory**: `{code_directory}/` (your working directory for all code files)

        # **CURRENT OBJECTIVE:**
        # Start by reading the reproduction plan (`{target_directory}/initial_plan.txt`) to understand the implementation strategy, then examine the paper content to identify the first priority component to implement. Use the search_code tool to find relevant reference implementations from the indexes directory (`{target_directory}/indexes/*.json`) before coding.

        # ---
        # **START:** Review the plan above and begin implementation."""
        implementation_message = f"""**Task: Implement code based on the following reproduction plan**

**Code Reproduction Plan:**
{plan_content}

**Working Directory:** {code_directory}

**Current Objective:** Begin implementation by analyzing the plan structure, examining the current project layout, and implementing the first foundation file according to the plan's priority order."""

        messages.append({"role": "user", "content": implementation_message})

        # Hand off to the main iterative implementation loop.
        result = await self._pure_code_implementation_loop(
            client,
            client_type,
            system_message,
            messages,
            tools,
            plan_content,
            target_directory,
        )

        return result

    finally:
        # Always release MCP resources even if the loop fails.
        await self._cleanup_mcp_agent()
|
| 279 |
+
|
| 280 |
+
# ==================== 3. Core Business Logic (Implementation Layer) ====================
|
| 281 |
+
|
| 282 |
+
async def _pure_code_implementation_loop(
    self,
    client,
    client_type,
    system_message,
    messages,
    tools,
    plan_content,
    target_directory,
):
    """Pure code implementation loop with memory optimization and phase consistency.

    Repeatedly calls the LLM with the MCP tool set, executes requested tools
    through the code agent, records results in the concise memory agent, and
    applies memory optimization after write_file operations. The loop ends
    when the model declares completion, the iteration cap is hit, or the
    wall-clock budget is exhausted.

    NOTE(review): `system_message` is accepted but each iteration actually uses
    `code_agent.get_system_prompt()` — presumably the agent prompt supersedes
    the caller's; confirm before removing the parameter.

    Returns:
        The final textual report from
        `_generate_pure_code_final_report_with_concise_agents`.
    """
    max_iterations = 500
    iteration = 0
    start_time = time.time()
    max_time = 2400  # wall-clock budget: 40 minutes

    # Initialize specialized agents: one executes tools, one manages memory.
    code_agent = CodeImplementationAgent(
        self.mcp_agent, self.logger, self.enable_read_tools
    )
    memory_agent = ConciseMemoryAgent(plan_content, self.logger, target_directory)

    # Log read tools configuration
    read_tools_status = "ENABLED" if self.enable_read_tools else "DISABLED"
    self.logger.info(
        f"🔧 Read tools (read_file, read_code_mem): {read_tools_status}"
    )
    if not self.enable_read_tools:
        self.logger.info(
            "🚫 No read mode: read_file and read_code_mem tools will be skipped"
        )

    # Connect code agent with memory agent for summary generation.
    # Note: the concise memory agent doesn't need an LLM client for summaries.
    code_agent.set_memory_agent(memory_agent, client, client_type)

    # Initialize memory agent with iteration 0
    memory_agent.start_new_round(iteration=0)

    while iteration < max_iterations:
        iteration += 1
        elapsed_time = time.time() - start_time

        if elapsed_time > max_time:
            self.logger.warning(f"Time limit reached: {elapsed_time:.2f}s")
            break

        # Drop empty/invalid messages before each LLM call.
        messages = self._validate_messages(messages)
        current_system_message = code_agent.get_system_prompt()

        # Call LLM with the current tool definitions.
        response = await self._call_llm_with_tools(
            client, client_type, current_system_message, messages, tools
        )

        response_content = response.get("content", "").strip()
        if not response_content:
            # LLM APIs reject empty assistant turns; substitute filler text.
            response_content = "Continue implementing code files..."

        messages.append({"role": "assistant", "content": response_content})

        # Handle tool calls
        if response.get("tool_calls"):
            tool_results = await code_agent.execute_tool_calls(
                response["tool_calls"]
            )

            # Record essential tool results in the concise memory agent.
            for tool_call, tool_result in zip(response["tool_calls"], tool_results):
                memory_agent.record_tool_result(
                    tool_name=tool_call["name"],
                    tool_input=tool_call["input"],
                    tool_result=tool_result.get("result"),
                )

            # Determine follow-up guidance based on tool outcomes.
            has_error = self._check_tool_results_for_errors(tool_results)
            files_count = code_agent.get_files_implemented_count()

            if has_error:
                guidance = self._generate_error_guidance()
            else:
                guidance = self._generate_success_guidance(files_count)

            compiled_response = self._compile_user_response(tool_results, guidance)
            messages.append({"role": "user", "content": compiled_response})

            # Apply memory optimization immediately after write_file detection.
            if memory_agent.should_trigger_memory_optimization(
                messages, code_agent.get_files_implemented_count()
            ):
                # Apply concise memory optimization.
                files_implemented_count = code_agent.get_files_implemented_count()
                current_system_message = code_agent.get_system_prompt()
                messages = memory_agent.apply_memory_optimization(
                    current_system_message, messages, files_implemented_count
                )

        else:
            # No tools were used this round: push corrective guidance.
            files_count = code_agent.get_files_implemented_count()
            no_tools_guidance = self._generate_no_tools_guidance(files_count)
            messages.append({"role": "user", "content": no_tools_guidance})

        # Check for analysis loop and provide corrective guidance.
        if code_agent.is_in_analysis_loop():
            analysis_loop_guidance = code_agent.get_analysis_loop_guidance()
            messages.append({"role": "user", "content": analysis_loop_guidance})
            self.logger.warning(
                "Analysis loop detected and corrective guidance provided"
            )

        # Record file implementations in the memory agent (for the current round).
        for file_info in code_agent.get_implementation_summary()["completed_files"]:
            memory_agent.record_file_implementation(file_info["file"])

        # Start a new round for the next iteration, synced to this loop's counter.
        memory_agent.start_new_round(iteration=iteration)

        # Completion check: the model signals it is done via these phrases.
        if any(
            keyword in response_content.lower()
            for keyword in [
                "all files implemented",
                "all phases completed",
                "reproduction plan fully implemented",
                "all code of repo implementation complete",
            ]
        ):
            self.logger.info("Code implementation declared complete")
            break

        # Emergency trim if the conversation grows too long.
        if len(messages) > 50:
            self.logger.warning(
                "Emergency message trim - applying concise memory optimization"
            )

            current_system_message = code_agent.get_system_prompt()
            files_implemented_count = code_agent.get_files_implemented_count()
            messages = memory_agent.apply_memory_optimization(
                current_system_message, messages, files_implemented_count
            )

    return await self._generate_pure_code_final_report_with_concise_agents(
        iteration, time.time() - start_time, code_agent, memory_agent
    )
|
| 447 |
+
|
| 448 |
+
# ==================== 4. MCP Agent and LLM Communication Management (Communication Layer) ====================
|
| 449 |
+
|
| 450 |
+
async def _initialize_mcp_agent(self, code_directory: str):
    """Initialize MCP agent and connect to code-implementation servers.

    Creates the agent bound to the "code-implementation" and
    "code-reference-indexer" MCP servers, enters its async context, attaches
    the preferred LLM class, and points the server workspace at
    `code_directory`. On any failure the half-open agent is torn down and the
    original exception re-raised.

    Args:
        code_directory: Directory the MCP server should treat as its workspace.

    Returns:
        The attached LLM object returned by `attach_llm`.

    Raises:
        Exception: Whatever agent creation, context entry, or tool setup raised.
    """
    try:
        self.mcp_agent = Agent(
            name="CodeImplementationAgent",
            instruction="You are a code implementation assistant, using MCP tools to implement paper code replication.",
            server_names=["code-implementation", "code-reference-indexer"],
        )

        # Enter the agent's async context manually; paired with __aexit__ in
        # _cleanup_mcp_agent (success path) or the error handler below.
        await self.mcp_agent.__aenter__()
        llm = await self.mcp_agent.attach_llm(
            get_preferred_llm_class(self.config_path)
        )

        # Set workspace to the target code directory
        workspace_result = await self.mcp_agent.call_tool(
            "set_workspace", {"workspace_path": code_directory}
        )
        self.logger.info(f"Workspace setup result: {workspace_result}")

        return llm

    except Exception as e:
        self.logger.error(f"Failed to initialize MCP agent: {e}")
        if self.mcp_agent:
            try:
                await self.mcp_agent.__aexit__(None, None, None)
            except Exception:
                # Best-effort teardown; the original error is re-raised below.
                pass
        self.mcp_agent = None
        raise
|
| 481 |
+
|
| 482 |
+
async def _cleanup_mcp_agent(self):
|
| 483 |
+
"""Clean up MCP agent resources"""
|
| 484 |
+
if self.mcp_agent:
|
| 485 |
+
try:
|
| 486 |
+
await self.mcp_agent.__aexit__(None, None, None)
|
| 487 |
+
self.logger.info("MCP agent connection closed")
|
| 488 |
+
except Exception as e:
|
| 489 |
+
self.logger.warning(f"Error closing MCP agent: {e}")
|
| 490 |
+
finally:
|
| 491 |
+
self.mcp_agent = None
|
| 492 |
+
|
| 493 |
+
async def _initialize_llm_client(self):
    """Initialize LLM client (Anthropic or OpenAI) based on API key availability.

    Tries Anthropic first, then OpenAI, verifying each by issuing a tiny
    probe request with the configured default model. The OpenAI probe
    retries with `max_completion_tokens` when the model rejects `max_tokens`.

    Returns:
        Tuple of (client, client_type) where client_type is "anthropic" or
        "openai".

    Raises:
        ValueError: If neither provider has a working API key.
    """
    # Check which API has available key and try that first
    anthropic_key = self.api_config.get("anthropic", {}).get("api_key", "")
    openai_key = self.api_config.get("openai", {}).get("api_key", "")

    # Try Anthropic API first if key is available
    if anthropic_key and anthropic_key.strip():
        try:
            # Imported lazily so the package is only required when used.
            from anthropic import AsyncAnthropic

            client = AsyncAnthropic(api_key=anthropic_key)
            # Test connection with default model from config
            await client.messages.create(
                model=self.default_models["anthropic"],
                max_tokens=20,
                messages=[{"role": "user", "content": "test"}],
            )
            self.logger.info(
                f"Using Anthropic API with model: {self.default_models['anthropic']}"
            )
            return client, "anthropic"
        except Exception as e:
            # Fall through to OpenAI rather than failing outright.
            self.logger.warning(f"Anthropic API unavailable: {e}")

    # Try OpenAI API if Anthropic failed or key not available
    if openai_key and openai_key.strip():
        try:
            from openai import AsyncOpenAI

            # Handle custom base_url if specified (e.g. proxies / compatible APIs).
            openai_config = self.api_config.get("openai", {})
            base_url = openai_config.get("base_url")

            if base_url:
                client = AsyncOpenAI(api_key=openai_key, base_url=base_url)
            else:
                client = AsyncOpenAI(api_key=openai_key)

            # Test connection with default model from config.
            # Try max_tokens first, fallback to max_completion_tokens if unsupported.
            try:
                await client.chat.completions.create(
                    model=self.default_models["openai"],
                    max_tokens=20,
                    messages=[{"role": "user", "content": "test"}],
                )
            except Exception as e:
                if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
                    # Retry with max_completion_tokens for models that require it
                    await client.chat.completions.create(
                        model=self.default_models["openai"],
                        max_completion_tokens=20,
                        messages=[{"role": "user", "content": "test"}],
                    )
                else:
                    raise
            self.logger.info(
                f"Using OpenAI API with model: {self.default_models['openai']}"
            )
            if base_url:
                self.logger.info(f"Using custom base URL: {base_url}")
            return client, "openai"
        except Exception as e:
            self.logger.warning(f"OpenAI API unavailable: {e}")

    raise ValueError(
        "No available LLM API - please check your API keys in configuration"
    )
|
| 562 |
+
|
| 563 |
+
async def _call_llm_with_tools(
|
| 564 |
+
self, client, client_type, system_message, messages, tools, max_tokens=8192
|
| 565 |
+
):
|
| 566 |
+
"""Call LLM with tools"""
|
| 567 |
+
try:
|
| 568 |
+
if client_type == "anthropic":
|
| 569 |
+
return await self._call_anthropic_with_tools(
|
| 570 |
+
client, system_message, messages, tools, max_tokens
|
| 571 |
+
)
|
| 572 |
+
elif client_type == "openai":
|
| 573 |
+
return await self._call_openai_with_tools(
|
| 574 |
+
client, system_message, messages, tools, max_tokens
|
| 575 |
+
)
|
| 576 |
+
else:
|
| 577 |
+
raise ValueError(f"Unsupported client type: {client_type}")
|
| 578 |
+
except Exception as e:
|
| 579 |
+
self.logger.error(f"LLM call failed: {e}")
|
| 580 |
+
raise
|
| 581 |
+
|
| 582 |
+
async def _call_anthropic_with_tools(
|
| 583 |
+
self, client, system_message, messages, tools, max_tokens
|
| 584 |
+
):
|
| 585 |
+
"""Call Anthropic API"""
|
| 586 |
+
validated_messages = self._validate_messages(messages)
|
| 587 |
+
if not validated_messages:
|
| 588 |
+
validated_messages = [
|
| 589 |
+
{"role": "user", "content": "Please continue implementing code"}
|
| 590 |
+
]
|
| 591 |
+
|
| 592 |
+
try:
|
| 593 |
+
response = await client.messages.create(
|
| 594 |
+
model=self.default_models["anthropic"],
|
| 595 |
+
system=system_message,
|
| 596 |
+
messages=validated_messages,
|
| 597 |
+
tools=tools,
|
| 598 |
+
max_tokens=max_tokens,
|
| 599 |
+
temperature=0.2,
|
| 600 |
+
)
|
| 601 |
+
except Exception as e:
|
| 602 |
+
self.logger.error(f"Anthropic API call failed: {e}")
|
| 603 |
+
raise
|
| 604 |
+
|
| 605 |
+
content = ""
|
| 606 |
+
tool_calls = []
|
| 607 |
+
|
| 608 |
+
for block in response.content:
|
| 609 |
+
if block.type == "text":
|
| 610 |
+
content += block.text
|
| 611 |
+
elif block.type == "tool_use":
|
| 612 |
+
tool_calls.append(
|
| 613 |
+
{"id": block.id, "name": block.name, "input": block.input}
|
| 614 |
+
)
|
| 615 |
+
|
| 616 |
+
return {"content": content, "tool_calls": tool_calls}
|
| 617 |
+
|
| 618 |
+
async def _call_openai_with_tools(
|
| 619 |
+
self, client, system_message, messages, tools, max_tokens
|
| 620 |
+
):
|
| 621 |
+
"""Call OpenAI API"""
|
| 622 |
+
openai_tools = []
|
| 623 |
+
for tool in tools:
|
| 624 |
+
openai_tools.append(
|
| 625 |
+
{
|
| 626 |
+
"type": "function",
|
| 627 |
+
"function": {
|
| 628 |
+
"name": tool["name"],
|
| 629 |
+
"description": tool["description"],
|
| 630 |
+
"parameters": tool["input_schema"],
|
| 631 |
+
},
|
| 632 |
+
}
|
| 633 |
+
)
|
| 634 |
+
|
| 635 |
+
openai_messages = [{"role": "system", "content": system_message}]
|
| 636 |
+
openai_messages.extend(messages)
|
| 637 |
+
|
| 638 |
+
# Try max_tokens first, fallback to max_completion_tokens if unsupported
|
| 639 |
+
try:
|
| 640 |
+
response = await client.chat.completions.create(
|
| 641 |
+
model=self.default_models["openai"],
|
| 642 |
+
messages=openai_messages,
|
| 643 |
+
tools=openai_tools if openai_tools else None,
|
| 644 |
+
max_tokens=max_tokens,
|
| 645 |
+
temperature=0.2,
|
| 646 |
+
)
|
| 647 |
+
except Exception as e:
|
| 648 |
+
if "max_tokens" in str(e) and "max_completion_tokens" in str(e):
|
| 649 |
+
# Retry with max_completion_tokens for models that require it
|
| 650 |
+
response = await client.chat.completions.create(
|
| 651 |
+
model=self.default_models["openai"],
|
| 652 |
+
messages=openai_messages,
|
| 653 |
+
tools=openai_tools if openai_tools else None,
|
| 654 |
+
max_completion_tokens=max_tokens,
|
| 655 |
+
)
|
| 656 |
+
else:
|
| 657 |
+
raise
|
| 658 |
+
|
| 659 |
+
message = response.choices[0].message
|
| 660 |
+
content = message.content or ""
|
| 661 |
+
|
| 662 |
+
tool_calls = []
|
| 663 |
+
if message.tool_calls:
|
| 664 |
+
for tool_call in message.tool_calls:
|
| 665 |
+
tool_calls.append(
|
| 666 |
+
{
|
| 667 |
+
"id": tool_call.id,
|
| 668 |
+
"name": tool_call.function.name,
|
| 669 |
+
"input": json.loads(tool_call.function.arguments),
|
| 670 |
+
}
|
| 671 |
+
)
|
| 672 |
+
|
| 673 |
+
return {"content": content, "tool_calls": tool_calls}
|
| 674 |
+
|
| 675 |
+
# ==================== 5. Tools and Utility Methods (Utility Layer) ====================
|
| 676 |
+
|
| 677 |
+
def _validate_messages(self, messages: List[Dict]) -> List[Dict]:
|
| 678 |
+
"""Validate and clean message list"""
|
| 679 |
+
valid_messages = []
|
| 680 |
+
for msg in messages:
|
| 681 |
+
content = msg.get("content", "").strip()
|
| 682 |
+
if content:
|
| 683 |
+
valid_messages.append(
|
| 684 |
+
{"role": msg.get("role", "user"), "content": content}
|
| 685 |
+
)
|
| 686 |
+
else:
|
| 687 |
+
self.logger.warning(f"Skipping empty message: {msg}")
|
| 688 |
+
return valid_messages
|
| 689 |
+
|
| 690 |
+
def _prepare_mcp_tool_definitions(self) -> List[Dict[str, Any]]:
    """Prepare tool definitions in Anthropic API standard format.

    Delegates to the shared tool registry, returning the schema list for the
    "code_implementation" tool set (converted to OpenAI format elsewhere
    when needed).
    """
    return get_mcp_tools("code_implementation")
|
| 693 |
+
|
| 694 |
+
def _check_tool_results_for_errors(self, tool_results: List[Dict]) -> bool:
|
| 695 |
+
"""Check tool results for errors"""
|
| 696 |
+
for result in tool_results:
|
| 697 |
+
try:
|
| 698 |
+
if hasattr(result["result"], "content") and result["result"].content:
|
| 699 |
+
content_text = result["result"].content[0].text
|
| 700 |
+
parsed_result = json.loads(content_text)
|
| 701 |
+
if parsed_result.get("status") == "error":
|
| 702 |
+
return True
|
| 703 |
+
elif isinstance(result["result"], str):
|
| 704 |
+
if "error" in result["result"].lower():
|
| 705 |
+
return True
|
| 706 |
+
except (json.JSONDecodeError, AttributeError, IndexError):
|
| 707 |
+
result_str = str(result["result"])
|
| 708 |
+
if "error" in result_str.lower():
|
| 709 |
+
return True
|
| 710 |
+
return False
|
| 711 |
+
|
| 712 |
+
# ==================== 6. User Interaction and Feedback (Interaction Layer) ====================
|
| 713 |
+
|
| 714 |
+
def _generate_success_guidance(self, files_count: int) -> str:
    """Generate concise success guidance for continuing implementation.

    The returned text is appended as a user message after successful tool
    calls; it nudges the model to either finish or continue the
    read -> (optional search) -> write cycle.

    Args:
        files_count: Number of files implemented so far (shown to the model).
    """
    return f"""✅ File implementation completed successfully!

📊 **Progress Status:** {files_count} files implemented

🎯 **Next Action:** Check if ALL files from the reproduction plan are implemented.

⚡ **Decision Process:**
1. **If ALL files are implemented:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
2. **If MORE files need implementation:** Continue with dependency-aware workflow:
   - **Start with `read_code_mem`** to understand existing implementations and dependencies
   - **Optionally use `search_code_references`** for reference patterns (OPTIONAL - use for inspiration only, original paper specs take priority)
   - **Then `write_file`** to implement the new component
   - **Finally: Test** if needed

💡 **Key Point:** Always verify completion status before continuing with new file creation."""
|
| 731 |
+
|
| 732 |
+
def _generate_error_guidance(self) -> str:
    """Generate error guidance for handling issues.

    The returned text is appended as a user message when any tool result in
    the round reported an error; it asks the model to fix the issue before
    continuing with the normal development cycle.
    """
    return """❌ Error detected during file implementation.

🔧 **Action Required:**
1. Review the error details above
2. Fix the identified issue
3. **Check if ALL files from the reproduction plan are implemented:**
   - **If YES:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
   - **If NO:** Continue with proper development cycle for next file:
     - **Start with `read_code_mem`** to understand existing implementations
     - **Optionally use `search_code_references`** for reference patterns (OPTIONAL - for inspiration only)
     - **Then `write_file`** to implement properly
     - **Test** if needed
4. Ensure proper error handling in future implementations

💡 **Remember:** Always verify if all planned files are implemented before continuing with new file creation."""
|
| 749 |
+
|
| 750 |
+
def _generate_no_tools_guidance(self, files_count: int) -> str:
    """Generate concise guidance when no tools are called.

    Appended as a user message when the assistant replied without any tool
    calls; reminds the model that progress requires actual tool usage.

    Args:
        files_count: Number of files implemented so far (shown to the model).
    """
    return f"""⚠️ No tool calls detected in your response.

📊 **Current Progress:** {files_count} files implemented

🚨 **Action Required:** You must use tools. **FIRST check if ALL files from the reproduction plan are implemented:**

⚡ **Decision Process:**
1. **If ALL files are implemented:** Use `execute_python` or `execute_bash` to test the complete implementation, then respond "**implementation complete**" to end the conversation
2. **If MORE files need implementation:** Follow the development cycle:
   - **Start with `read_code_mem`** to understand existing implementations
   - **Optionally use `search_code_references`** for reference patterns (OPTIONAL - for inspiration only)
   - **Then `write_file`** to implement the new component
   - **Finally: Test** if needed

🚨 **Critical:** Always verify completion status first, then use appropriate tools - not just explanations!"""
|
| 767 |
+
|
| 768 |
+
def _compile_user_response(self, tool_results: List[Dict], guidance: str) -> str:
|
| 769 |
+
"""Compile tool results and guidance into a single user response"""
|
| 770 |
+
response_parts = []
|
| 771 |
+
|
| 772 |
+
if tool_results:
|
| 773 |
+
response_parts.append("🔧 **Tool Execution Results:**")
|
| 774 |
+
for tool_result in tool_results:
|
| 775 |
+
tool_name = tool_result["tool_name"]
|
| 776 |
+
result_content = tool_result["result"]
|
| 777 |
+
response_parts.append(
|
| 778 |
+
f"```\nTool: {tool_name}\nResult: {result_content}\n```"
|
| 779 |
+
)
|
| 780 |
+
|
| 781 |
+
if guidance:
|
| 782 |
+
response_parts.append("\n" + guidance)
|
| 783 |
+
|
| 784 |
+
return "\n\n".join(response_parts)
|
| 785 |
+
|
| 786 |
+
# ==================== 7. Reporting and Output (Output Layer) ====================
|
| 787 |
+
|
| 788 |
+
async def _generate_pure_code_final_report_with_concise_agents(
    self,
    iterations: int,
    elapsed_time: float,
    code_agent: CodeImplementationAgent,
    memory_agent: ConciseMemoryAgent,
):
    """Generate the final markdown report using concise agent statistics.

    Pulls implementation statistics from both agents, queries the MCP
    operation history (last 30 operations) when an agent is connected, and
    assembles a human-readable completion report. Any failure is caught and
    returned as an error string rather than raised.

    Args:
        iterations: Number of loop iterations executed.
        elapsed_time: Total wall-clock seconds spent.
        code_agent: Agent that tracked file implementations.
        memory_agent: Agent that tracked memory-optimization state.

    Returns:
        The report text, or a "Failed to generate final report: ..." string.
    """
    try:
        code_stats = code_agent.get_implementation_statistics()
        memory_stats = memory_agent.get_memory_statistics(
            code_stats["files_implemented_count"]
        )

        # Operation history only exists while the MCP agent is connected.
        if self.mcp_agent:
            history_result = await self.mcp_agent.call_tool(
                "get_operation_history", {"last_n": 30}
            )
            history_data = (
                json.loads(history_result)
                if isinstance(history_result, str)
                else history_result
            )
        else:
            history_data = {"total_operations": 0, "history": []}

        # Count write_file operations and collect the paths they created.
        write_operations = 0
        files_created = []
        if "history" in history_data:
            for item in history_data["history"]:
                if item.get("action") == "write_file":
                    write_operations += 1
                    file_path = item.get("details", {}).get("file_path", "unknown")
                    files_created.append(file_path)

        report = f"""
# Pure Code Implementation Completion Report (Write-File-Based Memory Mode)

## Execution Summary
- Implementation iterations: {iterations}
- Total elapsed time: {elapsed_time:.2f} seconds
- Files implemented: {code_stats['total_files_implemented']}
- File write operations: {write_operations}
- Total MCP operations: {history_data.get('total_operations', 0)}

## Read Tools Configuration
- Read tools enabled: {code_stats['read_tools_status']['read_tools_enabled']}
- Status: {code_stats['read_tools_status']['status']}
- Tools affected: {', '.join(code_stats['read_tools_status']['tools_affected'])}

## Agent Performance
### Code Implementation Agent
- Files tracked: {code_stats['files_implemented_count']}
- Technical decisions: {code_stats['technical_decisions_count']}
- Constraints tracked: {code_stats['constraints_count']}
- Architecture notes: {code_stats['architecture_notes_count']}
- Dependency analysis performed: {code_stats['dependency_analysis_count']}
- Files read for dependencies: {code_stats['files_read_for_dependencies']}
- Last summary triggered at file count: {code_stats['last_summary_file_count']}

### Concise Memory Agent (Write-File-Based)
- Last write_file detected: {memory_stats['last_write_file_detected']}
- Should clear memory next: {memory_stats['should_clear_memory_next']}
- Files implemented count: {memory_stats['implemented_files_tracked']}
- Current round: {memory_stats['current_round']}
- Concise mode active: {memory_stats['concise_mode_active']}
- Current round tool results: {memory_stats['current_round_tool_results']}
- Essential tools recorded: {memory_stats['essential_tools_recorded']}

## Files Created
"""
        # Only the 20 most recent files are listed explicitly.
        for file_path in files_created[-20:]:
            report += f"- {file_path}\n"

        if len(files_created) > 20:
            report += f"... and {len(files_created) - 20} more files\n"

        report += """
## Architecture Features
✅ WRITE-FILE-BASED Memory Agent - Clear after each file generation
✅ After write_file: Clear history → Keep system prompt + initial plan + tool results
✅ Tool accumulation: read_code_mem, read_file, search_reference_code until next write_file
✅ Clean memory cycle: write_file → clear → accumulate → write_file → clear
✅ Essential tool recording with write_file detection
✅ Specialized agent separation for clean code organization
✅ MCP-compliant tool execution
✅ Production-grade code with comprehensive type hints
✅ Intelligent dependency analysis and file reading
✅ Automated read_file usage for implementation context
✅ Eliminates conversation clutter between file generations
✅ Focused memory for efficient next file generation
"""
        return report

    except Exception as e:
        # Reporting must never crash the workflow; degrade to an error string.
        self.logger.error(f"Failed to generate final report: {e}")
        return f"Failed to generate final report: {str(e)}"
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
async def main():
    """Run the code-implementation workflow with the unified reference indexer.

    Side effects: configures the root logger (once), instantiates
    ``CodeImplementationWorkflowWithIndex``, prints a banner/mode menu, and
    executes the full workflow against hardcoded plan/target paths,
    printing the result summary to stdout.

    Returns:
        None. Outcome is reported via stdout and the logger.
    """
    # Configure root logger carefully to avoid duplicate handlers when this
    # module is imported/re-run in the same interpreter session.
    root_logger = logging.getLogger()
    if not root_logger.handlers:
        handler = logging.StreamHandler()
        formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
        handler.setFormatter(formatter)
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.INFO)

    workflow = CodeImplementationWorkflowWithIndex()

    print("=" * 60)
    print("Code Implementation Workflow with UNIFIED Reference Indexer")
    print("=" * 60)
    # Menu is informational only: interactive selection was removed and the
    # script always proceeds with the pure-code workflow below.
    print("Select mode:")
    print("1. Test Code Reference Indexer Integration")
    print("2. Run Full Implementation Workflow")
    print("3. Run Implementation with Pure Code Mode")
    print("4. Test Read Tools Configuration")

    # The integration test is assumed to pass; the previous
    # `test_success = True` / unreachable FAILED branch has been removed.
    print("\n" + "=" * 60)
    print("🎉 UNIFIED Code Reference Indexer Integration Test PASSED!")
    print("🔧 Three-step process successfully merged into ONE tool")
    print("=" * 60)

    print("\nContinuing with workflow execution...")

    # NOTE(review): machine-specific absolute paths — adjust for your
    # environment or lift into CLI args / config before reuse.
    plan_file = "/Users/lizongwei/Reasearch/DeepCode_Base/DeepCode/deepcode_lab/papers/1/initial_plan.txt"
    target_directory = (
        "/Users/lizongwei/Reasearch/DeepCode_Base/DeepCode/deepcode_lab/papers/1/"
    )
    print("Implementation Mode Selection:")
    print("1. Pure Code Implementation Mode (Recommended)")
    print("2. Iterative Implementation Mode")

    # Pure code mode is forced on; the menu above is informational.
    pure_code_mode = True
    mode_name = "Pure Code Implementation Mode with Memory Agent Architecture + Code Reference Indexer"
    print(f"Using: {mode_name}")

    # Configure read tools - modify this parameter to enable/disable read tools
    # (set to False to disable read_file and read_code_mem).
    enable_read_tools = True
    read_tools_status = "ENABLED" if enable_read_tools else "DISABLED"
    print(f"🔧 Read tools (read_file, read_code_mem): {read_tools_status}")

    result = await workflow.run_workflow(
        plan_file,
        target_directory=target_directory,
        pure_code_mode=pure_code_mode,
        enable_read_tools=enable_read_tools,
    )

    print("=" * 60)
    print("Workflow Execution Results:")
    print(f"Status: {result['status']}")
    print(f"Mode: {mode_name}")

    if result["status"] == "success":
        print(f"Code Directory: {result['code_directory']}")
        print(f"MCP Architecture: {result.get('mcp_architecture', 'unknown')}")
        print("Execution completed!")
    else:
        print(f"Error Message: {result['message']}")

    print("=" * 60)
    print(
        "✅ Using Standard MCP Architecture with Memory Agent + Code Reference Indexer"
    )
| 995 |
+
|
| 996 |
+
# Script entry point: start a fresh event loop and run the async workflow.
if __name__ == "__main__":
    asyncio.run(main())
|