Spaces:
Runtime error
Runtime error
| import argparse | |
| import json | |
| import mimetypes # Added missing import | |
| import os | |
| import re # Added missing import | |
| import shutil # Added missing import | |
| import threading | |
| from concurrent.futures import ThreadPoolExecutor, as_completed | |
| from datetime import datetime | |
| from pathlib import Path | |
| from typing import Dict, List, Optional, Any | |
| import datasets | |
| import pandas as pd | |
| from dotenv import load_dotenv | |
| from huggingface_hub import login | |
| import gradio as gr | |
| from scripts.reformulator import prepare_response | |
| from scripts.run_agents import ( | |
| get_single_file_description, | |
| get_zip_description, | |
| ) | |
| from scripts.text_inspector_tool import TextInspectorTool | |
| from scripts.text_web_browser import ( | |
| ArchiveSearchTool, | |
| FinderTool, | |
| FindNextTool, | |
| PageDownTool, | |
| PageUpTool, | |
| SimpleTextBrowser, | |
| VisitTool, | |
| ) | |
| from scripts.visual_qa import visualizer | |
| # from scripts.flux_lora_tool import FluxLoRATool | |
| from tqdm import tqdm | |
| from smolagents import ( | |
| CodeAgent, | |
| HfApiModel, | |
| LiteLLMModel, | |
| Model, | |
| OpenAIServerModel, # Added missing model | |
| TransformersModel, # Added missing model | |
| ToolCallingAgent, | |
| Tool, | |
| ) | |
| from smolagents.agent_types import AgentText, AgentImage, AgentAudio | |
| from smolagents.gradio_ui import pull_messages_from_step, handle_agent_output_types | |
class GoogleSearchTool(Tool):
    """Performs Google web searches using the Serper API.

    Sends the query (and optional year restriction) to
    https://google.serper.dev/search and returns the organic results
    formatted as a numbered markdown list.
    """

    name = "web_search"
    description = """Performs a google web search for your query then returns a string of the top search results."""
    inputs = {
        "query": {"type": "string", "description": "The search query to perform."},
        "filter_year": {
            "type": "integer",
            "description": "Optionally restrict results to a certain year",
            "nullable": True,
        },
    }
    output_type = "string"

    def __init__(self):
        """Initialize the tool with the API key from the environment."""
        # Bug fix: the original called super().__init__(self), passing the
        # instance as an extra positional argument; the base initializer
        # must be called without it.
        super().__init__()
        self.serpapi_key = os.getenv("SERPER_API_KEY")
        self._validate_dependencies()

    def _validate_dependencies(self):
        """Ensure the API key is available.

        Raises:
            ValueError: if SERPER_API_KEY is not set in the environment.
        """
        if not self.serpapi_key:
            raise ValueError(
                "Missing SerpAPI key. Make sure you have 'SERPER_API_KEY' in your env variables."
            )

    def forward(self, query: str, filter_year: Optional[int] = None) -> str:
        """Execute the search query and return formatted results.

        Args:
            query: The search terms.
            filter_year: If given, restrict results to that calendar year.

        Returns:
            Formatted search results, or a "no results" message.

        Raises:
            ValueError: if the API responds with a non-200 status code.
        """
        import requests

        params = {
            "engine": "google",
            "q": query,
            "google_domain": "google.com",
        }
        # Serper authenticates via the X-API-KEY header. Fix: the original
        # also sent the key in the JSON body as "api_key", needlessly
        # duplicating the secret in the request payload.
        headers = {"X-API-KEY": self.serpapi_key, "Content-Type": "application/json"}
        if filter_year is not None:
            # tbs=cdr:1 activates Google's custom date-range filter.
            params["tbs"] = (
                f"cdr:1,cd_min:01/01/{filter_year},cd_max:12/31/{filter_year}"
            )

        response = requests.request(
            "POST",
            "https://google.serper.dev/search",
            headers=headers,
            data=json.dumps(params),
        )

        if response.status_code == 200:
            results = response.json()
        else:
            raise ValueError(response.json())

        if "organic" not in results.keys() or len(results["organic"]) == 0:
            year_filter_message = (
                f" with filter year={filter_year}" if filter_year is not None else ""
            )
            return f"No results found for '{query}'{year_filter_message}. Try with a more general query, or remove the year filter."

        return self._format_search_results(results["organic"])

    def _format_search_results(self, organic_results: List[Dict[str, Any]]) -> str:
        """Format organic search results into a readable string."""
        web_snippets = []
        for idx, page in enumerate(organic_results):
            date_published = (
                f"\nDate published: {page['date']}" if "date" in page else ""
            )
            source = f"\nSource: {page['source']}" if "source" in page else ""
            snippet = f"\n{page['snippet']}" if "snippet" in page else ""
            formatted_result = f"{idx}. [{page['title']}]({page['link']}){date_published}{source}\n{snippet}"
            # Strip a boilerplate phrase Google injects for video results.
            formatted_result = formatted_result.replace(
                "Your browser can't play this video.", ""
            )
            web_snippets.append(formatted_result)
        return "## Search Results\n" + "\n\n".join(web_snippets)
# Constants and configurations

# Extra module names the CodeAgent's Python executor is allowed to import;
# passed to CodeAgent as `additional_authorized_imports` in create_agent().
AUTHORIZED_IMPORTS = [
    "requests",
    "zipfile",
    "pandas",
    "numpy",
    "sympy",
    "json",
    "bs4",
    "pubchempy",
    "xml",
    "yahoo_finance",
    "Bio",
    "sklearn",
    "scipy",
    "pydub",
    "PIL",
    "chess",
    "PyPDF2",
    "pptx",
    "torch",
    "datetime",
    "fractions",
    "csv",
]
| # Configuration setup | |
def setup_environment():
    """Initialize environment variables and Hugging Face authentication.

    Loads variables from a .env file (overriding any already set) and logs
    in to the Hugging Face Hub with HF_TOKEN.

    Raises:
        ValueError: if HF_TOKEN is not set.
    """
    load_dotenv(override=True)
    token = os.getenv("HF_TOKEN")
    # Bug fix: the original printed the last 10 characters of the token
    # (leaking secret material into logs) and raised a TypeError when
    # HF_TOKEN was unset. Fail fast with a clear error and never log it.
    if not token:
        raise ValueError(
            "HF_TOKEN is not set. Add it to your environment or .env file."
        )
    login(token)
# Browser configuration

# Desktop Chrome/Edge user-agent string sent with every browser request so
# sites serve the regular (non-bot) page.
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

# Keyword arguments unpacked into SimpleTextBrowser (see create_agent).
BROWSER_CONFIG = {
    "viewport_size": 1024 * 5,
    "downloads_folder": "downloads_folder",  # created at startup by main()
    "request_kwargs": {
        "headers": {"User-Agent": user_agent},
        "timeout": 300,  # seconds
    },
    # NOTE(review): this reads SERPAPI_API_KEY while GoogleSearchTool reads
    # SERPER_API_KEY — confirm which provider/key the deployment actually uses.
    "serpapi_key": os.getenv("SERPAPI_API_KEY"),
}

# Custom role conversions for model response handling
custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}
class ModelManager:
    """Manages model loading and initialization."""

    # Bug fix: load_model was defined without `self` yet called on the class;
    # it only worked by accident as a plain function. Declaring it a
    # @staticmethod makes both class-level and instance-level calls valid.
    @staticmethod
    def load_model(chosen_inference: str, model_id: str, key_manager=None):
        """Load the specified model with appropriate configuration.

        Args:
            chosen_inference: Backend selector — one of "hf_api",
                "hf_api_provider", "litellm", "ollama", "openai",
                "transformers".
            model_id: Model identifier passed to the chosen backend.
            key_manager: Object with a get_key(name) method; required for
                the "ollama" and "openai" backends.

        Returns:
            An initialized model instance.

        Raises:
            ValueError: on an unknown inference type or a missing key manager.
        """
        try:
            if chosen_inference == "hf_api":
                return HfApiModel(model_id=model_id)
            elif chosen_inference == "hf_api_provider":
                # NOTE(review): model_id is ignored on this branch — the
                # provider default is used. Confirm this is intended.
                return HfApiModel(provider="together")
            elif chosen_inference == "litellm":
                return LiteLLMModel(model_id=model_id)
            elif chosen_inference == "ollama":
                if not key_manager:
                    raise ValueError("Key manager required for Ollama model")
                return LiteLLMModel(
                    model_id=model_id,
                    api_base="http://localhost:11434",
                    api_key=key_manager.get_key("ollama_api_key"),
                    num_ctx=8192,
                )
            elif chosen_inference == "openai":
                if not key_manager:
                    raise ValueError("Key manager required for OpenAI model")
                return OpenAIServerModel(
                    model_id=model_id, api_key=key_manager.get_key("openai_api_key")
                )
            elif chosen_inference == "transformers":
                # NOTE(review): model_id is ignored here too — a fixed SmolLM2
                # checkpoint is loaded regardless.
                return TransformersModel(
                    model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct",
                    device_map="auto",
                    max_new_tokens=1000,
                )
            else:
                raise ValueError(f"Invalid inference type: {chosen_inference}")
        except Exception as e:
            print(f"✗ Couldn't load model: {e}")
            raise
class ToolRegistry:
    """Manages tool initialization and organization."""

    # Bug fix: these were instance-style methods defined without `self` but
    # always invoked on the class; declare them @staticmethod so both
    # class-level and instance-level calls work correctly.
    @staticmethod
    def load_web_tools(model, browser, text_limit=20000):
        """Initialize and return web-related tools.

        Args:
            model: Model used by TextInspectorTool.
            browser: SimpleTextBrowser instance shared by the navigation tools.
            text_limit: Character limit handed to TextInspectorTool.

        Returns:
            A list of instantiated search/browse/inspect tools.
        """
        return [
            GoogleSearchTool(),
            VisitTool(browser),
            PageUpTool(browser),
            PageDownTool(browser),
            FinderTool(browser),
            FindNextTool(browser),
            ArchiveSearchTool(browser),
            TextInspectorTool(model, text_limit),
        ]

    @staticmethod
    def load_vision_tools():
        """Initialize and return the vision tool backed by a HF Space."""
        try:
            return Tool.from_space(
                space_id="xkerser/gemma-3-12b-it",
                name="gemma_vision",
                description="Upload an image to extract and analyze text and visual content from images using Gemma 3",
            )
        except Exception as e:
            print(f"✗ Couldn't initialize vision tool: {e}")
            raise

    @staticmethod
    def load_image_generation_tools():
        """Initialize and return the FLUX image-generation tool."""
        try:
            return Tool.from_space(
                space_id="xkerser/FLUX.1-dev",
                name="image_generator",
                description="Generates high-quality images using the FLUX.1-dev model based on text prompts.",
            )
        except Exception as e:
            print(f"✗ Couldn't initialize image generation tool: {e}")
            raise
# Agent creation in a factory function
def create_agent():
    """Creates a fresh agent instance for each session.

    Returns:
        A CodeAgent wired with the visualizer, the web tools, and the
        Gemma vision tool.
    """
    # Initialize model
    model = LiteLLMModel(
        custom_role_conversions=custom_role_conversions,
        model_id="openrouter/perplexity/r1-1776",
    )

    # Initialize tools
    text_limit = 20000
    browser = SimpleTextBrowser(**BROWSER_CONFIG)
    web_tools = ToolRegistry.load_web_tools(model, browser, text_limit)
    gemma_vision_tool = ToolRegistry.load_vision_tools()

    # Bug fix: the original passed
    # tools=([visualizer] + web_tools, gemma_vision_tool) — a 2-tuple of
    # (list, Tool) rather than the flat list of tools CodeAgent expects.
    return CodeAgent(
        model=model,
        tools=[visualizer] + web_tools + [gemma_vision_tool],
        max_steps=10,
        verbosity_level=1,
        additional_authorized_imports=AUTHORIZED_IMPORTS,
        planning_interval=4,
    )
def stream_to_gradio(
    agent,
    task: str,
    reset_agent_memory: bool = False,
    additional_args: Optional[dict] = None,
):
    """Runs an agent with the given task and streams messages as gradio ChatMessages."""
    for step_log in agent.run(
        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
    ):
        yield from pull_messages_from_step(step_log)

    # The final item streamed by agent.run is the run's final answer.
    final_answer = handle_agent_output_types(step_log)

    if isinstance(final_answer, AgentText):
        yield gr.ChatMessage(
            role="assistant",
            content=f"**Final answer:**\n{final_answer.to_string()}\n",
        )
    elif isinstance(final_answer, (AgentImage, AgentAudio)):
        # Image and audio answers share the same shape; only the MIME differs.
        mime_type = "image/png" if isinstance(final_answer, AgentImage) else "audio/wav"
        yield gr.ChatMessage(
            role="assistant",
            content={"path": final_answer.to_string(), "mime_type": mime_type},
        )
    else:
        yield gr.ChatMessage(
            role="assistant", content=f"**Final answer:** {str(final_answer)}"
        )
class GradioUI:
    """A one-line interface to launch your agent in Gradio."""

    def __init__(self, file_upload_folder: str | None = None):
        """Initialize the Gradio UI with optional file upload functionality.

        Args:
            file_upload_folder: Directory where uploads are stored; created if
                missing. When None, the upload widgets are never rendered.
        """
        self.file_upload_folder = file_upload_folder
        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                os.mkdir(file_upload_folder)

    def interact_with_agent(self, prompt, messages, session_state):
        """Main interaction handler with the agent.

        Generator: yields the growing chat-message list after each step so
        Gradio can re-render the chatbot incrementally.
        """
        # Get or create session-specific agent (one agent per browser session)
        if "agent" not in session_state:
            session_state["agent"] = create_agent()

        # Adding monitoring
        try:
            # Log the existence of agent memory
            has_memory = hasattr(session_state["agent"], "memory")
            print(f"Agent has memory: {has_memory}")
            if has_memory:
                print(f"Memory type: {type(session_state['agent'].memory)}")

            messages.append(gr.ChatMessage(role="user", content=prompt))
            yield messages

            # reset_agent_memory=False keeps conversation context across turns.
            for msg in stream_to_gradio(
                session_state["agent"], task=prompt, reset_agent_memory=False
            ):
                messages.append(msg)
                yield messages

            yield messages
        except Exception as e:
            print(f"Error in interaction: {str(e)}")
            raise

    def upload_file(
        self,
        file,
        file_uploads_log,
        allowed_file_types=[
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain",
        ],
        # NOTE(review): mutable default argument — only read here, so it is
        # harmless, but a None default would be the safer idiom.
    ):
        """Handle file uploads with proper validation and security.

        Returns:
            A (status Textbox, updated upload-log list) pair.
        """
        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log

        try:
            mime_type, _ = mimetypes.guess_type(file.name)
        except Exception as e:
            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log

        if mime_type not in allowed_file_types:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize file name
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(
            r"[^\w\-.]", "_", original_name
        )  # Replace invalid chars with underscores

        # Ensure the extension correlates to the mime type
        # (first extension registered for each type wins)
        type_to_ext = {}
        for ext, t in mimetypes.types_map.items():
            if t not in type_to_ext:
                type_to_ext[t] = ext

        # Build sanitized filename with proper extension
        # NOTE(review): joining name parts with "" drops interior dots
        # (e.g. "a.b.pdf" -> "ab.pdf") — confirm this is intended.
        name_parts = sanitized_name.split(".")[:-1]
        extension = type_to_ext.get(mime_type, "")
        sanitized_name = "".join(name_parts) + extension

        # Save the uploaded file to the specified folder
        # (self.file_upload_folder is non-None here: the upload widget is
        # only created when a folder was configured — see the layouts below.)
        file_path = os.path.join(self.file_upload_folder, sanitized_name)
        shutil.copy(file.name, file_path)

        return gr.Textbox(
            f"File uploaded: {file_path}", visible=True
        ), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        """Process user message and handle file references.

        Appends a note listing uploaded files to the prompt, then clears and
        disables the input box and run button while the agent works.
        """
        message = text_input
        if len(file_uploads_log) > 0:
            message += f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
        return (
            message,
            gr.Textbox(
                value="",
                interactive=False,
                placeholder="Please wait while Steps are getting populated",
            ),
            gr.Button(interactive=False),
        )

    def detect_device(self, request: gr.Request):
        """Detect whether the user is on mobile or desktop device."""
        if not request:
            return "Unknown device"

        # Method 1: Check sec-ch-ua-mobile header (authoritative when present)
        is_mobile_header = request.headers.get("sec-ch-ua-mobile")
        if is_mobile_header:
            return "Mobile" if "?1" in is_mobile_header else "Desktop"

        # Method 2: Check user-agent string
        user_agent = request.headers.get("user-agent", "").lower()
        mobile_keywords = ["android", "iphone", "ipad", "mobile", "phone"]
        if any(keyword in user_agent for keyword in mobile_keywords):
            return "Mobile"

        # Method 3: Check platform (client-hint values arrive quoted,
        # e.g. '"android"')
        platform = request.headers.get("sec-ch-ua-platform", "").lower()
        if platform:
            if platform in ['"android"', '"ios"']:
                return "Mobile"
            elif platform in ['"windows"', '"macos"', '"linux"']:
                return "Desktop"

        # Default case if no clear indicators
        return "Desktop"

    def launch(self, **kwargs):
        """Launch the Gradio UI with responsive layout.

        NOTE(review): `layout` is defined but never invoked or registered
        (e.g. via demo.load), and the per-device builders construct their own
        gr.Blocks — as written, `demo` launches with no components. This looks
        like the cause of the runtime error; confirm the intended wiring.
        """
        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Different layouts for mobile and computer devices
            def layout(request: gr.Request):
                device = self.detect_device(request)
                print(f"device - {device}")
                # Render layout with sidebar
                if device == "Desktop":
                    return self._create_desktop_layout()
                else:
                    return self._create_mobile_layout()

        demo.launch(debug=True, **kwargs)

    def _create_desktop_layout(self):
        """Create the desktop layout with sidebar."""
        with gr.Blocks(fill_height=True) as sidebar_demo:
            with gr.Sidebar():
                gr.Markdown("""#OpenDeepResearch - free the AI agents!""")

                with gr.Group():
                    gr.Markdown("**What's on your mind mate?**", container=True)
                    text_input = gr.Textbox(
                        lines=3,
                        label="Your request",
                        container=False,
                        placeholder="Enter your prompt here and press Shift+Enter or press the button",
                    )
                    launch_research_btn = gr.Button("Run", variant="primary")

                # If an upload folder is provided, enable the upload feature
                if self.file_upload_folder is not None:
                    upload_file = gr.File(label="Upload a file")
                    upload_status = gr.Textbox(
                        label="Upload Status", interactive=False, visible=False
                    )
                    file_uploads_log = gr.State([])
                    upload_file.change(
                        self.upload_file,
                        [upload_file, file_uploads_log],
                        [upload_status, file_uploads_log],
                    )

                gr.HTML("<br><br><h4><center>Powered by:</center></h4>")
                with gr.Row():
                    gr.HTML(
                        """
                        <div style="display: flex; align-items: center; gap: 8px; font-family: system-ui, -apple-system, sans-serif;">
                        <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png"
                        style="width: 32px; height: 32px; object-fit: contain;" alt="logo">
                        <a target="_blank" href="https://github.com/huggingface/smolagents">
                        <b>huggingface/smolagents</b>
                        </a>
                        </div>
                        """
                    )

            # Add session state to store session-specific data
            session_state = gr.State({})  # Initialize empty state for each session
            stored_messages = gr.State([])
            # Fallback: file_uploads_log is only bound above when the upload
            # feature is enabled; otherwise create an empty state here.
            if not "file_uploads_log" in locals():
                file_uploads_log = gr.State([])

            chatbot = gr.Chatbot(
                label="open-Deep-Research",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                ),
                # NOTE(review): recent gradio spells this "resizable" — confirm
                # against the pinned gradio version.
                resizeable=False,
                scale=1,
                elem_id="my-chatbot",
            )

            self._connect_event_handlers(
                text_input,
                launch_research_btn,
                file_uploads_log,
                stored_messages,
                chatbot,
                session_state,
            )

        return sidebar_demo

    def _create_mobile_layout(self):
        """Create the mobile layout (simpler without sidebar)."""
        with gr.Blocks(fill_height=True) as simple_demo:
            gr.Markdown("""#OpenDeepResearch - free the AI agents!""")

            # Add session state to store session-specific data
            session_state = gr.State({})
            stored_messages = gr.State([])
            file_uploads_log = gr.State([])

            chatbot = gr.Chatbot(
                label="open-Deep-Research",
                type="messages",
                avatar_images=(
                    None,
                    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                ),
                # NOTE(review): recent gradio spells this "resizable" — confirm
                # against the pinned gradio version.
                resizeable=True,
                scale=1,
            )

            # If an upload folder is provided, enable the upload feature
            if self.file_upload_folder is not None:
                upload_file = gr.File(label="Upload a file")
                upload_status = gr.Textbox(
                    label="Upload Status", interactive=False, visible=False
                )
                upload_file.change(
                    self.upload_file,
                    [upload_file, file_uploads_log],
                    [upload_status, file_uploads_log],
                )

            text_input = gr.Textbox(
                lines=1,
                label="What's on your mind mate?",
                placeholder="Chuck in a question and we'll take care of the rest",
            )
            launch_research_btn = gr.Button("Run", variant="primary")

            self._connect_event_handlers(
                text_input,
                launch_research_btn,
                file_uploads_log,
                stored_messages,
                chatbot,
                session_state,
            )

        return simple_demo

    def _connect_event_handlers(
        self,
        text_input,
        launch_research_btn,
        file_uploads_log,
        stored_messages,
        chatbot,
        session_state,
    ):
        """Connect the event handlers for input elements.

        Both submit (Enter) and button click run the same chain:
        log_user_message -> interact_with_agent -> re-enable the inputs.
        """
        # Connect text input submit event
        text_input.submit(
            self.log_user_message,
            [text_input, file_uploads_log],
            [stored_messages, text_input, launch_research_btn],
        ).then(
            self.interact_with_agent,
            [stored_messages, chatbot, session_state],
            [chatbot],
        ).then(
            lambda: (
                gr.Textbox(
                    interactive=True,
                    placeholder="Enter your prompt here and press the button",
                ),
                gr.Button(interactive=True),
            ),
            None,
            [text_input, launch_research_btn],
        )

        # Connect button click event
        launch_research_btn.click(
            self.log_user_message,
            [text_input, file_uploads_log],
            [stored_messages, text_input, launch_research_btn],
        ).then(
            self.interact_with_agent,
            [stored_messages, chatbot, session_state],
            [chatbot],
        ).then(
            lambda: (
                gr.Textbox(
                    interactive=True,
                    placeholder="Enter your prompt here and press the button",
                ),
                gr.Button(interactive=True),
            ),
            None,
            [text_input, launch_research_btn],
        )
def main():
    """Entry point: configure the environment, make sure the downloads
    directory exists, then launch the Gradio interface."""
    setup_environment()

    # The text browser saves downloaded files here; create it up front.
    downloads_dir = os.path.join(".", BROWSER_CONFIG["downloads_folder"])
    os.makedirs(downloads_dir, exist_ok=True)

    GradioUI().launch()


if __name__ == "__main__":
    main()