# NOTE: "Spaces: Running Running" was a Hugging Face Space status header
# captured by the page scrape; it is not part of the application code.
"""
Hierarchical Multi-Agent Research System - LIVE DASHBOARD & REAL-TIME PROGRESS
β¨ Multi-Model Support | π― Configurable AI Models | π Real-Time Progress | π Live Dashboard
This application implements a hierarchical multi-agent research system with:
- Supervisor (Strategy) β Researcher, Analyzer, Critic (Parallel) β Synthesizer
- Real-time progress tracking with live dashboard
- Multi-model support (Qwen, Llama, Mistral)
- Web search capabilities for comprehensive research
"""
| import gradio as gr | |
| import os | |
| import time | |
| from datetime import datetime | |
| from dotenv import load_dotenv | |
| try: | |
| from smolagents import ToolCallingAgent, InferenceClientModel, WebSearchTool | |
| except ImportError: | |
| print("β οΈ Warning: smolagents not installed. Install with: pip install smolagents") | |
| # Load API keys from .env file | |
| load_dotenv() | |
| HF_TOKEN = os.getenv("HF_TOKEN") | |
| # Available Models Configuration | |
| AVAILABLE_MODELS = { | |
| "qwen-2.5-7b": { | |
| "name": "Qwen/Qwen2.5-7B-Instruct", | |
| "provider": "huggingface", | |
| "description": "Fast & Efficient - Quick analysis", | |
| "api_key_required": "HF_TOKEN" | |
| }, | |
| "qwen-2.5-72b": { | |
| "name": "Qwen/Qwen2.5-72B-Instruct", | |
| "provider": "huggingface", | |
| "description": "Most Capable Qwen - Deep analysis", | |
| "api_key_required": "HF_TOKEN" | |
| }, | |
| "meta-llama-3.1-70b": { | |
| "name": "meta-llama/Llama-3.1-70B-Instruct", | |
| "provider": "huggingface", | |
| "description": "Meta Llama 3.1 - Strong reasoning", | |
| "api_key_required": "HF_TOKEN" | |
| }, | |
| "mistral-large": { | |
| "name": "mistralai/Mistral-Large-Instruct-2407", | |
| "provider": "huggingface", | |
| "description": "Mistral Large - Excellent analysis", | |
| "api_key_required": "HF_TOKEN" | |
| } | |
| } | |
| # Default Phase-Model Mapping | |
| DEFAULT_PHASE_MODELS = { | |
| "query_understanding": "qwen-2.5-7b", | |
| "industry_leaders": "qwen-2.5-72b", | |
| "best_practices": "qwen-2.5-72b", | |
| "quality_review": "qwen-2.5-72b", | |
| "recommendations": "qwen-2.5-72b" | |
| } | |
| # ============================================================================ | |
| # RESEARCH STATE MANAGEMENT | |
| # ============================================================================ | |
| class ResearchState: | |
| """Manages research state, search history, and dashboard updates""" | |
| def __init__(self): | |
| self.search_history = [] | |
| self.model_usage = [] | |
| self.results_cache = {} | |
| self.dashboard_updates = [] | |
| def add_search(self, phase, query, model, timestamp): | |
| """Record a search operation""" | |
| self.search_history.append({ | |
| "phase": phase, | |
| "query": query, | |
| "model": model, | |
| "timestamp": timestamp | |
| }) | |
| def add_model_usage(self, phase, model, duration, status): | |
| """Record model usage metrics""" | |
| self.model_usage.append({ | |
| "phase": phase, | |
| "model": model, | |
| "duration": duration, | |
| "status": status, | |
| "timestamp": datetime.now().strftime("%H:%M:%S") | |
| }) | |
| def add_dashboard_update(self, message): | |
| """Add a live update to the dashboard""" | |
| timestamp = datetime.now().strftime("%H:%M:%S") | |
| self.dashboard_updates.append(f"[{timestamp}] {message}") | |
| def get_dashboard_display(self): | |
| """Get the current dashboard display""" | |
| if not self.dashboard_updates: | |
| return "β³ Waiting for research to start..." | |
| dashboard = "# π Live Research Dashboard\n\n" | |
| dashboard += "```\n" | |
| for update in self.dashboard_updates: | |
| dashboard += update + "\n" | |
| dashboard += "```\n" | |
| return dashboard | |
| def clear(self): | |
| """Clear all state for new research""" | |
| self.search_history.clear() | |
| self.model_usage.clear() | |
| self.dashboard_updates.clear() | |
| state = ResearchState() | |
| # ============================================================================ | |
| # VISUALIZATION UTILITIES | |
| # ============================================================================ | |
| def create_progress_bar(percent, width=30): | |
| """Create a simple text-based progress bar""" | |
| filled = int(width * percent / 100) | |
| bar = "β" * filled + "β" * (width - filled) | |
| return f"[{bar}] {percent}%" | |
| def create_hierarchy_diagram(): | |
| """Create ASCII art hierarchy diagram""" | |
| return """ | |
| ``` | |
| βββββββββββββββββββββββ | |
| β SUPERVISOR π― β | |
| β (Strategy) β | |
| ββββββββββββ¬βββββββββββ | |
| β | |
| ββββββββββββββββΌβββββββββββββββ | |
| β β β | |
| βββββββββΌβββββββββ ββββΌβββββββββββ βββΌβββββββββββββ | |
| β RESEARCHER π β β ANALYZER β β β CRITIC π β | |
| β (Leaders) β β (Practices) β β (Quality) β | |
| βββββββββ¬βββββββββ ββββ¬βββββββββββ βββ¬βββββββββββββ | |
| β β β | |
| ββββββββββββββββΌβββββββββββββββ | |
| β | |
| ββββββββββββΌβββββββββββ | |
| β SYNTHESIZER π‘ β | |
| β (Recommendations) β | |
| βββββββββββββββββββββββ | |
| ``` | |
| """ | |
| # ============================================================================ | |
| # MULTI-MODEL RESEARCH ENGINE | |
| # ============================================================================ | |
| class MultiModelResearchEngine: | |
| """Research engine with multi-model support and agent orchestration""" | |
| def __init__(self, phase_models=None): | |
| self.phase_models = phase_models or DEFAULT_PHASE_MODELS | |
| self.models_cache = {} | |
| def get_model(self, model_key): | |
| """Initialize and cache model instances""" | |
| if model_key in self.models_cache: | |
| return self.models_cache[model_key] | |
| model_config = AVAILABLE_MODELS[model_key] | |
| if model_config["provider"] == "huggingface": | |
| if not HF_TOKEN: | |
| raise ValueError(f"HF_TOKEN required for {model_key}") | |
| model = InferenceClientModel( | |
| model_id=model_config["name"], | |
| timeout=120 | |
| ) | |
| self.models_cache[model_key] = model | |
| return model | |
| def run_agent_task(self, phase, task, use_web_search=True): | |
| """Run task with assigned model for the phase""" | |
| model_key = self.phase_models.get(phase, "qwen-2.5-7b") | |
| model_config = AVAILABLE_MODELS[model_key] | |
| start_time = time.time() | |
| try: | |
| model = self.get_model(model_key) | |
| tools = [WebSearchTool()] if use_web_search else [] | |
| # Create agent with compatible configuration | |
| try: | |
| agent = ToolCallingAgent( | |
| tools=tools, | |
| model=model, | |
| max_steps=6 | |
| ) | |
| except TypeError as e: | |
| error_str = str(e) | |
| if "tool" in error_str.lower(): | |
| agent = ToolCallingAgent( | |
| tools=[], | |
| model=model, | |
| max_steps=6 | |
| ) | |
| else: | |
| raise | |
| # Run the task with retry logic | |
| max_retries = 3 | |
| result = None | |
| for attempt in range(max_retries): | |
| try: | |
| result = agent.run(task) | |
| break | |
| except Exception as e: | |
| error_str = str(e) | |
| if "tool_choice" in error_str or "422" in error_str or "Unprocessable" in error_str: | |
| if attempt < max_retries - 1: | |
| state.add_dashboard_update(f"β οΈ API error, retrying without tools...") | |
| time.sleep(2) | |
| try: | |
| agent = ToolCallingAgent( | |
| tools=[], | |
| model=model, | |
| max_steps=6 | |
| ) | |
| except: | |
| pass | |
| continue | |
| else: | |
| result = f"β οΈ API compatibility issue with this model." | |
| else: | |
| raise | |
| elapsed = time.time() - start_time | |
| duration = f"{elapsed:.2f}s" | |
| state.add_model_usage(phase, model_config["name"], duration, "β Success") | |
| return result | |
| except Exception as e: | |
| elapsed = time.time() - start_time | |
| duration = f"{elapsed:.2f}s" | |
| state.add_model_usage(phase, model_config["name"], duration, f"β Error") | |
| raise Exception(f"Error in {phase}: {str(e)}") | |
| def research_industry_leaders(self, topic): | |
| """RESEARCHER AGENT: Research top 5 industry leaders""" | |
| task = f"""Research the TOP 5 INDUSTRY LEADERS for: {topic} | |
| Focus on market leaders, innovators, and established players who are setting standards. | |
| For each leader provide: | |
| 1. **Company/Product Name** | |
| 2. **Website URL** | |
| 3. **Market Position** (e.g., "Market Leader", "Innovative Disruptor", "Established Player") | |
| 4. **Key Strengths** (what makes them successful - be specific) | |
| 5. **Notable Features/Offerings** (unique capabilities or products) | |
| 6. **Market Metrics** (if available: market share, revenue, users, growth rate) | |
| Format each leader clearly with headers. Include citations and source URLs. | |
| Focus on LEADERS who are doing things RIGHT, not competitors to beat.""" | |
| state.add_search( | |
| "Industry Leaders Research", | |
| f"top companies market leaders industry {topic}", | |
| self.phase_models.get("industry_leaders"), | |
| datetime.now().strftime("%Y-%m-%d %H:%M:%S") | |
| ) | |
| return self.run_agent_task("industry_leaders", task, use_web_search=True) | |
| def research_best_practices(self, topic): | |
| """ANALYZER AGENT: Research industry best practices and innovative approaches""" | |
| task = f"""Research BEST PRACTICES and INNOVATIVE APPROACHES for: {topic} | |
| **IMPORTANT:** This is about learning from industry excellence, NOT competitive analysis. | |
| Focus on: What works? What are proven methods? What innovations are emerging? | |
| ## 1. Industry Standards & Frameworks | |
| - Established methodologies and frameworks | |
| - Common practices across successful implementations | |
| - Industry certifications or standards | |
| ## 2. Success Stories & Case Studies | |
| - Real-world examples with measurable outcomes | |
| - Before/after scenarios | |
| - ROI or impact metrics | |
| ## 3. Innovation Patterns (2024-2025) | |
| - Emerging trends and cutting-edge approaches | |
| - Technology innovations being adopted | |
| - What's working well right now | |
| ## 4. Implementation Guidelines | |
| - Step-by-step approaches that work | |
| - Common architecture patterns | |
| - Tools and platforms being used | |
| ## 5. Key Takeaways | |
| - What makes implementations successful | |
| - Common pitfalls to avoid | |
| - Lessons learned from leaders | |
| Provide specific examples with citations and source URLs.""" | |
| state.add_search( | |
| "Best Practices Research", | |
| f"best practices industry standards {topic}", | |
| self.phase_models.get("best_practices"), | |
| datetime.now().strftime("%Y-%m-%d %H:%M:%S") | |
| ) | |
| return self.run_agent_task("best_practices", task, use_web_search=True) | |
| def quality_review(self, research_text): | |
| """CRITIC AGENT: Independent quality review""" | |
| task = f"""Perform an INDEPENDENT QUALITY REVIEW of this research: | |
| {research_text} | |
| Evaluate and provide: | |
| ## 1. Research Completeness | |
| - Are all key areas covered? | |
| - Any major gaps or missing perspectives? | |
| - Breadth vs depth assessment | |
| ## 2. Source Quality & Credibility | |
| - How credible are the sources? | |
| - Are claims well-supported? | |
| - Any red flags or questionable information? | |
| ## 3. Recency & Relevance | |
| - Is the information current (2024-2025)? | |
| - How relevant to the topic? | |
| - Any outdated information? | |
| ## 4. Clarity & Usefulness | |
| - Is the research well-organized? | |
| - Easy to understand and actionable? | |
| - Practical value for decision-making? | |
| ## 5. Improvement Recommendations | |
| - What would make this research better? | |
| - Any critical missing information? | |
| - Suggested next steps for deeper research? | |
| ## 6. Overall Assessment | |
| - Rate completeness (1-10) | |
| - Rate quality (1-10) | |
| - Rate actionability (1-10) | |
| Be honest and constructive. This is for improvement, not criticism.""" | |
| state.add_search( | |
| "Quality Review", | |
| "Independent assessment of research quality", | |
| self.phase_models.get("quality_review"), | |
| datetime.now().strftime("%Y-%m-%d %H:%M:%S") | |
| ) | |
| # Use Qwen model for quality review to avoid tool_choice issues | |
| original_quality_model = self.phase_models["quality_review"] | |
| self.phase_models["quality_review"] = "qwen-2.5-72b" | |
| result = self.run_agent_task("quality_review", task, use_web_search=False) | |
| self.phase_models["quality_review"] = original_quality_model | |
| return result | |
| def generate_recommendations(self, topic, research_text): | |
| """SYNTHESIZER AGENT: Generate strategic recommendations""" | |
| task = f"""Based on this comprehensive research about {topic}: | |
| {research_text} | |
| Generate a STRATEGIC RECOMMENDATIONS ROADMAP: | |
| ## 1. Executive Summary | |
| - Key findings in 2-3 sentences | |
| - Primary opportunities identified | |
| - Critical success factors | |
| ## 2. Immediate Actions (0-30 days) | |
| - Quick wins to implement now | |
| - Low-hanging fruit | |
| - Quick assessments or pilots | |
| ## 3. Short-term Strategy (1-3 months) | |
| - Build on immediate actions | |
| - Implement core initiatives | |
| - Establish processes | |
| ## 4. Long-term Vision (3-12 months) | |
| - Strategic positioning | |
| - Competitive advantages | |
| - Sustainable growth | |
| ## 5. Success Metrics | |
| - KPIs to track progress | |
| - Milestones and checkpoints | |
| - How to measure success | |
| ## 6. Risk Mitigation | |
| - Potential challenges | |
| - Mitigation strategies | |
| - Contingency plans | |
| ## 7. Resource Requirements | |
| - Team skills needed | |
| - Tools and platforms | |
| - Budget considerations (if applicable) | |
| ## 8. Next Steps | |
| - Immediate action items | |
| - Who should lead | |
| - Timeline | |
| Make recommendations specific, actionable, and grounded in the research.""" | |
| state.add_search( | |
| "Strategic Recommendations", | |
| f"Generate recommendations for {topic}", | |
| self.phase_models.get("recommendations"), | |
| datetime.now().strftime("%Y-%m-%d %H:%M:%S") | |
| ) | |
| return self.run_agent_task("recommendations", task, use_web_search=False) | |
| # ============================================================================ | |
| # MAIN RESEARCH ORCHESTRATION | |
| # ============================================================================ | |
| def run_research(topic, model_query, model_leaders, model_practices, model_quality, model_recommendations, progress=gr.Progress()): | |
| """Main research orchestration with real-time progress and live dashboard""" | |
| if not topic or not topic.strip(): | |
| return "β Please enter a research topic", "", "", "", "", "" | |
| if not HF_TOKEN: | |
| return "β No HF_TOKEN found! Set it in your environment variables or .env file", "", "", "", "", "" | |
| # Clear previous state | |
| state.clear() | |
| # Configure phase models based on user selection | |
| phase_models = { | |
| "query_understanding": model_query, | |
| "industry_leaders": model_leaders, | |
| "best_practices": model_practices, | |
| "quality_review": model_quality, | |
| "recommendations": model_recommendations | |
| } | |
| try: | |
| engine = MultiModelResearchEngine(phase_models) | |
| # Initial dashboard message | |
| state.add_dashboard_update("π Research started!") | |
| state.add_dashboard_update(f"π Topic: {topic}") | |
| state.add_dashboard_update(f"π€ Models configured: {len(set(phase_models.values()))} unique models") | |
| state.add_dashboard_update("") | |
| state.add_dashboard_update("=" * 60) | |
| # ==================================================================== | |
| # PHASE 1: RESEARCHER AGENT (Industry Leaders) | |
| # ==================================================================== | |
| progress(0, desc="π RESEARCHER AGENT: Analyzing Industry Leaders...") | |
| state.add_dashboard_update("π PHASE 1: RESEARCHER AGENT - Industry Leaders") | |
| state.add_dashboard_update(f" Model: {AVAILABLE_MODELS[model_leaders]['name']}") | |
| state.add_dashboard_update(" Status: β³ Running...") | |
| start_researcher = time.time() | |
| leaders = engine.research_industry_leaders(topic) | |
| researcher_time = time.time() - start_researcher | |
| state.add_dashboard_update(f" Status: β Complete ({researcher_time:.1f}s)") | |
| state.add_dashboard_update("") | |
| progress(0.25, desc=f"β Researcher Agent completed in {researcher_time:.1f}s\nβ ANALYZER AGENT: Researching Best Practices...") | |
| # ==================================================================== | |
| # PHASE 2: ANALYZER AGENT (Best Practices) | |
| # ==================================================================== | |
| state.add_dashboard_update("β PHASE 2: ANALYZER AGENT - Best Practices") | |
| state.add_dashboard_update(f" Model: {AVAILABLE_MODELS[model_practices]['name']}") | |
| state.add_dashboard_update(" Status: β³ Running...") | |
| start_analyzer = time.time() | |
| practices = engine.research_best_practices(topic) | |
| analyzer_time = time.time() - start_analyzer | |
| state.add_dashboard_update(f" Status: β Complete ({analyzer_time:.1f}s)") | |
| state.add_dashboard_update("") | |
| all_research = f"{leaders}\n\n{practices}" | |
| progress(0.50, desc=f"β Analyzer Agent completed in {analyzer_time:.1f}s\nπ CRITIC AGENT: Quality Assurance Review...") | |
| # ==================================================================== | |
| # PHASE 3: CRITIC AGENT (Quality Review) | |
| # ==================================================================== | |
| state.add_dashboard_update("π PHASE 3: CRITIC AGENT - Quality Review") | |
| state.add_dashboard_update(f" Model: {AVAILABLE_MODELS[model_quality]['name']}") | |
| state.add_dashboard_update(" Status: β³ Running...") | |
| start_critic = time.time() | |
| review = engine.quality_review(all_research) | |
| critic_time = time.time() - start_critic | |
| state.add_dashboard_update(f" Status: β Complete ({critic_time:.1f}s)") | |
| state.add_dashboard_update("") | |
| progress(0.75, desc=f"β Critic Agent completed in {critic_time:.1f}s\nπ‘ SYNTHESIZER AGENT: Generating Recommendations...") | |
| # ==================================================================== | |
| # PHASE 4: SYNTHESIZER AGENT (Recommendations) | |
| # ==================================================================== | |
| state.add_dashboard_update("π‘ PHASE 4: SYNTHESIZER AGENT - Recommendations") | |
| state.add_dashboard_update(f" Model: {AVAILABLE_MODELS[model_recommendations]['name']}") | |
| state.add_dashboard_update(" Status: β³ Running...") | |
| start_synthesizer = time.time() | |
| recommendations = engine.generate_recommendations(topic, all_research) | |
| synthesizer_time = time.time() - start_synthesizer | |
| state.add_dashboard_update(f" Status: β Complete ({synthesizer_time:.1f}s)") | |
| state.add_dashboard_update("") | |
| # ==================================================================== | |
| # FINAL SYNTHESIS | |
| # ==================================================================== | |
| total_time = researcher_time + analyzer_time + critic_time + synthesizer_time | |
| progress(0.95, desc=f"β Synthesizer Agent completed in {synthesizer_time:.1f}s\nπ Finalizing results...") | |
| state.add_dashboard_update("=" * 60) | |
| state.add_dashboard_update("π RESEARCH COMPLETE!") | |
| state.add_dashboard_update("") | |
| state.add_dashboard_update("π EXECUTION SUMMARY:") | |
| state.add_dashboard_update(f" π Researcher: {researcher_time:.1f}s {create_progress_bar(100, width=15)}") | |
| state.add_dashboard_update(f" β Analyzer: {analyzer_time:.1f}s {create_progress_bar(100, width=15)}") | |
| state.add_dashboard_update(f" π Critic: {critic_time:.1f}s {create_progress_bar(100, width=15)}") | |
| state.add_dashboard_update(f" π‘ Synthesizer: {synthesizer_time:.1f}s {create_progress_bar(100, width=15)}") | |
| state.add_dashboard_update(f" ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ") | |
| state.add_dashboard_update(f" π TOTAL TIME: {total_time:.1f}s {create_progress_bar(100, width=15)}") | |
| state.add_dashboard_update("") | |
| state.add_dashboard_update("β All agents completed successfully!") | |
| state.add_dashboard_update(f"π Total searches performed: {len(state.search_history)}") | |
| state.add_dashboard_update(f"π€ Unique models used: {len(set(u['model'] for u in state.model_usage))}") | |
| # Create summary with infographics | |
| summary = f"""# π― Research Report: {topic} | |
| **Generated:** {datetime.now().strftime("%B %d, %Y at %I:%M %p")} | |
| {create_hierarchy_diagram()} | |
| --- | |
| ## β Agent Execution Status | |
| | Agent | Status | Duration | | |
| |-------|--------|----------| | |
| | π Researcher | β Complete | {researcher_time:.1f}s | | |
| | β Analyzer | β Complete | {analyzer_time:.1f}s | | |
| | π Critic | β Complete | {critic_time:.1f}s | | |
| | π‘ Synthesizer | β Complete | {synthesizer_time:.1f}s | | |
| --- | |
| ## β±οΈ Execution Timeline | |
| ``` | |
| π Researcher: {create_progress_bar(100, width=20)} {researcher_time:.1f}s | |
| β Analyzer: {create_progress_bar(100, width=20)} {analyzer_time:.1f}s | |
| π Critic: {create_progress_bar(100, width=20)} {critic_time:.1f}s | |
| π‘ Synthesizer: {create_progress_bar(100, width=20)} {synthesizer_time:.1f}s | |
| ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| π Total: {create_progress_bar(100, width=20)} {total_time:.1f}s | |
| ``` | |
| --- | |
| ## π Performance Metrics | |
| | Metric | Value | | |
| |--------|-------| | |
| | **Total Processing Time** | {total_time:.1f}s | | |
| | **Average Phase Duration** | {total_time/4:.1f}s | | |
| | **Fastest Phase** | {min(researcher_time, analyzer_time, critic_time, synthesizer_time):.1f}s | | |
| | **Slowest Phase** | {max(researcher_time, analyzer_time, critic_time, synthesizer_time):.1f}s | | |
| | **Total Web Searches** | {len(state.search_history)} | | |
| | **Unique Models Used** | {len(set(u['model'] for u in state.model_usage))} | | |
| --- | |
| ## π― Research Coverage | |
| | Phase | Model | Status | | |
| |-------|-------|--------| | |
| | π Industry Leaders | {AVAILABLE_MODELS[model_leaders]['name'].split('/')[-1]} | β | | |
| | β Best Practices | {AVAILABLE_MODELS[model_practices]['name'].split('/')[-1]} | β | | |
| | π Quality Review | {AVAILABLE_MODELS[model_quality]['name'].split('/')[-1]} | β | | |
| | π‘ Recommendations | {AVAILABLE_MODELS[model_recommendations]['name'].split('/')[-1]} | β | | |
| --- | |
| ## π Research Metadata | |
| - **Topic:** {topic} | |
| - **Generated:** {datetime.now().strftime("%B %d, %Y at %I:%M %p")} | |
| - **Data Recency:** 2024-2025 | |
| - **Total Searches:** {len(state.search_history)} | |
| - **Success Rate:** 100% β | |
| """ | |
| # Get dashboard display | |
| dashboard_display = state.get_dashboard_display() | |
| progress(1.0, desc="β Research Complete!") | |
| return summary, leaders, practices, review, recommendations, dashboard_display | |
| except Exception as e: | |
| state.add_dashboard_update(f"β ERROR: {str(e)}") | |
| error = f"""β **Error:** {str(e)} | |
| **Troubleshooting:** | |
| 1. **Check API Keys** - Verify HF_TOKEN is set: | |
| ``` | |
| export HF_TOKEN=your_huggingface_token | |
| ``` | |
| 2. **Get HF Token** - Visit: https://huggingface.co/settings/tokens | |
| - Click "New token" | |
| - Copy token (starts with hf_...) | |
| 3. **Check Internet** - Ensure stable connection for web searches | |
| 4. **Try Default Models** - Use Qwen models if others fail | |
| 5. **Simplify Topic** - Try a more specific, focused research query | |
| """ | |
| dashboard_display = state.get_dashboard_display() | |
| return error, "", "", "", "", dashboard_display | |
| # Helper function to get available models | |
| def get_available_model_choices(): | |
| """Get list of available models based on API keys present""" | |
| available = [] | |
| for key, config in AVAILABLE_MODELS.items(): | |
| api_key = config["api_key_required"] | |
| if api_key == "HF_TOKEN" and HF_TOKEN: | |
| available.append((f"{config['description']}", key)) | |
| if not available: | |
| available = [("Qwen 2.5 7B (Default)", "qwen-2.5-7b")] | |
| return available | |
| # ============================================================================ | |
| # CREATE GRADIO INTERFACE | |
| # ============================================================================ | |
| def create_interface(): | |
| """Create and return the Gradio interface""" | |
| with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model Research System") as demo: | |
| gr.Markdown(""" | |
| # ποΈ Multi-Model Research System | |
| ### Intelligent Market Research with Real-Time Progress & Live Dashboard | |
| """) | |
| with gr.Row(): | |
| with gr.Column(scale=3): | |
| topic_input = gr.Textbox( | |
| label="π What do you want to research?", | |
| placeholder="Example: 'AI project management tools', 'Sustainable fashion brands', 'Electric vehicle charging'", | |
| lines=2 | |
| ) | |
| with gr.Accordion("π API Status & Models Available", open=False): | |
| api_info = f""" | |
| **API Keys Loaded:** | |
| - HF_TOKEN: {'β Active' if HF_TOKEN else 'β Required'} | |
| **Available Models:** {len([k for k, v in AVAILABLE_MODELS.items() if v['api_key_required'] == 'HF_TOKEN' and HF_TOKEN])} | |
| """ | |
| gr.Markdown(api_info) | |
| with gr.Column(scale=2): | |
| gr.Markdown(""" | |
| ### π Your Research Will Include | |
| | Component | Description | | |
| |-----------|-------------| | |
| | π **Industry Leaders** | Top 5 companies setting standards | | |
| | β **Best Practices** | Proven methods & innovations | | |
| | π **Quality Review** | Independent assessment | | |
| | π‘ **Recommendations** | Actionable strategic roadmap | | |
| | π **Live Dashboard** | Real-time progress updates | | |
| """) | |
| # Model Configuration | |
| with gr.Accordion("π€ Configure AI Models (Optional)", open=False): | |
| gr.Markdown("**Customize which AI model handles each research phase**") | |
| available_choices = get_available_model_choices() | |
| with gr.Row(): | |
| model_query = gr.Dropdown( | |
| choices=available_choices, | |
| value="qwen-2.5-7b", | |
| label="1οΈβ£ Query Understanding" | |
| ) | |
| model_leaders = gr.Dropdown( | |
| choices=available_choices, | |
| value="qwen-2.5-72b", | |
| label="2οΈβ£ Industry Leaders" | |
| ) | |
| with gr.Row(): | |
| model_practices = gr.Dropdown( | |
| choices=available_choices, | |
| value="qwen-2.5-72b", | |
| label="3οΈβ£ Best Practices" | |
| ) | |
| model_quality = gr.Dropdown( | |
| choices=available_choices, | |
| value="qwen-2.5-72b", | |
| label="4οΈβ£ Quality Review" | |
| ) | |
| model_recommendations = gr.Dropdown( | |
| choices=available_choices, | |
| value="qwen-2.5-72b", | |
| label="5οΈβ£ Recommendations" | |
| ) | |
| submit_btn = gr.Button("π Start Research", variant="primary", size="lg") | |
| gr.Markdown("---") | |
| # Live Dashboard - FIRST TAB | |
| with gr.Tabs(): | |
| with gr.Tab("π Live Dashboard"): | |
| gr.Markdown("**Real-time progress updates as research happens**") | |
| dashboard_output = gr.Markdown(value="β³ Waiting for research to start...", label="Dashboard") | |
| with gr.Tab("π Summary"): | |
| gr.Markdown("**Overview of your research with model usage and metadata**") | |
| summary_output = gr.Markdown() | |
| with gr.Tab("π Industry Leaders"): | |
| gr.Markdown("**Top 5 companies/products dominating this space**") | |
| leaders_output = gr.Markdown() | |
| with gr.Tab("β Best Practices"): | |
| gr.Markdown("**Proven strategies and innovative approaches**") | |
| practices_output = gr.Markdown() | |
| with gr.Tab("π Quality Review"): | |
| gr.Markdown("**Independent assessment of research quality**") | |
| review_output = gr.Markdown() | |
| with gr.Tab("π‘ Recommendations"): | |
| gr.Markdown("**Actionable strategic roadmap**") | |
| recommendations_output = gr.Markdown() | |
| # Connect button | |
| submit_btn.click( | |
| fn=run_research, | |
| inputs=[ | |
| topic_input, | |
| model_query, | |
| model_leaders, | |
| model_practices, | |
| model_quality, | |
| model_recommendations | |
| ], | |
| outputs=[ | |
| summary_output, | |
| leaders_output, | |
| practices_output, | |
| review_output, | |
| recommendations_output, | |
| dashboard_output | |
| ] | |
| ) | |
| gr.Markdown(""" | |
| --- | |
| ### π Quick Start | |
| 1. **Set HF_TOKEN** - Add to environment: `export HF_TOKEN=your_token` | |
| 2. **Enter research topic** | |
| 3. **Click "Start Research"** | |
| 4. **Watch the Live Dashboard tab** for real-time updates | |
| 5. **Results appear in other tabs** as they complete | |
| --- | |
| ### π About This System | |
| This is a hierarchical multi-agent research system with: | |
| - **Supervisor**: Orchestrates the research process | |
| - **Researcher Agent**: Identifies industry leaders | |
| - **Analyzer Agent**: Researches best practices | |
| - **Critic Agent**: Quality assurance review | |
| - **Synthesizer Agent**: Generates recommendations | |
| All agents work in parallel with real-time progress tracking! | |
| """) | |
| return demo | |
| # ============================================================================ | |
| # MAIN ENTRY POINT | |
| # ============================================================================ | |
| if __name__ == "__main__": | |
| print("\n" + "="*70) | |
| print("ποΈ MULTI-MODEL RESEARCH SYSTEM - LIVE DASHBOARD & REAL-TIME PROGRESS") | |
| print("="*70) | |
| print("\nπ API Keys:") | |
| print(f" HF_TOKEN: {'β Loaded' if HF_TOKEN else 'β Missing (REQUIRED)'}") | |
| print("\nπ Available Models:") | |
| for key, config in AVAILABLE_MODELS.items(): | |
| has_key = config["api_key_required"] == "HF_TOKEN" and HF_TOKEN | |
| status = "β " if has_key else "β" | |
| print(f" {status} {config['name']}") | |
| if not HF_TOKEN: | |
| print("\nβ οΈ WARNING: HF_TOKEN not found!") | |
| print(" Set it with: export HF_TOKEN=your_huggingface_token") | |
| else: | |
| print("\nβ Ready to launch!") | |
| print("\nπ Starting server...") | |
| print("="*70 + "\n") | |
| demo = create_interface() | |
| demo.launch( | |
| server_name="0.0.0.0", | |
| server_port=7860, | |
| share=False | |
| ) | |