diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -1,47 +1,46 @@ from dotenv import load_dotenv -load_dotenv() +load_dotenv() """ -πŸ›οΈ AI Democracy - Multi-Model Consensus System -The Agora: Where artificial minds gather to forge wisdom +AI Democracy - Multi-Model Consensus System +Professional Edition with Clean Architecture -A revolutionary platform for AI model deliberation and consensus building. """ -# AGNO IMPORTS -from agno.agent import Agent -from agno.team.team import Team -import asyncio -from textwrap import dedent -# Add these imports at the top of your file, after the existing imports -from agno.models.openai import OpenAIChat -from agno.models.anthropic import Claude -from agno.models.mistral import MistralChat -from agno.models.sambanova import Sambanova - -# Misc imports -import logging -from enum import Enum from dataclasses import dataclass +from typing import List, Dict, Any, Optional from datetime import datetime -from typing import List, Optional, Dict, Any +import json +import traceback +import re +import logging +from enum import Enum import os import uuid -import json -# Database +# pred Agent frameworks +from agno.agent import Agent +from agno.models.openai import OpenAIChat +from agno.models.anthropic import Claude +from agno.models.mistral import MistralChat +from agno.models.sambanova import Sambanova +from agno.knowledge.knowledge import Knowledge +from agno.vectordb.pgvector import PgVector +from agno.knowledge.embedder.huggingface import HuggingfaceCustomEmbedder from supabase import create_client import gradio as gr # Configure logging -logging.basicConfig(level=logging.INFO) +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) logger = logging.getLogger(__name__) -class ModelType(Enum): - CLAUDE = "claude" - GPT4 = "gpt4" - MISTRAL = "mistral" - SAMBANOVA = "sambanova" + + +# DATA MODELS + class ProblemDomain(Enum): MEDICAL = "medical" @@ -51,6 +50,7 @@ class ProblemDomain(Enum): ETHICAL = "ethical" GENERAL = "general" + @dataclass class ModelResponse: model_name: str @@ -60,6 +60,7 @@ class ModelResponse: timestamp: datetime tokens_used: int = 0 + @dataclass class DebateRound: round_number: int @@ -67,6 +68,7 @@ class DebateRound: consensus_score: float timestamp: datetime + @dataclass class Problem: id: str @@ -77,426 +79,69 @@ class Problem: user_id: str timestamp: datetime -# Utility function -def get_current_timestamp(): - """Get current timestamp in ISO format""" - return datetime.now().isoformat() -# API Keys and Configuration - with validation -def get_api_key(key_name: str) -> Optional[str]: - """Safely get API key with validation""" - key = os.environ.get(key_name) - if not key: - logger.warning(f"⚠️ {key_name} not found in environment variables") - return key -ANTHROPIC_API_KEY = get_api_key("ANTHROPIC_API_KEY") -OPENAI_API_KEY = get_api_key("OPENAI_API_KEY") -MISTRAL_API_KEY = get_api_key("MISTRAL_API_KEY") -SAMBANOVA_API_KEY = get_api_key("SAMBANOVA_API_KEY") -SUPABASE_DB_PASSWORD = get_api_key("SUPABASE_DB_PASSWORD") -SUPABASE_KEY = get_api_key("SUPABASE_KEY") -SUPABASE_URL = get_api_key("SUPABASE_URL") +# CONFIGURATION -# IMPROVED EMBEDDER SETUP -def setup_embedder(): - """Setup embedder with proper error handling""" - try: - from agno.embedder.huggingface import HuggingfaceCustomEmbedder - embedder = HuggingfaceCustomEmbedder() - - # Patch embedding_dimension if missing - if not hasattr(embedder, "embedding_dimension"): - if hasattr(embedder, "model") and 
hasattr(embedder.model, "get_sentence_embedding_dimension"): - embedder.embedding_dimension = embedder.model.get_sentence_embedding_dimension() - elif hasattr(embedder, "model") and hasattr(embedder.model, "get_output_dimension"): - embedder.embedding_dimension = embedder.model.get_output_dimension() - else: - try: - dummy = embedder.get_embedding("test") - embedder.embedding_dimension = len(dummy) - except Exception: - embedder.embedding_dimension = 384 # Default for MiniLM - - logger.info(f"βœ… Embedder initialized with dimension: {embedder.embedding_dimension}") - return embedder - except Exception as e: - logger.error(f"❌ Failed to initialize embedder: {str(e)}") - return None - -# IMPROVED KNOWLEDGE BASE SETUP -def setup_knowledge_base(embedder): - """Setup knowledge base with proper error handling""" - if not embedder or not SUPABASE_URL or not SUPABASE_DB_PASSWORD: - logger.warning("⚠️ Knowledge base disabled due to missing components") - return None - - try: - from agno.agent import AgentKnowledge - from agno.vectordb.pgvector import PgVector - - knowledge_base = AgentKnowledge( - embedder=embedder, - vector_db=PgVector( - host=SUPABASE_URL.replace("https://", "").split(".")[0], - port=5432, - user="postgres", - password=SUPABASE_DB_PASSWORD, - database="postgres", - table_name="conversations_w_llm", - embedding_dimension=embedder.embedding_dimension, - ), - ) - logger.info("βœ… Knowledge base initialized") - return knowledge_base - except Exception as e: - logger.error(f"❌ Failed to initialize knowledge base: {str(e)}") - return None - -# Initialize components -embedder = setup_embedder() -knowledge_base = setup_knowledge_base(embedder) - - -# Enhanced Output Formatter for AI Democracy System -from dataclasses import dataclass -from typing import List, Dict, Any -from datetime import datetime -import json - -# Enhanced Output Formatter for AI Democracy System - MARKDOWN REMOVED -from dataclasses import dataclass -from typing import List, Dict, Any -from datetime import datetime -import json -import re - -class AgoraOutputFormatter: - """Enhanced formatter for making Agora analysis results more readable and visually appealing""" +class Config: + """Central configuration management""" def __init__(self): - self.emojis = { - 'high_quality': '🌟', - 'medium_quality': '⭐', - 'low_quality': 'πŸ’«', - 'consensus_high': '🎯', - 'consensus_medium': 'πŸ”„', - 'consensus_low': 'πŸ”€', - 'confidence_high': 'πŸ’ͺ', - 'confidence_medium': 'πŸ‘', - 'confidence_low': 'πŸ€”', - 'agent': 'πŸ€–', - 'analysis': 'πŸ”¬', - 'insights': 'πŸ’‘', - 'recommendations': 'πŸ“‹', - 'risks': '⚠️', - 'benefits': 'βœ…', - 'summary': 'πŸ“Š', - 'timestamp': 'πŸ•’', - 'domain': '🏷️', - 'problem': '🎯', - 'quality': '⭐', - 'database': 'πŸ’Ύ' + self.ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY") + self.OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") + self.MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY") + self.SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY") + self.SUPABASE_URL = os.getenv("SUPABASE_URL") + self.SUPABASE_KEY = os.getenv("SUPABASE_KEY") + self.SUPABASE_DB_PASSWORD = os.getenv("SUPABASE_DB_PASSWORD") + + self._validate() + + def _validate(self): + """Log warnings for missing keys""" + keys = { + 'ANTHROPIC_API_KEY': self.ANTHROPIC_API_KEY, + 'OPENAI_API_KEY': self.OPENAI_API_KEY, + 'MISTRAL_API_KEY': self.MISTRAL_API_KEY, + 'SAMBANOVA_API_KEY': self.SAMBANOVA_API_KEY, } + for name, value in keys.items(): + if not value: + logger.warning(f"{name} not configured") - def format_debate_results(self, problem: 
'Problem', debate_round: 'DebateRound', save_success: bool = True) -> tuple[str, str]: - """Format the complete debate results with enhanced readability""" - - # Generate main results - main_output = self._generate_enhanced_main_output(problem, debate_round, save_success) - - # Generate summary - summary_output = self._generate_enhanced_summary(problem, debate_round) - - return main_output, summary_output - - def _generate_enhanced_main_output(self, problem: 'Problem', debate_round: 'DebateRound', save_success: bool) -> str: - """Generate the main enhanced output with beautiful formatting""" - - # Header section - header = self._create_header(problem, debate_round, save_success) - - # Agent responses section - responses_section = self._create_responses_section(debate_round.responses) - - # Consensus analysis section - consensus_section = self._create_consensus_section(debate_round) - - # Quality metrics section - metrics_section = self._create_metrics_section(debate_round.responses) - - return f"{header}\n\n{responses_section}\n\n{consensus_section}\n\n{metrics_section}" + def has_ai_models(self) -> bool: + """Check if at least one AI model is available""" + return any([ + self.ANTHROPIC_API_KEY, + self.OPENAI_API_KEY, + self.MISTRAL_API_KEY, + self.SAMBANOVA_API_KEY + ]) - def _create_header(self, problem: 'Problem', debate_round: 'DebateRound', save_success: bool) -> str: - """Create an attractive header section""" - quality_emoji = self._get_quality_emoji(debate_round.consensus_score) - consensus_emoji = self._get_consensus_emoji(debate_round.consensus_score) - save_status = f"{self.emojis['database']} Saved to database" if save_success else "⚠️ Database save failed" - - return f""" -╔══════════════════════════════��═══════════════════════════════════════════════╗ -β•‘ {quality_emoji} AI DEMOCRACY ANALYSIS RESULTS {quality_emoji} β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• + def has_database(self) -> bool: + """Check if database is configured""" + return all([self.SUPABASE_URL, self.SUPABASE_KEY, self.SUPABASE_DB_PASSWORD]) -{self.emojis['problem']} Problem: {problem.title} -{self.emojis['domain']} Domain: {problem.domain.value.title()} -{consensus_emoji} Consensus Score: {debate_round.consensus_score:.2f}/1.00 ({self._get_consensus_label(debate_round.consensus_score)}) -{self.emojis['timestamp']} Completed: {debate_round.timestamp.strftime('%Y-%m-%d at %H:%M:%S')} -{self.emojis['agent']} AI Agents: {len(debate_round.responses)} real models responded -{save_status} -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Problem Description: β”‚ -β”‚ {self._wrap_text(problem.description, 75)} β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -""" - - def _create_responses_section(self, responses: List['ModelResponse']) -> str: - """Create beautifully formatted agent responses section""" - section_lines = [ - "╔══════════════════════════════════════════════════════════════════════════════╗", - "β•‘ πŸ€– AI AGENT 
RESPONSES β•‘", - "β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•", - "" - ] - - for i, response in enumerate(responses, 1): - confidence_emoji = self._get_confidence_emoji(response.confidence) - agent_box = self._create_agent_response_box(i, response, confidence_emoji) - section_lines.append(agent_box) - section_lines.append("") - - return "\n".join(section_lines) +class TextUtils: + """Text formatting and processing utilities""" - def _create_agent_response_box(self, index: int, response: 'ModelResponse', confidence_emoji: str) -> str: - """Create a formatted box for each agent response""" - # Parse and format the response content - formatted_response = self._format_agent_response_content(response.response) - - return f"""β”Œβ”€ {index}. {response.model_name} ─{"─" * (65 - len(response.model_name))}┐ -β”‚ {confidence_emoji} Confidence: {response.confidence:.2f} β”‚ {self.emojis['timestamp']} {response.timestamp.strftime('%H:%M:%S')} β”‚ Tokens: ~{int(response.tokens_used)} β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -{formatted_response} -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - - def _clean_markdown(self, text: str) -> str: - """Remove all markdown formatting from text""" - # Remove bold/italic markers - text = re.sub(r'\*\*([^*]+)\*\*', r'\1', text) # **bold** -> bold - text = re.sub(r'\*([^*]+)\*', r'\1', text) # *italic* -> italic - text = re.sub(r'__([^_]+)__', r'\1', text) # __bold__ -> bold - text = re.sub(r'_([^_]+)_', r'\1', text) # _italic_ -> italic - - # Remove headers - text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE) # # Header -> Header - - # Remove code blocks + @staticmethod + def clean_markdown(text: str) -> str: + """Remove markdown formatting""" + text = re.sub(r'\*\*([^*]+)\*\*', r'\1', text) + text = re.sub(r'\*([^*]+)\*', r'\1', text) + text = re.sub(r'^#{1,6}\s+', '', text, flags=re.MULTILINE) text = re.sub(r'```[^`]*```', '[Code Block]', text, flags=re.DOTALL) - text = re.sub(r'`([^`]+)`', r'\1', text) # `code` -> code - - # Remove links - text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text) # [text](url) -> text - - # Remove list markers - text = re.sub(r'^[\*\-\+]\s+', 'β€’ ', text, flags=re.MULTILINE) # - item -> β€’ item - text = re.sub(r'^\d+\.\s+', 'β€’ ', text, flags=re.MULTILINE) # 1. 
item -> β€’ item - - # Remove extra whitespace - text = re.sub(r'\n\s*\n', '\n\n', text) # Multiple newlines -> double newline - text = text.strip() - - return text - - def _format_agent_response_content(self, response_text: str) -> str: - """Format agent response content with markdown removed and better structure""" - # First, clean all markdown - clean_text = self._clean_markdown(response_text) - - # Split into paragraphs - paragraphs = [p.strip() for p in clean_text.split('\n\n') if p.strip()] - formatted_lines = [] - - for paragraph in paragraphs: - if not paragraph.strip(): - continue - - # Check if it's likely a header (short line that's not a sentence) - is_header = ( - len(paragraph) < 80 and - not paragraph.endswith('.') and - not paragraph.endswith('?') and - not paragraph.endswith('!') and - ':' in paragraph[-10:] # Ends with colon nearby - ) - - # Check if it's a numbered point - is_numbered_point = paragraph.strip().startswith('β€’ ') - - if is_header: - # Format as section header - header_text = paragraph.replace(':', '').strip() - formatted_lines.append(f"β”‚ {self.emojis['insights']} {header_text}") - formatted_lines.append("β”‚") - elif is_numbered_point: - # Format as bullet point - point_text = paragraph.replace('β€’ ', '').strip() - wrapped_point = self._wrap_text(f"β€’ {point_text}", 71) - for line in wrapped_point.split('\n'): - if line.strip(): - formatted_lines.append(f"β”‚ {line}") - else: - # Format as regular paragraph - wrapped_lines = self._wrap_text(paragraph.strip(), 73) - for line in wrapped_lines.split('\n'): - if line.strip(): - formatted_lines.append(f"β”‚ {line}") - - # Add spacing between sections - formatted_lines.append("β”‚") - - # Remove the last empty line if it exists - if formatted_lines and formatted_lines[-1] == "β”‚": - formatted_lines.pop() - - return "\n".join(formatted_lines) + text = re.sub(r'`([^`]+)`', r'\1', text) + text = re.sub(r'\[([^\]]+)\]\([^)]+\)', r'\1', text) + text = re.sub(r'^[\*\-\+]\s+', 'β€’ ', text, flags=re.MULTILINE) + text = re.sub(r'^\d+\.\s+', 'β€’ ', text, flags=re.MULTILINE) + return re.sub(r'\n\s*\n', '\n\n', text).strip() - def _create_consensus_section(self, debate_round: 'DebateRound') -> str: - """Create consensus analysis section""" - consensus_emoji = self._get_consensus_emoji(debate_round.consensus_score) - consensus_label = self._get_consensus_label(debate_round.consensus_score) - - return f"""╔══════════════════════════════════════════════════════════════════════════════╗ -β•‘ {consensus_emoji} CONSENSUS ANALYSIS β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -β”Œβ”€ Agreement Level ────────────────────────────────────────────────────────────┐ -β”‚ Score: {debate_round.consensus_score:.2f}/1.00 ({consensus_label}) β”‚ -β”‚ {self._create_consensus_bar(debate_round.consensus_score)} β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -{self._create_consensus_interpretation(debate_round.consensus_score)}""" - - def _create_metrics_section(self, responses: List['ModelResponse']) -> str: - """Create quality metrics section""" - avg_confidence = sum(r.confidence for r in responses) / len(responses) - total_tokens = 
sum(r.tokens_used for r in responses) - - return f"""╔══════════════════════════════════════════════════════════════════════════════╗ -β•‘ πŸ“Š QUALITY METRICS β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -β”Œβ”€ Response Quality ───────────────────────────────────────────────────────────┐ -β”‚ Average Confidence: {avg_confidence:.2f}/1.00 β”‚ -β”‚ Total Tokens Used: ~{int(total_tokens)} β”‚ -β”‚ Response Distribution: {self._create_response_quality_distribution(responses)} β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - - def _generate_enhanced_summary(self, problem: 'Problem', debate_round: 'DebateRound') -> str: - """Generate an enhanced summary panel""" - avg_confidence = sum(r.confidence for r in debate_round.responses) / len(debate_round.responses) - quality_assessment = self._get_consensus_label(debate_round.consensus_score) - - # Extract key themes from responses (simplified) - key_themes = self._extract_key_themes(debate_round.responses) - - return f"""╔══════════════════════════════════════════════════════════════════════════════╗ -β•‘ πŸ“‹ EXECUTIVE SUMMARY β•‘ -β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - -{self.emojis['problem']} Problem Domain: {problem.domain.value.title()} -{self.emojis['agent']} AI Models Consulted: {len(debate_round.responses)} (Real AI - No Mock Data) -{self.emojis['quality']} Average Confidence: {avg_confidence:.2f}/1.00 -{self.emojis['consensus_high'] if debate_round.consensus_score > 0.7 else self.emojis['consensus_medium'] if debate_round.consensus_score > 0.4 else self.emojis['consensus_low']} Consensus Quality: {quality_assessment} - -β”Œβ”€ Key Insights ──────────────────────────────────────────────────────────────┐ -{key_themes} -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - -β”Œβ”€ Reliability Assessment ────────────────────────────────────────────────────┐ -β”‚ βœ“ All responses from genuine AI models β”‚ -β”‚ βœ“ No artificial or mock data used β”‚ -β”‚ βœ“ Real-time analysis with current model capabilities β”‚ -β”‚ Quality Level: {quality_assessment} ({debate_round.consensus_score:.2f}/1.00) β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - - # Helper methods - def _get_quality_emoji(self, score: float) -> str: - if score >= 0.8: return self.emojis['high_quality'] - elif score >= 0.6: return self.emojis['medium_quality'] - else: return self.emojis['low_quality'] - - def _get_consensus_emoji(self, score: float) -> str: - if score >= 0.7: return self.emojis['consensus_high'] - elif score >= 0.4: return self.emojis['consensus_medium'] - else: return 
self.emojis['consensus_low'] - - def _get_confidence_emoji(self, confidence: float) -> str: - if confidence >= 0.7: return self.emojis['confidence_high'] - elif confidence >= 0.5: return self.emojis['confidence_medium'] - else: return self.emojis['confidence_low'] - - def _get_consensus_label(self, score: float) -> str: - if score >= 0.8: return "Excellent Agreement" - elif score >= 0.7: return "High Agreement" - elif score >= 0.6: return "Good Agreement" - elif score >= 0.4: return "Moderate Agreement" - elif score >= 0.3: return "Low Agreement" - else: return "Divergent Views" - - def _create_consensus_bar(self, score: float) -> str: - """Create a visual progress bar for consensus score""" - bar_length = 50 - filled = int(score * bar_length) - empty = bar_length - filled - - bar = "β–ˆ" * filled + "β–‘" * empty - return f"β”‚ {bar} β”‚ {score:.1%}" - - def _create_consensus_interpretation(self, score: float) -> str: - """Create interpretation text for consensus score""" - if score >= 0.8: - return """β”Œβ”€ Interpretation ────────────────────────────────────────────────────────────┐ -β”‚ 🌟 Excellent: AI models show strong agreement on key points and approaches β”‚ -β”‚ High confidence in recommendations and consistent reasoning patterns β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - elif score >= 0.6: - return """β”Œβ”€ Interpretation ────────────────────────────────────────────────────────────┐ -β”‚ ⭐ Good: Models generally align with some variation in emphasis or approach β”‚ -β”‚ Solid foundation for decision-making with multiple valid perspectives β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - elif score >= 0.4: - return """β”Œβ”€ Interpretation ────────────────────────────────────────────────────────────┐ -β”‚ πŸ”„ Moderate: Mixed agreement - models see different aspects as priorities β”‚ -β”‚ Consider multiple approaches or gather additional expert input β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - else: - return """β”Œβ”€ Interpretation ────────────────────────────────────────────────────────────┐ -β”‚ πŸ”€ Divergent: Significant disagreement suggests complex or contested issue β”‚ -β”‚ Valuable to explore different perspectives before making decisions β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜""" - - def _create_response_quality_distribution(self, responses: List['ModelResponse']) -> str: - """Create a simple distribution of response qualities""" - high = sum(1 for r in responses if r.confidence >= 0.7) - medium = sum(1 for r in responses if 0.5 <= r.confidence < 0.7) - low = sum(1 for r in responses if r.confidence < 0.5) - - return f"High: {high}, Medium: {medium}, Low: {low}" - - def _extract_key_themes(self, responses: List['ModelResponse']) -> str: - """Extract key themes from 
responses (simplified version)""" - # This is a simplified theme extraction - you could enhance with NLP - key_themes = [ - "β”‚ β€’ Strategic planning and implementation considerations identified", - "β”‚ β€’ Risk assessment and mitigation strategies discussed", - "β”‚ β€’ Multiple stakeholder perspectives considered", - "β”‚ β€’ Evidence-based recommendations provided" - ] - - return "\n".join(key_themes) - - def _wrap_text(self, text: str, width: int) -> str: - """Wrap text to specified width while preserving words""" + @staticmethod + def wrap_text(text: str, width: int = 80) -> str: + """Wrap text to specified width""" words = text.split() lines = [] current_line = [] @@ -517,135 +162,194 @@ class AgoraOutputFormatter: return "\n".join(lines) - -# Integration example for your Agora class -def integrate_enhanced_formatter(agora_instance): - """Example of how to integrate the enhanced formatter into your existing Agora class""" +class OutputFormatter: + """Professional output formatting""" - # Add this to your Agora class __init__ method: - # self.output_formatter = AgoraOutputFormatter() + SEPARATOR = "─" * 80 + SECTION_SEPARATOR = "─" * 60 - # Then modify your analyze_problem function in the Gradio interface: - def enhanced_analyze_problem(title: str, description: str, domain: str, user_id: str, context: str = ""): - """Enhanced version of analyze_problem with beautiful formatting""" - try: - if not title or not description: - return "❌ Please provide both title and description", "" - - if not agora_instance.agents: - return "❌ No AI agents available. Please check API key configuration.", "❌ No agents configured" - - # Convert domain to enum - domain_lower = domain.lower() - - # Create problem object (assuming you have the Problem class) - problem = Problem( - id=str(uuid.uuid4()), - title=title, - description=description, - domain=ProblemDomain(domain_lower), - context=context or "No additional context provided", - user_id=user_id or "anonymous", - timestamp=datetime.now() - ) - - # Start analysis - try: - debate_round = agora_instance.start_debate(problem) - except Exception as e: - return f"❌ Analysis failed: {str(e)}", f"❌ Error: {str(e)}" - - # Save results - save_success = agora_instance.save_debate_round(problem, debate_round) - - # Use enhanced formatter instead of manual formatting - formatter = AgoraOutputFormatter() - main_output, summary_output = formatter.format_debate_results(problem, debate_round, save_success) - - return main_output, summary_output + @staticmethod + def get_quality_label(score: float) -> str: + """Get quality label for consensus score""" + if score >= 0.8: return "Excellent Agreement" + if score >= 0.7: return "High Agreement" + if score >= 0.6: return "Good Agreement" + if score >= 0.4: return "Moderate Agreement" + if score >= 0.3: return "Low Agreement" + return "Divergent Views" + + @staticmethod + def get_confidence_label(confidence: float) -> str: + """Get confidence level label""" + if confidence >= 0.7: return "High" + if confidence >= 0.5: return "Moderate" + return "Low" + + @staticmethod + def create_progress_bar(score: float, width: int = 50) -> str: + """Create progress bar""" + filled = int(score * width) + return f"[{'β–ˆ' * filled}{'β–‘' * (width - filled)}] {score:.1%}" + + def format_results( + self, + problem: Problem, + debate_round: DebateRound, + save_success: bool + ) -> tuple[str, str]: + """Format complete analysis results""" + main = self._format_main(problem, debate_round, save_success) + summary = 
self._format_summary(problem, debate_round) + return main, summary + + def _format_main(self, problem: Problem, debate_round: DebateRound, save_success: bool) -> str: + """Format main output""" + quality = self.get_quality_label(debate_round.consensus_score) + db_status = "Saved successfully" if save_success else "Save failed" + + header = f"""ANALYSIS RESULTS +{self.SEPARATOR} + +Problem: {problem.title} +Domain: {problem.domain.value.title()} +Consensus: {debate_round.consensus_score:.2f}/1.00 ({quality}) +Completed: {debate_round.timestamp.strftime('%Y-%m-%d %H:%M:%S')} +Models: {len(debate_round.responses)} agents +Database: {db_status} + +Description: +{TextUtils.wrap_text(problem.description, 75)} +""" + + responses_section = self._format_responses(debate_round.responses) + consensus_section = self._format_consensus(debate_round) + metrics_section = self._format_metrics(debate_round.responses) + + return f"{header}\n\n{responses_section}\n\n{consensus_section}\n\n{metrics_section}" + + def _format_responses(self, responses: List[ModelResponse]) -> str: + """Format agent responses""" + lines = [f"AGENT RESPONSES\n{self.SEPARATOR}\n"] + + for i, resp in enumerate(responses, 1): + conf_label = self.get_confidence_label(resp.confidence) + clean_resp = TextUtils.clean_markdown(resp.response) - except Exception as e: - error_msg = f"❌ **Error during analysis:** {str(e)}" - return error_msg, f"❌ Error: {str(e)}" + lines.append(f"{i}. {resp.model_name}") + lines.append(f"Confidence: {resp.confidence:.2f} ({conf_label}) | " + f"Tokens: ~{int(resp.tokens_used)} | " + f"Time: {resp.timestamp.strftime('%H:%M:%S')}") + lines.append(self.SECTION_SEPARATOR) + lines.append(TextUtils.wrap_text(clean_resp, 75)) + lines.append("") + + return "\n".join(lines) - return enhanced_analyze_problem + def _format_consensus(self, debate_round: DebateRound) -> str: + """Format consensus section""" + quality = self.get_quality_label(debate_round.consensus_score) + bar = self.create_progress_bar(debate_round.consensus_score) + + interpretation = self._get_interpretation(debate_round.consensus_score) + + return f"""CONSENSUS ANALYSIS +{self.SEPARATOR} +Agreement Level: +Score: {debate_round.consensus_score:.2f}/1.00 ({quality}) +{bar} -# IMPROVED SUPABASE DATABASE MANAGER -class SupabaseDatabaseManager: - """Manages database operations for storing conversations and knowledge""" +Interpretation: +{TextUtils.wrap_text(interpretation, 75)}""" - def __init__(self, supabase_url: str, supabase_key: str): - self.supabase_url = supabase_url - self.supabase_key = supabase_key - self.supabase = None - self.connected = False - if supabase_url and supabase_key: - self._init_connection() + def _format_metrics(self, responses: List[ModelResponse]) -> str: + """Format quality metrics""" + avg_conf = sum(r.confidence for r in responses) / len(responses) + total_tokens = sum(r.tokens_used for r in responses) + + high = sum(1 for r in responses if r.confidence >= 0.7) + med = sum(1 for r in responses if 0.5 <= r.confidence < 0.7) + low = sum(1 for r in responses if r.confidence < 0.5) + + return f"""QUALITY METRICS +{self.SEPARATOR} - def _init_connection(self): - """Initialize Supabase connection with error handling""" - try: - self.supabase = create_client(self.supabase_url, self.supabase_key) - # Test connection with a simple query - self.supabase.table('conversations').select('id').limit(1).execute() - self.connected = True - logger.info("βœ… Database connection successful") - except Exception as e: - logger.error(f"❌ Database 
connection failed: {str(e)}") - self.connected = False - self.supabase = None +Average Confidence: {avg_conf:.2f}/1.00 +Total Tokens: ~{int(total_tokens)} +Distribution: High: {high}, Medium: {med}, Low: {low}""" + + def _format_summary(self, problem: Problem, debate_round: DebateRound) -> str: + """Format executive summary""" + avg_conf = sum(r.confidence for r in debate_round.responses) / len(debate_round.responses) + quality = self.get_quality_label(debate_round.consensus_score) + + return f"""EXECUTIVE SUMMARY +{self.SEPARATOR} - def is_connected(self) -> bool: - """Check if database is connected""" - return self.connected and self.supabase is not None +Problem Domain: {problem.domain.value.title()} +Models Consulted: {len(debate_round.responses)} +Average Confidence: {avg_conf:.2f}/1.00 +Consensus Quality: {quality} - def save_conversation(self, session_id: str, query: str, response: str, context: str = None) -> Optional[str]: - """Save conversation to database""" - if not self.is_connected(): - logger.warning("Database not connected, skipping save") - return None - - try: - conversation_data = { - 'session_id': session_id, - 'query': query, - 'response': response, - 'context': context, - 'timestamp': get_current_timestamp() - } - result = self.supabase.table('conversations').insert(conversation_data).execute() - return result.data[0]['id'] if result.data else None - except Exception as e: - logger.error(f"Error saving conversation: {str(e)}") - return None +Key Insights: +β€’ Strategic planning considerations identified +β€’ Risk assessment and mitigation strategies discussed +β€’ Multiple perspectives analyzed +β€’ Evidence-based recommendations provided - def get_conversation_history(self, session_id: str, limit: int = 10) -> List[Dict]: - """Retrieve conversation history from database""" - if not self.is_connected(): - logger.warning("Database not connected, returning empty history") - return [] - +Reliability: +All responses from genuine AI models +Quality: {quality} ({debate_round.consensus_score:.2f}/1.00)""" + + @staticmethod + def _get_interpretation(score: float) -> str: + """Get consensus interpretation""" + if score >= 0.8: + return ("Excellent: Strong agreement on key points and approaches. " + "High confidence in recommendations with consistent reasoning.") + if score >= 0.6: + return ("Good: Models generally align with some variation. " + "Solid foundation for decision-making.") + if score >= 0.4: + return ("Moderate: Mixed agreement - different priorities identified. " + "Consider multiple approaches.") + return ("Divergent: Significant disagreement on complex issue. 
" + "Explore different perspectives carefully.") + +class DatabaseManager: + """Handles all database operations""" + + def __init__(self, config: Config): + self.config = config + self.client = None + self.connected = False + self._init_connection() + + def _init_connection(self): + """Initialize database connection""" + if not self.config.has_database(): + logger.warning("Database not configured") + return + try: - result = self.supabase.table('conversations')\ - .select('*')\ - .eq('session_id', session_id)\ - .order('timestamp', desc=True)\ - .limit(limit)\ - .execute() - return result.data if result.data else [] + self.client = create_client( + self.config.SUPABASE_URL, + self.config.SUPABASE_KEY + ) + self.client.table('conversations').select('id').limit(1).execute() + self.connected = True + logger.info("Database connected") except Exception as e: - logger.error(f"Error retrieving conversation history: {str(e)}") - return [] - - def save_problem(self, problem: Problem) -> Optional[str]: + logger.error(f"Database connection failed: {e}") + self.connected = False + + def save_problem(self, problem: Problem) -> bool: """Save problem to database""" - if not self.is_connected(): - logger.warning("Database not connected, skipping problem save") - return None + if not self.connected: + return False try: - problem_data = { + data = { 'id': problem.id, 'title': problem.title, 'description': problem.description, @@ -654,843 +358,444 @@ class SupabaseDatabaseManager: 'user_id': problem.user_id, 'timestamp': problem.timestamp.isoformat() } - result = self.supabase.table('problems').insert(problem_data).execute() - return result.data[0]['id'] if result.data else None + self.client.table('problems').insert(data).execute() + return True except Exception as e: - logger.error(f"Error saving problem: {str(e)}") - return None + logger.error(f"Failed to save problem: {e}") + return False + + def save_responses(self, problem_id: str, responses: List[ModelResponse]) -> bool: + """Save agent responses""" + if not self.connected: + return False + + try: + for resp in responses: + data = { + 'session_id': problem_id, + 'query': f"Analysis by {resp.model_name}", + 'response': resp.response, + 'context': json.dumps({ + 'confidence': resp.confidence, + 'tokens': resp.tokens_used + }), + 'timestamp': resp.timestamp.isoformat() + } + self.client.table('conversations').insert(data).execute() + return True + except Exception as e: + logger.error(f"Failed to save responses: {e}") + return False + + + +# CONSENSUS CALCULATion -# Database manager instance -db_manager = SupabaseDatabaseManager(SUPABASE_URL, SUPABASE_KEY) -# IMPROVED CONSENSUS CALCULATOR - Fixed division by zero class ConsensusCalculator: - """Enhanced consensus calculation with better metrics""" + """Calculate consensus and quality metrics""" @staticmethod def calculate_response_quality(response: str) -> float: - """Calculate quality score based on response content""" + """Calculate quality score for response""" if not response or len(response.strip()) < 10: return 0.1 words = response.split() - sentences = response.split('.') - - # Prevent division by zero - if len(words) == 0: + if not words: return 0.1 - # Quality factors - length_score = min(1.0, len(words) / 100) # Optimal around 100 words - structure_score = min(1.0, len(sentences) / 5) if len(sentences) > 0 else 0.1 - - # Evidence markers - evidence_markers = ['research shows', 'studies indicate', 'data suggests', 'analysis reveals', - 'according to', 'evidence indicates', 'research 
demonstrates'] - evidence_score = min(0.3, sum(1 for marker in evidence_markers if marker.lower() in response.lower()) * 0.1) + # Length score + length_score = min(1.0, len(words) / 100) - # Reasoning markers - reasoning_markers = ['because', 'therefore', 'however', 'furthermore', 'consequently', - 'moreover', 'additionally', 'thus', 'hence'] - reasoning_score = min(0.3, sum(1 for marker in reasoning_markers if marker.lower() in response.lower()) * 0.05) - - # Confidence modifiers - uncertainty_markers = ['maybe', 'possibly', 'might', 'could be', 'perhaps', 'unsure', 'unclear'] - uncertainty_penalty = min(0.2, sum(1 for marker in uncertainty_markers if marker.lower() in response.lower()) * 0.05) - - # Specificity bonus - specific_markers = ['specifically', 'for example', 'in particular', 'namely', 'such as'] - specificity_bonus = min(0.2, sum(1 for marker in specific_markers if marker.lower() in response.lower()) * 0.05) - - total_score = ( + # structure score + sentences = response.split('.') + structure_score = min(1.0, len(sentences) / 5) if sentences else 0.1 + + # evidence markers + evidence = ['research', 'studies', 'data', 'analysis', 'according'] + evidence_score = min(0.3, sum( + 0.1 for m in evidence if m in response.lower() + )) + + # reasoning markers + reasoning = ['because', 'therefore', 'however', 'furthermore'] + reasoning_score = min(0.3, sum( + 0.05 for m in reasoning if m in response.lower() + )) + + # uncertainty penalty + uncertainty = ['maybe', 'possibly', 'might', 'unclear'] + penalty = min(0.2, sum( + 0.05 for m in uncertainty if m in response.lower() + )) + + score = ( length_score * 0.25 + structure_score * 0.15 + evidence_score + - reasoning_score + - specificity_bonus - - uncertainty_penalty + reasoning_score - + penalty ) - return max(0.1, min(1.0, total_score)) - + return max(0.1, min(1.0, score)) + @staticmethod - def calculate_consensus_score(responses: List[ModelResponse]) -> float: - """Calculate overall consensus score from multiple responses""" - if not responses or len(responses) == 0: + def calculate_consensus(responses: List[ModelResponse]) -> float: + """Calculate overall consensus score""" + if not responses: return 0.0 try: - # Average confidence scores - avg_confidence = sum(r.confidence for r in responses) / len(responses) - - # Response length variance (lower variance = better consensus) - response_lengths = [len(r.response.split()) for r in responses] + # confidence + avg_conf = sum(r.confidence for r in responses) / len(responses) - if len(response_lengths) == 0: - return avg_confidence * 0.7 # No length consistency component + # consistency + lengths = [len(r.response.split()) for r in responses] + if not lengths: + return avg_conf * 0.7 - # Calculate variance safely - mean_length = sum(response_lengths) / len(response_lengths) - length_variance = sum((l - mean_length)**2 for l in response_lengths) / len(response_lengths) - length_consistency = max(0, 1 - (length_variance / 1000)) # Normalize - - # Combine metrics - consensus_score = (avg_confidence * 0.7) + (length_consistency * 0.3) - return min(1.0, max(0.0, consensus_score)) + mean_len = sum(lengths) / len(lengths) + variance = sum((l - mean_len)**2 for l in lengths) / len(lengths) + consistency = max(0, 1 - (variance / 1000)) + return min(1.0, max(0.0, avg_conf * 0.7 + consistency * 0.3)) except Exception as e: - logger.error(f"Error calculating consensus score: {str(e)}") - # Fallback: return average confidence if available - if responses: - try: - return sum(r.confidence for r in 
responses) / len(responses) - except: - return 0.5 # Default fallback - return 0.0 - -# MAIN AGORA CLASS - FIXED FOR REAL AGENT RESPONSES -class Agora: - """Main Agora class for managing AI debates and consensus""" - - def __init__(self, primary_llm=None): - self.primary_llm = primary_llm or "gpt-4" - self.consensus_calculator = ConsensusCalculator() - self.db_manager = db_manager - self.agents = [] # Store individual agents instead of team - self.available_models = self._check_available_models() - self._initialize_agents() - self.output_formatter = AgoraOutputFormatter() - - def _check_available_models(self): - """Check which models have API keys available""" - available = {} - - models_to_check = { - "Claude Analyst": ("ANTHROPIC_API_KEY", ANTHROPIC_API_KEY), - "GPT-4 Strategist": ("OPENAI_API_KEY", OPENAI_API_KEY), - "Mistral Evaluator": ("MISTRAL_API_KEY", MISTRAL_API_KEY), - "SambaNova Specialist": ("SAMBANOVA_API_KEY", SAMBANOVA_API_KEY) - } - - for model_name, (key_name, api_key) in models_to_check.items(): - available[model_name] = bool(api_key) - logger.info(f"πŸ”‘ {model_name}: {'βœ… Available' if api_key else '❌ No API key'}") - - return available + logger.error(f"Consensus calculation error: {e}") + return 0.5 - def _initialize_agents(self): - """Initialize individual AI agents""" +class AgentManager: + """Manages AI agent initialization""" + + INSTRUCTIONS = """You are an expert AI analyst. Provide thorough, evidence-based analysis. +Focus on actionable insights, clear reasoning, and professional recommendations. +Structure your response clearly without using markdown formatting.""" + + def __init__(self, config: Config, knowledge_base: Optional[Knowledge]): + self.config = config + self.knowledge_base = knowledge_base self.agents = [] - plain_text_instructions = """ - - CRITICAL FORMATTING REQUIREMENTS: - - Do NOT use markdown formatting (no **, ##, -, etc.) - - Use plain text with natural line breaks - - For emphasis, use CAPITALIZATION or quotation marks - - For lists, use numbers (1., 2., 3.) or natural language - - For sections, use clear headers in plain text - - Write as if you're speaking directly to a person - - Use proper paragraph breaks with double line breaks - - Example of good formatting: - - KEY CONSIDERATIONS - - The main challenges include three important areas. First, we need to consider the technical aspects. This involves ensuring proper implementation and testing procedures. - - Second, the strategic implications require careful planning. Organizations should focus on long-term sustainability and stakeholder alignment. - - RECOMMENDATIONS - - Based on this analysis, I recommend the following steps: - - 1. Conduct a thorough assessment of current capabilities - 2. Develop a phased implementation plan - 3. Establish clear success metrics and monitoring systems - - This approach will help ensure successful outcomes while minimizing risks. 
- - """ - + self._initialize() + + def _initialize(self): + """Initialize all available agents""" + if self.config.ANTHROPIC_API_KEY: + self._add_claude() + if self.config.OPENAI_API_KEY: + self._add_openai() + if self.config.MISTRAL_API_KEY: + self._add_mistral() + if self.config.SAMBANOVA_API_KEY: + self._add_sambanova() + + logger.info(f"Initialized {len(self.agents)} agents") + + def _add_claude(self): + """Add Claude agent""" try: - # Check if we have at least one API key for real AI models - has_real_api_keys = any([ANTHROPIC_API_KEY, OPENAI_API_KEY, MISTRAL_API_KEY, SAMBANOVA_API_KEY]) - - if not has_real_api_keys: - logger.error("❌ No real AI model API keys found - cannot create agents") - return - - # Create Claude agent - if ANTHROPIC_API_KEY: - try: - claude_agent = Agent( - name="Claude Analyst", - role="Critical Analysis Specialist", - model=Claude(id="claude-3-5-sonnet-20240620"), - instructions=""" - You are Claude Analyst, a Critical Analysis Specialist. - Provide expert analysis on the given topic from your specialized perspective. - Be thorough, evidence-based, and constructive in your responses. - Consider both benefits and potential challenges in your analysis. - Structure your response clearly with key insights and recommendations. - Keep responses concise and brief, focusing on actionable insights. - Respond using clearly formatted text with proper line breaks, bullet points, and headings. Do not use escape characters like \n or markdown symbols like ### or - unless you're actually formatting for a markdown-rendering environment. Write as if you're showing it in a user-friendly UI with readable spacing and structure. - RESPONSE GUIDELINES: - 1. Always search for relevant information before answering complex questions - 2. Clearly distinguish between document-based and web-based information - 3. Provide source citations for all information - 4. Synthesize information from multiple sources when available - 5. Ask clarifying questions when the query is ambiguous - 6. Be conversational but informative - {plain_text_instructions} - """, - knowledge=knowledge_base, - ) - self.agents.append(claude_agent) - logger.info("βœ… Created Claude agent") - except Exception as e: - logger.error(f"❌ Failed to create Claude agent: {str(e)}") - - # Create GPT-4 agent - if OPENAI_API_KEY: - try: - openai_agent = Agent( - name="GPT-4 Strategist", - role="Strategic Planning Expert", - model=OpenAIChat(id="gpt-4o"), - instructions=""" - You are GPT-4 Strategist, a Strategic Planning Expert. - Provide expert analysis on the given topic from your specialized perspective. - Be thorough, evidence-based, and constructive in your responses. - Focus on strategic implications, implementation approaches, and long-term considerations. - Structure your response clearly with actionable insights. - Keep responses concise and brief, focusing on strategic value. - Respond using clearly formatted text with proper line breaks, bullet points, and headings. Do not use escape characters like \n or markdown symbols like ### or - unless you're actually formatting for a markdown-rendering environment. Write as if you're showing it in a user-friendly UI with readable spacing and structure. - RESPONSE GUIDELINES: - 1. Always search for relevant information before answering complex questions - 2. Clearly distinguish between document-based and web-based information - 3. Provide source citations for all information - 4. Synthesize information from multiple sources when available - 5. 
Ask clarifying questions when the query is ambiguous - 6. Be conversational but informative - {plain_text_instructions} - - """, - knowledge=knowledge_base, - ) - self.agents.append(openai_agent) - logger.info("βœ… Created GPT-4 agent") - except Exception as e: - logger.error(f"❌ Failed to create GPT-4 agent: {str(e)}") - - # Create Mistral agent - if MISTRAL_API_KEY: - try: - mistral_agent = Agent( - name="Mistral Evaluator", - role="Solution Evaluation Specialist", - model=MistralChat( - id="mistral-large-latest", - api_key=MISTRAL_API_KEY, - ), # Use model object - instructions=""" - You are Mistral Evaluator, a Solution Evaluation Specialist. - Provide expert analysis on the given topic from your specialized perspective. - Be thorough, evidence-based, and constructive in your responses. - Focus on evaluating different approaches, assessing feasibility, and identifying risks. - Structure your response clearly with evaluation criteria and recommendations. - Keep responses concise and brief, focusing on practical evaluation. - Respond using clearly formatted text with proper line breaks, bullet points, and headings. Do not use escape characters like \n or markdown symbols like ### or - unless you're actually formatting for a markdown-rendering environment. Write as if you're showing it in a user-friendly UI with readable spacing and structure. - RESPONSE GUIDELINES: - 1. Always search for relevant information before answering complex questions - 2. Clearly distinguish between document-based and web-based information - 3. Provide source citations for all information - 4. Synthesize information from multiple sources when available - 5. Ask clarifying questions when the query is ambiguous - 6. Be conversational but informative - {plain_text_instructions} + agent = Agent( + name="Claude Analyst", + role="Critical Analysis", + model=Claude(id="claude-3-5-sonnet-20240620"), + instructions=self.INSTRUCTIONS, + knowledge=self.knowledge_base + ) + self.agents.append(agent) + except Exception as e: + logger.error(f"Claude agent failed: {e}") + + def _add_openai(self): + """Add OpenAI agent""" + try: + agent = Agent( + name="GPT-4 Strategist", + role="Strategic Planning", + model=OpenAIChat(id="gpt-4o"), + instructions=self.INSTRUCTIONS, + knowledge=self.knowledge_base + ) + self.agents.append(agent) + except Exception as e: + logger.error(f"OpenAI agent failed: {e}") + + def _add_mistral(self): + """Add Mistral agent""" + try: + agent = Agent( + name="Mistral Evaluator", + role="Solution Evaluation", + model=MistralChat( + id="mistral-large-latest", + api_key=self.config.MISTRAL_API_KEY + ), + instructions=self.INSTRUCTIONS, + knowledge=self.knowledge_base + ) + self.agents.append(agent) + except Exception as e: + logger.error(f"Mistral agent failed: {e}") + + def _add_sambanova(self): + """Add SambaNova agent""" + try: + agent = Agent( + name="SambaNova Specialist", + role="Technical Implementation", + model=Sambanova(), + instructions=self.INSTRUCTIONS, + knowledge=self.knowledge_base + ) + self.agents.append(agent) + except Exception as e: + logger.error(f"SambaNova agent failed: {e}") - """, - knowledge=knowledge_base, - ) - self.agents.append(mistral_agent) - logger.info("βœ… Created Mistral agent") - except Exception as e: - logger.error(f"❌ Failed to create Mistral agent: {str(e)}") - # Create SambaNova agent - might need different import/setup - if SAMBANOVA_API_KEY: - try: - sambanova_agent = Agent( - name="SambaNova Specialist", - role="Technical Implementation Specialist", - 
model=Sambanova(), - instructions=""" - You are SambaNova Specialist, a Technical Implementation Specialist. - Provide expert analysis on the given topic from your specialized perspective. - Be thorough, evidence-based, and constructive in your responses. - Focus on technical feasibility, implementation challenges, and system design. - Structure your response clearly with technical details and recommendations. - Keep responses concise and brief, focusing on technical implementation. - Respond using clearly formatted text with proper line breaks, bullet points, and headings. Do not use escape characters like \n or markdown symbols like ### or - unless you're actually formatting for a markdown-rendering environment. Write as if you're showing it in a user-friendly UI with readable spacing and structure. - RESPONSE GUIDELINES: - 1. Always search for relevant information before answering complex questions - 2. Clearly distinguish between document-based and web-based information - 3. Provide source citations for all information - 4. Synthesize information from multiple sources when available - 5. Ask clarifying questions when the query is ambiguous - 6. Be conversational but informative - {plain_text_instructions} - """, - knowledge=knowledge_base, - ) - self.agents.append(sambanova_agent) - logger.info("βœ… SambaNova agent Created") - except Exception as e: - logger.error(f"❌ Failed to create SambaNova agent: {str(e)}") - - logger.info(f"βœ… Successfully initialized {len(self.agents)} agents") - +class Agora: + """Main AI Democracy system""" + + def __init__(self): + self.config = Config() + self.knowledge_base = self._setup_knowledge() + self.db = DatabaseManager(self.config) + self.agent_manager = AgentManager(self.config, self.knowledge_base) + self.calculator = ConsensusCalculator() + self.formatter = OutputFormatter() + + def _setup_knowledge(self) -> Optional[Knowledge]: + """Setup knowledge base""" + try: + embedder = HuggingfaceCustomEmbedder() + if not hasattr(embedder, 'embedding_dimension'): + embedder.embedding_dimension = 384 + + if not self.config.has_database(): + return None + + return Knowledge( + embedder=embedder, + vector_db=PgVector( + host=self.config.SUPABASE_URL.replace("https://", "").split(".")[0], + port=5432, + user="postgres", + password=self.config.SUPABASE_DB_PASSWORD, + database="postgres", + table_name="conversations_w_llm", + embedding_dimension=384 + ) + ) except Exception as e: - logger.error(f"❌ Failed to initialize agents: {str(e)}") - import traceback - logger.error(f"Full traceback: {traceback.format_exc()}") - - def get_agent_status(self): - """Get detailed status of the agents and available models""" - status = { - "agents_initialized": len(self.agents) > 0, - "agent_count": len(self.agents), - "available_models": self.available_models, - "agents": [] - } + logger.error(f"Knowledge base setup failed: {e}") + return None + + @property + def agents(self) -> List[Agent]: + """Get list of agents""" + return self.agent_manager.agents + + def analyze(self, problem: Problem) -> DebateRound: + """Run analysis on problem""" + if not self.agents: + raise Exception("No AI agents available") - for agent in self.agents: - status["agents"].append({ - "name": agent.name, - "role": agent.role, - "model": getattr(agent, 'model', 'unknown') - }) + logger.info(f"Analyzing: {problem.title}") - return status + prompt = f"""Problem Analysis Request - def start_debate(self, problem: Problem) -> DebateRound: - """Start individual agent analysis on a given problem - NO MOCK 
RESPONSES""" - logger.info(f"🎯 Starting analysis on: {problem.title}") - logger.info(f"πŸ” Available agents: {len(self.agents)}") - - if not self.agents: - raise Exception("❌ No AI agents available - please check API keys configuration") +Title: {problem.title} +Description: {problem.description} +Domain: {problem.domain.value} +Context: {problem.context} + +Provide expert analysis including: +1. Key considerations and challenges +2. Potential solutions or approaches +3. Risk and benefit assessment +4. Specific recommendations + +Be thorough and provide actionable insights.""" responses = [] - - # Create analysis prompt - analysis_prompt = dedent(f""" - **Problem Analysis Request** - - Title: {problem.title} - Description: {problem.description} - Domain: {problem.domain.value} - Context: {problem.context} - - Please provide your expert analysis on this problem, including: - 1. Key considerations and challenges - 2. Potential solutions or approaches - 3. Risks and benefits assessment - 4. Specific recommendations for next steps - - Please be thorough and provide actionable insights from your specialized perspective. - - Timestamp: {get_current_timestamp()} - """) - - # Get response from each agent individually for agent in self.agents: try: - logger.info(f"πŸ“€ Requesting analysis from {agent.name}...") - - # Get response from individual agent - agent_response = agent.run(analysis_prompt) - - if agent_response and len(str(agent_response).strip()) > 20: - # Create model response object - response_text = str(agent_response).strip() - confidence = self.consensus_calculator.calculate_response_quality(response_text) + result = agent.run(prompt) + if result and len(str(result).strip()) > 20: + text = str(result).strip() + confidence = self.calculator.calculate_response_quality(text) - model_response = ModelResponse( + responses.append(ModelResponse( model_name=agent.name, - response=response_text, + response=text, confidence=confidence, - reasoning=f"Direct response from {agent.role}", + reasoning=f"Analysis by {agent.role}", timestamp=datetime.now(), - tokens_used=len(response_text.split()) * 1.3 - ) - responses.append(model_response) - logger.info(f"βœ… Received response from {agent.name} (confidence: {confidence:.2f})") - else: - logger.warning(f"⚠️ Empty or short response from {agent.name}") - + tokens_used=int(len(text.split()) * 1.3) + )) + logger.info(f"{agent.name} responded (conf: {confidence:.2f})") except Exception as e: - logger.error(f"❌ Error getting response from {agent.name}: {str(e)}") - continue + logger.error(f"Error from {agent.name}: {e}") - # Ensure we have at least one response if not responses: - raise Exception("❌ No responses received from any AI agents - check API keys and network connection") + raise Exception("No responses received from agents") - # Calculate consensus score - consensus_score = self.consensus_calculator.calculate_consensus_score(responses) + consensus = self.calculator.calculate_consensus(responses) - # Create debate round - debate_round = DebateRound( + return DebateRound( round_number=1, responses=responses, - consensus_score=consensus_score, + consensus_score=consensus, timestamp=datetime.now() ) - - logger.info(f"βœ… Analysis completed - Generated {len(responses)} real AI responses - Consensus Score: {consensus_score:.2f}") - return debate_round - - def save_debate_round(self, problem: Problem, debate_round: DebateRound) -> bool: - """Save debate round and problem to the database""" + + def save_results(self, problem: Problem, debate: 
DebateRound) -> bool: + """Save results to database""" try: - # Save problem first - problem_id = self.db_manager.save_problem(problem) - - # Save each response - for response in debate_round.responses: - self.db_manager.save_conversation( - session_id=problem.id, - query=f"Analysis request: {problem.title}", - response=response.response, - context=json.dumps({ - "round_number": debate_round.round_number, - "model_name": response.model_name, - "confidence": response.confidence, - "consensus_score": debate_round.consensus_score, - "tokens_used": response.tokens_used - }) - ) - - logger.info(f"βœ… Saved debate round for problem: {problem.title}") - return True - + prob_saved = self.db.save_problem(problem) + resp_saved = self.db.save_responses(problem.id, debate.responses) + return prob_saved and resp_saved except Exception as e: - logger.error(f"❌ Error saving debate round: {str(e)}") + logger.error(f"Save failed: {e}") return False -# IMPROVED GRADIO INTERFACE -def create_gradio_interface(): - """Create the Gradio interface for Agora""" +def create_interface(): + """Create professional Gradio interface""" - # Initialize Agora instance agora = Agora() - def analyze_problem(title: str, description: str, domain: str, user_id: str, context: str = ""): - """Enhanced problem analysis with beautiful formatting using AgoraOutputFormatter""" - - # Initialize the formatter - formatter = AgoraOutputFormatter() - + def analyze_problem(title, description, domain, user_id, context): + """Analyze problem and return formatted results""" try: - # Input validation if not title or not description: - error_output = """ - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ❌ INPUT ERROR β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - β”Œβ”€ Missing Required Information ──────────────────────────────────────────────┐ - β”‚ Please provide both a title and description for your problem. β”‚ - β”‚ β”‚ - β”‚ Required fields: β”‚ - β”‚ οΏ½οΏ½ πŸ“ Problem Title: Clear, concise title β”‚ - β”‚ β€’ πŸ“„ Problem Description: Detailed explanation of the issue β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - """ - return error_output.strip(), "❌ Missing required information" + return "Error: Title and description required", "" - # Check if agents are available if not agora.agents: - error_output = f""" - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ❌ CONFIGURATION ERROR β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - β”Œβ”€ No AI Agents Available ────────────────────────────────────────────────────┐ - β”‚ The system cannot proceed without properly configured AI agents. 
β”‚ - β”‚ β”‚ - β”‚ Please check: β”‚ - β”‚ β€’ πŸ”‘ API keys are properly set in environment variables β”‚ - β”‚ β€’ πŸ€– At least one AI model is configured and accessible β”‚ - β”‚ β€’ 🌐 Network connection allows API calls to AI services β”‚ - β”‚ β”‚ - β”‚ Current Status: {len(agora.agents)} agents initialized β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - """ - return error_output.strip(), "❌ No agents configured" - - # Convert domain to enum safely - try: - domain_lower = domain.lower() - problem_domain = ProblemDomain(domain_lower) - except ValueError: - # Fallback to GENERAL if invalid domain - problem_domain = ProblemDomain.GENERAL - logger.warning(f"Invalid domain '{domain}', defaulting to GENERAL") + return "Error: No AI agents available", "" - # Create problem object problem = Problem( id=str(uuid.uuid4()), title=title.strip(), description=description.strip(), - domain=problem_domain, - context=context.strip() if context else "No additional context provided", - user_id=user_id.strip() if user_id else "anonymous", + domain=ProblemDomain(domain.lower()), + context=context.strip() or "None provided", + user_id=user_id.strip() or "anonymous", timestamp=datetime.now() ) - # Log analysis start - logger.info(f"πŸš€ Starting enhanced analysis for: {problem.title}") - logger.info(f"πŸ” Domain: {problem.domain.value} | Agents: {len(agora.agents)}") + debate = agora.analyze(problem) + saved = agora.save_results(problem, debate) - # Show progress indicator (for UI feedback) - progress_output = f""" - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ πŸ”„ ANALYSIS IN PROGRESS β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - 🎯 **Analyzing:** {problem.title} - 🏷️ **Domain:** {problem.domain.value.title()} - πŸ€– **Consulting {len(agora.agents)} AI Agents...** - - β”Œβ”€ Progress ──────────────────────────────────────────────────────────────────┐ - β”‚ ⏳ Sending analysis requests to AI models... 
β”‚ - β”‚ πŸ”„ This may take 30-60 seconds depending on model response times β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - """ + main, summary = agora.formatter.format_results(problem, debate, saved) + return main, summary - # Start the actual analysis - try: - debate_round = agora.start_debate(problem) - logger.info(f"βœ… Analysis completed - {len(debate_round.responses)} responses received") - - except Exception as e: - error_msg = str(e) - logger.error(f"❌ Analysis failed: {error_msg}") - - error_output = f""" - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ❌ ANALYSIS FAILED β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - β”Œβ”€ Error Details ─────────────────────────────────────────────────────────────┐ - β”‚ {formatter._wrap_text(error_msg, 73)} β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - - β”Œβ”€ Troubleshooting Steps ─────────────────────────────────────────────────────┐ - β”‚ 1. πŸ”‘ Verify all API keys are valid and have sufficient credits β”‚ - β”‚ 2. 🌐 Check internet connection and firewall settings β”‚ - β”‚ 3. πŸ€– Ensure AI services are operational (check status pages) β”‚ - β”‚ 4. πŸ“ Try with a simpler problem description β”‚ - β”‚ 5. πŸ”„ Refresh the page and try again β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - """ - return error_output.strip(), f"❌ Analysis Error: {error_msg}" - - # Validate that we got meaningful responses - if not debate_round.responses: - error_output = """ - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ❌ NO RESPONSES RECEIVED β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - β”Œβ”€ Issue Description ─────────────────────────────────────────────────────────┐ - β”‚ No AI agents provided responses to the analysis request. β”‚ - β”‚ This could indicate API key issues or service unavailability. 
β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - """ - return error_output.strip(), "❌ No responses received" - - # Save results to database - try: - save_success = agora.save_debate_round(problem, debate_round) - if save_success: - logger.info("βœ… Results saved to database successfully") - else: - logger.warning("⚠️ Database save failed - results not persisted") - except Exception as e: - logger.error(f"❌ Database save error: {str(e)}") - save_success = False - - # Use the enhanced formatter to create beautiful output - try: - main_output, summary_output = formatter.format_debate_results( - problem=problem, - debate_round=debate_round, - save_success=save_success - ) - - # Log success metrics - avg_confidence = sum(r.confidence for r in debate_round.responses) / len(debate_round.responses) - logger.info(f"πŸ“Š Analysis completed successfully:") - logger.info(f" β€’ Responses: {len(debate_round.responses)}") - logger.info(f" β€’ Avg Confidence: {avg_confidence:.2f}") - logger.info(f" β€’ Consensus Score: {debate_round.consensus_score:.2f}") - logger.info(f" β€’ Database Saved: {'Yes' if save_success else 'No'}") - - return main_output, summary_output - - except Exception as e: - logger.error(f"❌ Formatting error: {str(e)}") - - # Fallback to basic formatting if formatter fails - fallback_output = f""" - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ⚠️ ANALYSIS COMPLETE (BASIC FORMAT) β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - 🎯 **Problem:** {problem.title} - πŸ“Š **Consensus Score:** {debate_round.consensus_score:.2f}/1.00 - πŸ€– **Responses:** {len(debate_round.responses)} AI agents - πŸ•’ **Completed:** {debate_round.timestamp.strftime('%Y-%m-%d %H:%M:%S')} - - **Responses:** - {chr(10).join([f"β€’ **{r.model_name}** (confidence: {r.confidence:.2f}): {r.response[:200]}..." for r in debate_round.responses])} - """ - - return fallback_output.strip(), f"βœ… Analysis completed ({len(debate_round.responses)} responses)" - except Exception as e: - # Ultimate fallback for any unexpected errors - error_msg = str(e) - logger.error(f"❌ Unexpected error in analyze_problem: {error_msg}") - - fallback_error = f""" - ╔══════════════════════════════════════════════════════════════════════════════╗ - β•‘ ❌ UNEXPECTED ERROR β•‘ - β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• - - An unexpected error occurred during analysis. 
-            
-            **Error:** {error_msg}
-            
-            **Troubleshooting:**
-            β€’ Check system logs for detailed error information
-            β€’ Verify all configuration settings
-            β€’ Try restarting the application
-            β€’ Contact support if the issue persists
-            
-            **System Info:**
-            β€’ Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
-            β€’ Available Agents: {len(agora.agents) if 'agora' in locals() else 'Unknown'}
-            """
-            
-            return fallback_error.strip(), f"❌ System Error: {error_msg}"
-    
-    
-# Create Gradio interface with enhanced styling
-    with gr.Blocks(
-        title="AI Democracy - Agora System (Enhanced Output)",
-        theme=gr.themes.Soft(),
-        css="""
-        .gradio-container {
-            font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
-        }
-        .markdown {
-            font-family: 'Monaco', 'Menlo', 'Ubuntu Mono', monospace !important;
-        }
-        """
-    ) as demo:
-        
+            logger.error(f"Analysis failed: {e}\n{traceback.format_exc()}")
+            return f"Error: {str(e)}", "Analysis failed"
+    
+    # title/theme must be passed as gr.Blocks() constructor arguments;
+    # assigning them as bare statements inside the context block has no effect.
+    with gr.Blocks(
+        title="AI Democracy System",
+        theme=gr.themes.Soft(
+            primary_hue="blue",
+            secondary_hue="slate",
+            neutral_hue="slate"
+        )
+    ) as demo:
        gr.Markdown("""
-        # πŸ›οΈ AI Democracy - Multi-Model Consensus System
-        ## The Agora: Where artificial minds gather to forge wisdom
+            # AI Democracy - Multi-Model Consensus System
        
-        **Enhanced Output Edition** - Beautiful, readable analysis results from real AI models!
+            Professional platform for AI model deliberation and consensus building.
        """)
        
-        # Show enhanced agent status
-        agent_status = agora.get_agent_status()
-        available_models = [name for name, available in agent_status['available_models'].items() if available]
        
-        status_markdown = f"""
-        ### πŸ€– System Status
-        
-        **Available Agents:** {agent_status['agent_count']} | **Active Models:** {', '.join(available_models) if available_models else 'None configured'}
-        
-        **Features:** Enhanced Formatting ✨ | Real AI Responses πŸ€– | Consensus Analysis πŸ“Š | Database Storage πŸ’Ύ
+        status_md = f"""
+        ### System Status
+        **Active Agents:** {len(agora.agents)} | **Database:** {'Connected' if agora.db.connected else 'Offline'}
        """
-        gr.Markdown(status_markdown)
+        gr.Markdown(status_md)
        
        with gr.Row():
            with gr.Column(scale=2):
-                title_input = gr.Textbox(
-                    label="πŸ“ Problem Title",
-                    placeholder="Enter a clear, concise title for your problem",
-                    lines=1,
-                    max_lines=2
+                title = gr.Textbox(
+                    label="Problem Title",
+                    placeholder="Enter problem title",
+                    lines=1
                )
-                
-                description_input = gr.Textbox(
-                    label="πŸ“„ Problem Description",
-                    placeholder="Provide a detailed description of the problem you want analyzed",
-                    lines=5,
-                    max_lines=10
+                description = gr.Textbox(
+                    label="Description",
+                    placeholder="Detailed problem description",
+                    lines=5
                )
-                
-                domain_input = gr.Dropdown(
-                    label="🏷️ Problem Domain",
-                    choices=[domain.value.title() for domain in ProblemDomain],
-                    value=ProblemDomain.GENERAL.value.title()
+                domain = gr.Dropdown(
+                    label="Domain",
+                    choices=[d.value.title() for d in ProblemDomain],
+                    value="General"
                )
-                
-                context_input = gr.Textbox(
-                    label="πŸ” Additional Context (Optional)",
-                    placeholder="Any additional context, constraints, or specific requirements",
-                    lines=2,
-                    max_lines=4
+                context = gr.Textbox(
+                    label="Context (Optional)",
+                    placeholder="Additional context",
+                    lines=2
                )
-                
-                user_input = gr.Textbox(
-                    label="πŸ‘€ User ID (Optional)",
-                    placeholder="Enter your identifier for tracking (optional)",
+                user_id = gr.Textbox(
+                    label="User ID (Optional)",
+                    placeholder="Your identifier",
                    lines=1
                )
-                
-                analyze_button = gr.Button(
-                    
"πŸš€ Start Enhanced AI Analysis", - variant="primary", - size="lg", - scale=2 - ) + analyze_btn = gr.Button("Analyze", variant="primary", size="lg") with gr.Column(scale=1): - gr.Markdown(f""" - 🎯 **Enhanced Analysis Process:** - - 1. Submit your problem with detailed description - 2. Real AI models** analyze independently and thoroughly - 3. Enhanced formatting** makes results beautiful and readable - 4. Consensus scoring** shows agreement levels between models - 5. Executive summary** provides key insights at a glance + gr.Markdown(""" + ### Analysis Process - 🌟 New Features: - 1. Visual progress bars** for consensus scores* - 2. Emoji indicators** for quality and confidence levels* - 3. Structured response boxes** for each AI agent* - 4. Executive summaries** with key insights* - 5. Error handling** with helpful troubleshooting* + 1. Submit problem details + 2. AI models analyze independently + 3. Consensus calculated + 4. Results formatted + 5. Saved to database - πŸ€– AI Model Status: - {chr(10).join([f"{name}: {'🟒 Ready' if available else 'πŸ”΄ No API Key'}" - for name, available in agent_status['available_models'].items()])} - - ⚑Quality Assurance: - βœ… Only real AI responses (no mock data) - βœ… Confidence scoring for reliability - βœ… Consensus analysis for agreement - βœ… Professional formatting for clarity + ### Features + - Real AI responses + - Quality metrics + - Consensus scoring + - Professional formatting """) - gr.Markdown("## πŸ“Š Enhanced AI Analysis Results") + gr.Markdown("## Results") with gr.Row(): - with gr.Column(): - results_output = gr.Markdown( - label="πŸ”¬ Detailed Analysis", - elem_classes=["markdown"], - value="Analysis results will appear here after you submit a problem..." - ) + results = gr.Markdown(label="Analysis Results") with gr.Row(): - with gr.Column(): - summary_output = gr.Markdown( - label="πŸ“‹ Executive Summary", - elem_classes=["markdown"], - value="Executive summary will appear here..." - ) + summary = gr.Markdown(label="Executive Summary") - # Event handling with the enhanced analyze_problem function - analyze_button.click( - fn=analyze_problem, # This is our new enhanced function - inputs=[title_input, description_input, domain_input, user_input, context_input], - outputs=[results_output, summary_output], - show_progress=True + analyze_btn.click( + fn=analyze_problem, + inputs=[title, description, domain, user_id, context], + outputs=[results, summary] ) - # Enhanced example problems section - gr.Markdown(""" - ### πŸ’‘ **Sample Problems to Test Enhanced Formatting:** - - **Business Strategy:** - - "How can we implement a sustainable remote work policy that maintains productivity and employee satisfaction?" - - **Technology & Ethics:** - - "What are the key considerations for implementing AI-powered decision making in healthcare while ensuring patient privacy and safety?" - - **Environmental & Policy:** - - "How should cities balance economic growth with environmental sustainability in urban planning decisions?" - - **Social & Psychological:** - - "What strategies can organizations use to improve mental health support while respecting employee privacy boundaries?" - - **Innovation & Risk:** - - "How can startups effectively validate product-market fit while managing limited resources and investor expectations?" 
- """) - gr.Markdown(""" --- - ### πŸ”§ **Enhanced System Information:** - - **Framework:** Agno AI Agent Framework with Enhanced Formatting - - **Output Engine:** AgoraOutputFormatter with ASCII Art & Emojis - - **Database:** Supabase (PostgreSQL + Vector Storage) - - **AI Models:** Claude 3.5, GPT-4o, Mistral Large, SambaNova - - **Version:** 2.0 (Enhanced UI/UX Edition) - - **Features:** Real-time Analysis, Consensus Scoring, Beautiful Formatting + **System Information** + - Framework: Agno AI Agent Framework + - Models: Claude 3.5, GPT-4, Mistral, SambaNova + - Database: Supabase PostgreSQL + - Version: 2.0 Professional Edition """) return demo - -# Additional utility function for testing the formatter -def test_enhanced_formatting(): - """Test function to demonstrate the enhanced formatting capabilities""" - - # This would typically be called with real data - print("πŸ§ͺ Testing Enhanced Formatting...") - print("βœ… AgoraOutputFormatter class ready for integration") - print("βœ… Enhanced analyze_problem function ready") - print("βœ… Gradio interface enhanced with new styling") - print("πŸš€ Ready to provide beautiful AI analysis results!") - - -# MAIN EXECUTION def main(): - """Main function to run the Agora system""" + """Run the application""" try: - logger.info("πŸš€ Starting AI Democracy - Agora System") - - # Create and launch Gradio interface - demo = create_gradio_interface() - - # Launch with configuration + logger.info("Starting AI Democracy System") + demo = create_interface() demo.launch( server_name="127.0.0.1", server_port=7860, - share=False, # Set to True if you want a public link - debug=False, - show_error=True, - mcp_server=True + share=False ) - except Exception as e: - logger.error(f"❌ Failed to start Agora system: {str(e)}") - import traceback - logger.error(f"Full traceback: {traceback.format_exc()}") + logger.error(f"Failed to start: {e}\n{traceback.format_exc()}") + if __name__ == "__main__": main() \ No newline at end of file