# NOTE(review): "Spaces: / Runtime error" below was residue scraped from the
# Hugging Face Spaces hosting page, not part of the application; it is kept
# only as this comment so the module remains importable.
# UI framework plus stdlib helpers used by the mock backend services below.
import gradio as gr
import json
import time
import random
from datetime import datetime
from typing import Dict, Any, List, Optional
# Simulated backend services (in production, these would connect to the actual backend)
class MockNLPEngine:
    """Mock NLP Engine for email analysis.

    Every signal (sentiment, topic, intent, urgency, keywords) is derived by
    deterministic, case-insensitive substring matching over the combined
    subject and body text — no external model is involved.
    """

    def analyze_email(self, subject: str, content: str) -> Dict[str, Any]:
        """Analyze one email and return a dict of heuristic signals.

        Keys: sentiment, sentiment_confidence, topic, topic_confidence,
        intent, urgency, categories, keywords, reasoning, suggested_labels,
        risk_flags.
        """
        text = f"{subject} {content}".lower()

        sentiment, sentiment_confidence = self._score_sentiment(text)
        topic, topic_confidence = self._classify_topic(text)
        intent = self._detect_intent(text)

        # Escalate urgency when any trigger word occurs anywhere in the text.
        urgency = "normal"
        for marker in ("urgent", "asap", "immediately", "critical", "emergency", "deadline"):
            if marker in text:
                urgency = "high"
                break

        # Crude keyword extraction: the first ten purely-alphabetic tokens
        # longer than four characters, in order of appearance (dupes kept).
        keywords = [tok for tok in text.split() if len(tok) > 4 and tok.isalpha()][:10]

        return {
            "sentiment": sentiment,
            "sentiment_confidence": sentiment_confidence,
            "topic": topic,
            "topic_confidence": topic_confidence,
            "intent": intent,
            "urgency": urgency,
            "categories": [topic],
            "keywords": keywords,
            "reasoning": f"Email classified as {topic} with {sentiment} sentiment. Intent appears to be {intent}.",
            "suggested_labels": [topic.lower().replace(" & ", "_").replace(" ", "_"), urgency],
            "risk_flags": ["potential_spam"] if "unsubscribe" in text else [],
        }

    def _score_sentiment(self, text: str) -> tuple:
        """Return (label, confidence) from positive/negative word hits."""
        pos_hits = sum(word in text for word in
                       ("good", "great", "excellent", "happy", "love", "thank", "appreciate"))
        neg_hits = sum(word in text for word in
                       ("bad", "terrible", "problem", "issue", "error", "failed", "hate", "angry"))
        if pos_hits > neg_hits:
            return "positive", min(0.6 + pos_hits * 0.1, 0.95)
        if neg_hits > pos_hits:
            return "negative", min(0.6 + neg_hits * 0.1, 0.95)
        return "neutral", 0.7

    def _classify_topic(self, text: str) -> tuple:
        """Return (topic, confidence); falls back to "General" on no hits.

        Ties are broken by declaration order (first topic with the max score),
        matching dict insertion-order semantics.
        """
        patterns = {
            "Work & Business": ["meeting", "project", "deadline", "work", "office", "business", "report"],
            "Personal & Family": ["family", "friend", "personal", "home", "birthday", "vacation"],
            "Finance": ["invoice", "payment", "budget", "money", "bank", "transaction"],
            "Marketing": ["promotion", "offer", "discount", "sale", "newsletter", "subscribe"],
            "Technical": ["bug", "error", "code", "system", "update", "software", "server"],
        }
        best_topic, best_score = "General", 0
        for name, words in patterns.items():
            hits = sum(w in text for w in words)
            if hits > best_score:
                best_topic, best_score = name, hits
        if best_score == 0:
            return "General", 0.5
        return best_topic, min(0.5 + best_score * 0.15, 0.95)

    def _detect_intent(self, text: str) -> str:
        """Classify the author's intent with a first-match rule chain."""
        if "?" in text or any(q in text for q in ("what", "how", "when", "where", "why")):
            return "question"
        if any(p in text for p in ("please", "could you", "can you", "would you")):
            return "request"
        if any(g in text for g in ("thank", "thanks", "appreciate")):
            return "gratitude"
        if any(a in text for a in ("sorry", "apologize", "apologies")):
            return "apology"
        return "informational"
class MockDashboardService:
    """Mock Dashboard Service for statistics.

    Produces randomized but range-bounded numbers so the dashboard always has
    plausible data to render; a fresh snapshot is generated on every call.
    """

    def get_stats(self) -> Dict[str, Any]:
        """Build and return one randomized statistics snapshot."""
        stats: Dict[str, Any] = {
            "total_emails": random.randint(1000, 5000),
            "auto_labeled": random.randint(500, 2000),
            "categories": random.randint(8, 15),
        }
        # Human-readable "Hh Mm" duration string for the UI.
        stats["time_saved"] = f"{random.randint(10, 50)}h {random.randint(0, 59)}m"
        stats["weekly_growth"] = {
            "emails": random.randint(50, 200),
            "percentage": round(random.uniform(5, 25), 1),
        }
        return stats
class MockWorkflowEngine:
    """Mock Workflow Engine exposing a fixed catalogue of three workflows."""

    def __init__(self):
        # workflow id -> static metadata shown in the workflow picker
        self.workflows = {
            "default": {"name": "Default Workflow", "description": "Standard email processing", "nodes": 4},
            "ai_enhanced": {"name": "AI Enhanced", "description": "Full AI analysis pipeline", "nodes": 6},
            "quick_filter": {"name": "Quick Filter", "description": "Fast categorization only", "nodes": 2},
        }

    def list_workflows(self) -> List[Dict[str, Any]]:
        """Return each workflow as a dict whose first key is its id."""
        catalogue = []
        for wf_id, meta in self.workflows.items():
            entry: Dict[str, Any] = {"id": wf_id}
            entry.update(meta)
            catalogue.append(entry)
        return catalogue

    def run_workflow(self, workflow_id: str, email_data: Dict[str, Any]) -> Dict[str, Any]:
        """Pretend to execute a workflow on one email; always reports success."""
        time.sleep(0.5)  # simulate backend processing latency
        meta = self.workflows.get(workflow_id, {})
        return {
            "status": "completed",
            "workflow_id": workflow_id,
            "processed_at": datetime.now().isoformat(),
            "results": {
                # Unknown workflow ids report zero executed nodes.
                "nodes_executed": meta.get("nodes", 0),
                "success": True,
            },
        }
# Initialize mock services as module-level singletons — presumably shared by
# the Gradio UI handlers defined later in the file (TODO confirm against the
# rest of the app, which is outside this chunk).
nlp_engine = MockNLPEngine()
dashboard_service = MockDashboardService()
workflow_engine = MockWorkflowEngine()