import gradio as gr
import json
import time
import random
from datetime import datetime
from typing import Dict, Any, List, Optional
# Simulated backend services (in production, these would connect to the actual backend)
class MockNLPEngine:
    """Mock NLP Engine for email analysis"""

    # Keyword lexicons driving the heuristic classifiers below.
    # Matching is plain substring search over the lowercased text.
    _POSITIVE_WORDS = ("good", "great", "excellent", "happy", "love", "thank", "appreciate")
    _NEGATIVE_WORDS = ("bad", "terrible", "problem", "issue", "error", "failed", "hate", "angry")
    _TOPIC_PATTERNS = {
        "Work & Business": ("meeting", "project", "deadline", "work", "office", "business", "report"),
        "Personal & Family": ("family", "friend", "personal", "home", "birthday", "vacation"),
        "Finance": ("invoice", "payment", "budget", "money", "bank", "transaction"),
        "Marketing": ("promotion", "offer", "discount", "sale", "newsletter", "subscribe"),
        "Technical": ("bug", "error", "code", "system", "update", "software", "server"),
    }
    _QUESTION_WORDS = ("what", "how", "when", "where", "why")
    _REQUEST_PHRASES = ("please", "could you", "can you", "would you")
    _GRATITUDE_WORDS = ("thank", "thanks", "appreciate")
    _APOLOGY_WORDS = ("sorry", "apologize", "apologies")
    _URGENT_WORDS = ("urgent", "asap", "immediately", "critical", "emergency", "deadline")

    def analyze_email(self, subject: str, content: str) -> Dict[str, Any]:
        """Run keyword-based sentiment/topic/intent/urgency analysis on an email.

        Subject and content are concatenated and lowercased, then scored by
        the class-level lexicons. Returns a flat dict of labels, confidences,
        extracted keywords, and suggested folder labels.
        """
        blob = f"{subject} {content}".lower()

        sentiment, sentiment_confidence = self._score_sentiment(blob)
        topic, topic_confidence = self._classify_topic(blob)
        intent = self._detect_intent(blob)
        urgency = "high" if any(w in blob for w in self._URGENT_WORDS) else "normal"

        # Crude keyword extraction: first ten purely-alphabetic tokens longer
        # than four characters, in order of appearance.
        keywords = [w for w in blob.split() if len(w) > 4 and w.isalpha()][:10]

        return {
            "sentiment": sentiment,
            "sentiment_confidence": sentiment_confidence,
            "topic": topic,
            "topic_confidence": topic_confidence,
            "intent": intent,
            "urgency": urgency,
            "categories": [topic],
            "keywords": keywords,
            "reasoning": f"Email classified as {topic} with {sentiment} sentiment. Intent appears to be {intent}.",
            "suggested_labels": [topic.lower().replace(" & ", "_").replace(" ", "_"), urgency],
            "risk_flags": ["potential_spam"] if "unsubscribe" in blob else [],
        }

    def _score_sentiment(self, blob: str):
        """Return (label, confidence) from positive/negative keyword hit counts."""
        pos = sum(1 for w in self._POSITIVE_WORDS if w in blob)
        neg = sum(1 for w in self._NEGATIVE_WORDS if w in blob)
        if pos > neg:
            return "positive", min(0.6 + pos * 0.1, 0.95)
        if neg > pos:
            return "negative", min(0.6 + neg * 0.1, 0.95)
        # Ties (including zero hits on both sides) fall through to neutral.
        return "neutral", 0.7

    def _classify_topic(self, blob: str):
        """Return (topic, confidence); first topic with the highest hit count wins."""
        scores = {}
        for candidate, kws in self._TOPIC_PATTERNS.items():
            hits = sum(kw in blob for kw in kws)
            if hits:
                scores[candidate] = hits
        if not scores:
            return "General", 0.5
        best = max(scores, key=scores.get)
        return best, min(0.5 + scores[best] * 0.15, 0.95)

    def _detect_intent(self, blob: str) -> str:
        """Pick one of five coarse intents; earlier checks take precedence."""
        if "?" in blob or any(w in blob for w in self._QUESTION_WORDS):
            return "question"
        if any(p in blob for p in self._REQUEST_PHRASES):
            return "request"
        if any(w in blob for w in self._GRATITUDE_WORDS):
            return "gratitude"
        if any(w in blob for w in self._APOLOGY_WORDS):
            return "apology"
        return "informational"
class MockDashboardService:
"""Mock Dashboard Service for statistics"""
def get_stats(self) -> Dict[str, Any]:
return {
"total_emails": random.randint(1000, 5000),
"auto_labeled": random.randint(500, 2000),
"categories": random.randint(8, 15),
"time_saved": f"{random.randint(10, 50)}h {random.randint(0, 59)}m",
"weekly_growth": {
"emails": random.randint(50, 200),
"percentage": round(random.uniform(5, 25), 1)
}
}
class MockWorkflowEngine:
    """Mock Workflow Engine"""

    def __init__(self):
        # Registry of built-in workflow definitions, keyed by workflow id.
        self.workflows = {
            "default": {"name": "Default Workflow", "description": "Standard email processing", "nodes": 4},
            "ai_enhanced": {"name": "AI Enhanced", "description": "Full AI analysis pipeline", "nodes": 6},
            "quick_filter": {"name": "Quick Filter", "description": "Fast categorization only", "nodes": 2},
        }

    def list_workflows(self) -> List[Dict[str, Any]]:
        """Return every registered workflow as a dict with its id folded in."""
        listing = []
        for wf_id, meta in self.workflows.items():
            entry = {"id": wf_id}
            entry.update(meta)
            listing.append(entry)
        return listing

    def run_workflow(self, workflow_id: str, email_data: Dict[str, Any]) -> Dict[str, Any]:
        """Pretend to execute a workflow against ``email_data``.

        Sleeps briefly to simulate processing; unknown ids still "complete"
        but report zero nodes executed.
        """
        time.sleep(0.5)  # simulate processing latency
        node_count = self.workflows.get(workflow_id, {}).get("nodes", 0)
        return {
            "status": "completed",
            "workflow_id": workflow_id,
            "processed_at": datetime.now().isoformat(),
            "results": {
                "nodes_executed": node_count,
                "success": True,
            },
        }
# Initialize mock services as module-level singletons.
# NOTE(review): presumably shared by the Gradio UI callbacks defined later in
# this file (not visible in this chunk) — confirm before refactoring.
nlp_engine = MockNLPEngine()
dashboard_service = MockDashboardService()
workflow_engine = MockWorkflowEngine()
# |