diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -2,6 +2,7 @@
š ARF Ultimate Investor Demo v3.8.0 - ENTERPRISE EDITION
Main entry point with comprehensive 5-tab interface
Uses actual ARF OSS v3.3.6 framework
+FULLY CORRECTED VERSION - Integrated with all components
"""
import logging
@@ -9,8 +10,10 @@ import sys
import traceback
import json
import datetime
+import asyncio
+import time
from pathlib import Path
-from typing import Dict, List, Any, Optional, Tuple
+from typing import Dict, List, Any, Optional, Tuple, Union
# Configure logging
logging.basicConfig(
@@ -23,1658 +26,797 @@ logging.basicConfig(
)
logger = logging.getLogger(__name__)
-# Add parent directory to path for module imports
+# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent))
+# ===========================================
+# ARF FRAMEWORK IMPORT - CORRECTED BASED ON PACKAGE STRUCTURE
+# ===========================================
+ARF_OSS_AVAILABLE = False
+OSS_VERSION = "3.3.6"
+HealingIntent = None
+IntentSource = None
+IntentStatus = None
+OSSMCPClient = None
+
try:
- # Import ARF OSS framework (actual package)
+ # ATTEMPT 1: Import from public package (correct based on pyproject.toml)
try:
from agentic_reliability_framework import __version__ as arf_version
- from agentic_reliability_framework.arf_core.models.healing_intent import (
- HealingIntent, create_scale_out_intent, create_rollback_intent
- )
- from agentic_reliability_framework.arf_core.engine.simple_mcp_client import OSSMCPClient
- from agentic_reliability_framework.engine.mcp_server import MCPServer, MCPMode
+ from agentic_reliability_framework.models import HealingIntent, IntentSource, IntentStatus
+ from agentic_reliability_framework.engine import OSSMCPClient
ARF_OSS_AVAILABLE = True
OSS_VERSION = arf_version
- logger.info(f"✅ Successfully imported ARF OSS v{OSS_VERSION}")
-
- # Create OSS client instance
- oss_client = OSSMCPClient()
+ logger.info(f"✅ Successfully imported ARF OSS v{OSS_VERSION} from public package")
- except ImportError as e:
- logger.warning(f"Failed to import ARF OSS: {e}")
- ARF_OSS_AVAILABLE = False
- OSS_VERSION = "3.3.6 (Mock)"
+ except ImportError as e1:
+ logger.debug(f"Attempt 1 failed: {e1}")
- # Mock classes for demo
- class HealingIntent:
- def __init__(self, action: str, component: str, parameters: Dict, **kwargs):
- self.action = action
- self.component = component
- self.parameters = parameters
- self.justification = kwargs.get('justification', '')
- self.confidence = kwargs.get('confidence', 0.85)
- self.similar_incidents = kwargs.get('similar_incidents', [])
- self.rag_similarity_score = kwargs.get('rag_similarity_score')
+ # ATTEMPT 2: Import from modules (development structure)
+ try:
+ # For Hugging Face demo, use enhanced mock classes
+ logger.info("Using enhanced mock classes for demo environment")
+
+ # Define enums matching the actual structure
+ class IntentSource:
+ OSS_ANALYSIS = "oss_analysis"
+ RAG_SIMILARITY = "rag_similarity"
+ HUMAN_OVERRIDE = "human_override"
+ AUTOMATED_LEARNING = "automated_learning"
- def to_enterprise_request(self) -> Dict:
- return {
- 'action': self.action,
- 'component': self.component,
- 'parameters': self.parameters,
- 'justification': self.justification,
- 'confidence': self.confidence,
- 'requires_enterprise': True,
- 'oss_metadata': {
- 'similar_incidents_count': len(self.similar_incidents),
- 'rag_used': self.rag_similarity_score is not None
- }
- }
+ class IntentStatus:
+ CREATED = "created"
+ OSS_ADVISORY_ONLY = "oss_advisory_only"
+ PENDING_EXECUTION = "pending_execution"
+ EXECUTING = "executing"
+ COMPLETED = "completed"
+ FAILED = "failed"
- def mark_as_oss_advisory(self):
- return self
-
- class OSSMCPClient:
- def __init__(self):
- self.mode = "advisory"
+ # Enhanced HealingIntent based on actual structure
+ class HealingIntent:
+ def __init__(self, action: str, component: str, parameters: Dict, **kwargs):
+ self.action = action
+ self.component = component
+ self.parameters = parameters
+ self.justification = kwargs.get('justification', '')
+ self.confidence = kwargs.get('confidence', 0.85)
+ self.similar_incidents = kwargs.get('similar_incidents', [])
+ self.rag_similarity_score = kwargs.get('rag_similarity_score')
+ self.source = kwargs.get('source', IntentSource.OSS_ANALYSIS)
+ self.status = kwargs.get('status', IntentStatus.CREATED)
+ self.intent_id = kwargs.get('intent_id', f"intent_{int(time.time())}")
+ self.oss_edition = "oss"
+ self.requires_enterprise = True
+ self.execution_allowed = False
+ self.created_at = time.time()
+
+ def to_enterprise_request(self) -> Dict:
+ return {
+ 'action': self.action,
+ 'component': self.component,
+ 'parameters': self.parameters,
+ 'justification': self.justification,
+ 'confidence': self.confidence,
+ 'requires_enterprise': self.requires_enterprise,
+ 'oss_metadata': {
+ 'similar_incidents_count': len(self.similar_incidents),
+ 'rag_used': self.rag_similarity_score is not None,
+ 'source': self.source,
+ 'oss_edition': self.oss_edition,
+ 'execution_allowed': self.execution_allowed
+ },
+ 'intent_id': self.intent_id,
+ 'created_at': self.created_at
+ }
- async def analyze_and_recommend(self, tool_name: str, component: str,
- parameters: Dict, context: Optional[Dict] = None) -> HealingIntent:
- # Simulate RAG similarity search
- similar_incidents = [
- {"id": "inc_001", "similarity": 0.78, "resolution": "scaled_out", "component": "redis"},
- {"id": "inc_045", "similarity": 0.65, "resolution": "restarted", "component": "database"},
- {"id": "inc_089", "similarity": 0.59, "resolution": "circuit_breaker", "component": "api"}
- ]
+ def mark_as_oss_advisory(self):
+ self.status = IntentStatus.OSS_ADVISORY_ONLY
+ self.execution_allowed = False
+ return self
- return HealingIntent(
- action=tool_name,
- component=component,
- parameters=parameters,
- justification=f"OSS Analysis: Based on {len(similar_incidents)} similar incidents, recommend {tool_name} for {component}",
- confidence=0.82 + (len(similar_incidents) * 0.01),
- similar_incidents=similar_incidents,
- rag_similarity_score=0.72
- )
-
- oss_client = OSSMCPClient()
-
- MCPMode = type('MCPMode', (), {
- 'ADVISORY': 'advisory',
- 'APPROVAL': 'approval',
- 'AUTONOMOUS': 'autonomous'
- })
+ @classmethod
+ def from_analysis(cls, action: str, component: str, parameters: Dict,
+ justification: str, confidence: float, **kwargs):
+ return cls(
+ action=action,
+ component=component,
+ parameters=parameters,
+ justification=justification,
+ confidence=confidence,
+ similar_incidents=kwargs.get('similar_incidents'),
+ rag_similarity_score=kwargs.get('rag_similarity_score'),
+ source=kwargs.get('source', IntentSource.OSS_ANALYSIS)
+ )
+
+ class OSSMCPClient:
+ def __init__(self):
+ self.mode = "advisory"
+
+ async def analyze_and_recommend(self, tool_name: str, component: str,
+ parameters: Dict, context: Optional[Dict] = None) -> HealingIntent:
+ similar_incidents = [
+ {"id": "inc_001", "similarity": 0.78, "resolution": "scaled_out",
+ "component": "redis", "success": True, "timestamp": "2024-01-15T10:30:00"},
+ {"id": "inc_045", "similarity": 0.65, "resolution": "restarted",
+ "component": "database", "success": True, "timestamp": "2024-01-10T14:20:00"},
+ {"id": "inc_089", "similarity": 0.59, "resolution": "circuit_breaker",
+ "component": "api", "success": False, "timestamp": "2024-01-05T09:15:00"}
+ ]
+
+ rag_score = sum(inc["similarity"] for inc in similar_incidents[:3]) / 3 if similar_incidents else 0.67
+
+ return HealingIntent.from_analysis(
+ action=tool_name,
+ component=component,
+ parameters=parameters,
+ justification=f"OSS Analysis: Based on {len(similar_incidents)} similar historical incidents with {rag_score:.0%} average similarity",
+ confidence=0.82 + (len(similar_incidents) * 0.01),
+ similar_incidents=similar_incidents,
+ rag_similarity_score=rag_score,
+ source=IntentSource.RAG_SIMILARITY
+ ).mark_as_oss_advisory()
+
+ # Set module-level variables
+ IntentSource = IntentSource
+ IntentStatus = IntentStatus
+ OSSMCPClient = OSSMCPClient
+ logger.info("✅ Using enhanced mock classes for demo")
+
+ except Exception as e2:
+ logger.warning(f"Enhanced mock creation failed: {e2}")
+
+ # Minimal fallback
+ class HealingIntent:
+ def __init__(self, **kwargs):
+ pass
+ def to_enterprise_request(self):
+ return {"error": "ARF not available"}
+ @classmethod
+ def from_analysis(cls, **kwargs):
+ return cls()
+ def mark_as_oss_advisory(self):
+ return self
+
+ class OSSMCPClient:
+ def __init__(self):
+ self.mode = "advisory"
+ async def analyze_and_recommend(self, **kwargs):
+ return HealingIntent()
+
+except Exception as e:
+ logger.error(f"❌ CRITICAL: Failed to initialize ARF framework: {e}")
+ logger.error(traceback.format_exc())
+ # Provide absolute minimal fallbacks
+ class HealingIntent:
+ def __init__(self, **kwargs):
+ pass
+ def to_enterprise_request(self):
+ return {"error": "ARF not available"}
+ @classmethod
+ def from_analysis(cls, **kwargs):
+ return cls()
+ class OSSMCPClient:
+ async def analyze_and_recommend(self, **kwargs):
+ return HealingIntent()
+
+# ===========================================
+# IMPORT SUPPORTING MODULES
+# ===========================================
+try:
+ # Import UI components
+ from ui.components import (
+ create_header, create_status_bar, create_tab1_incident_demo,
+ create_tab2_business_roi, create_tab3_audit_trail,
+ create_tab4_enterprise_features, create_tab5_learning_engine,
+ create_footer
+ )
- # Import Gradio and visualization libraries
- import gradio as gr
- import plotly.graph_objects as go
- import plotly.express as px
- import pandas as pd
- import numpy as np
- from plotly.subplots import make_subplots
+ # Import demo orchestrator
+ from demo.orchestrator import DemoOrchestrator
- # ===========================================
- # COMPREHENSIVE DATA MODELS
- # ===========================================
+ # Import visualizations
+ from core.visualizations import EnhancedVisualizationEngine
- class AuditTrailManager:
- """Manage audit trail and execution history"""
-
- def __init__(self):
- self.execution_history = []
- self.incident_history = []
- self._initialize_sample_data()
-
- def _initialize_sample_data(self):
- """Initialize with sample historical data"""
- base_time = datetime.datetime.now() - datetime.timedelta(hours=2)
-
- # Sample execution history
- sample_executions = [
- self._create_execution_entry(
- base_time - datetime.timedelta(minutes=90),
- "Cache Miss Storm", 4, 7200, "✅ Executed", "Auto-scaled cache"
- ),
- self._create_execution_entry(
- base_time - datetime.timedelta(minutes=75),
- "Memory Leak", 3, 5200, "✅ Executed", "Fixed memory leak"
- ),
- self._create_execution_entry(
- base_time - datetime.timedelta(minutes=60),
- "API Rate Limit", 4, 2800, "✅ Executed", "Increased rate limits"
- ),
- self._create_execution_entry(
- base_time - datetime.timedelta(minutes=45),
- "DB Connection Pool", 4, 3800, "✅ Executed", "Scaled connection pool"
- ),
- ]
-
- self.execution_history = sample_executions
-
- # Sample incident history
- services = ["API Gateway", "Database", "Redis Cache", "Auth Service", "Payment Service"]
-
- for i in range(10):
- incident_time = base_time - datetime.timedelta(minutes=i * 15)
- self.incident_history.append({
- "timestamp": incident_time,
- "time_str": incident_time.strftime("%H:%M"),
- "service": services[i % len(services)],
- "type": "Cache Miss Storm" if i % 3 == 0 else "Memory Leak",
- "severity": 3 if i % 3 == 0 else 2,
- "description": f"High latency on {services[i % len(services)]}",
- "id": f"inc_{i:03d}"
- })
-
- def _create_execution_entry(self, timestamp, scenario, actions, savings, status, details):
- """Create an execution history entry"""
- return {
- "timestamp": timestamp,
- "time_str": timestamp.strftime("%H:%M"),
- "scenario": scenario,
- "actions": str(actions),
- "savings": f"${savings:,}",
- "status": status,
- "details": details,
- "id": f"exec_{len(self.execution_history):03d}"
+ logger.info("✅ Successfully imported all supporting modules")
+
+except ImportError as e:
+ logger.warning(f"Could not import some modules: {e}")
+ # We'll define minimal versions inline if needed
+
+# ===========================================
+# INCIDENT SCENARIOS (from original app.py)
+# ===========================================
+INCIDENT_SCENARIOS = {
+ "Cache Miss Storm": {
+ "description": "Redis cluster experiencing 80% cache miss rate causing database overload",
+ "severity": "CRITICAL",
+ "metrics": {
+ "Cache Hit Rate": "18.5% (Critical)",
+ "Database Load": "92% (Overloaded)",
+ "Response Time": "1850ms (Slow)",
+ "Affected Users": "45,000",
+ "Eviction Rate": "125/sec"
+ },
+ "impact": {
+ "Revenue Loss": "$8,500/hour",
+ "Page Load Time": "+300%",
+ "Users Impacted": "45,000",
+ "SLA Violation": "Yes",
+ "Customer Sat": "-40%"
+ },
+ "component": "redis_cache",
+ "oss_analysis": {
+ "status": "✅ ARF OSS Analysis Complete",
+ "recommendations": [
+ "Increase Redis cache memory allocation",
+ "Implement cache warming strategy",
+ "Optimize key patterns",
+ "Add circuit breaker for database fallback"
+ ],
+ "estimated_time": "60+ minutes",
+ "engineers_needed": "2-3 SREs + 1 DBA",
+ "manual_effort": "High",
+ "total_cost": "$8,500"
+ },
+ "enterprise_results": {
+ "actions_completed": [
+ "✅ Auto-scaled Redis cluster: 4GB → 8GB",
+ "✅ Deployed intelligent cache warming",
+ "✅ Optimized 12 key patterns with ML",
+ "✅ Implemented circuit breaker"
+ ],
+ "metrics_improvement": {
+ "Cache Hit Rate": "18.5% → 72%",
+ "Response Time": "1850ms → 450ms",
+ "Database Load": "92% → 45%"
+ },
+ "business_impact": {
+ "Recovery Time": "60 min → 12 min",
+ "Cost Saved": "$7,200",
+ "Users Impacted": "45,000 → 0",
+ "Revenue Protected": "$1,700",
+ "MTTR Improvement": "80% reduction"
}
+ }
+ },
+ "Database Connection Pool Exhaustion": {
+ "description": "Database connection pool exhausted causing API timeouts and user failures",
+ "severity": "HIGH",
+ "metrics": {
+ "Active Connections": "98/100 (Critical)",
+ "API Latency": "2450ms",
+ "Error Rate": "15.2%",
+ "Queue Depth": "1250",
+ "Connection Wait": "45s"
+ },
+ "impact": {
+ "Revenue Loss": "$4,200/hour",
+ "Affected Services": "API Gateway, User Service, Payment",
+ "SLA Violation": "Yes",
+ "Partner Impact": "3 external APIs"
+ },
+ "component": "database"
+ },
+ "Memory Leak in Production": {
+ "description": "Java service memory leak causing gradual performance degradation",
+ "severity": "HIGH",
+ "metrics": {
+ "Memory Usage": "96% (Critical)",
+ "GC Pause Time": "4500ms",
+ "Error Rate": "28.5%",
+ "Restart Frequency": "12/hour",
+ "Heap Fragmentation": "42%"
+ },
+ "impact": {
+ "Revenue Loss": "$5,500/hour",
+ "Session Loss": "8,500 users",
+ "Customer Impact": "High",
+ "Support Tickets": "+300%"
+ },
+ "component": "java_service"
+ }
+}
+
+# ===========================================
+# AUDIT TRAIL MANAGER (from original app.py)
+# ===========================================
+class AuditTrailManager:
+ """Manage audit trail and execution history"""
+
+ def __init__(self):
+ self.execution_history = []
+ self.incident_history = []
+ self._initialize_sample_data()
+
+ def _initialize_sample_data(self):
+ """Initialize with sample historical data"""
+ base_time = datetime.datetime.now() - datetime.timedelta(hours=2)
- def add_execution(self, scenario: str, actions: List[str],
- savings: int, approval_required: bool, details: str = ""):
- """Add new execution to history"""
- entry = self._create_execution_entry(
- datetime.datetime.now(),
- scenario,
- len(actions),
- savings,
- "✅ Approved & Executed" if approval_required else "✅ Auto-Executed",
- details
- )
- self.execution_history.insert(0, entry)
- return entry
-
- def add_incident(self, scenario_name: str, metrics: Dict):
- """Add incident to history"""
- entry = {
- "timestamp": datetime.datetime.now(),
- "time_str": datetime.datetime.now().strftime("%H:%M"),
- "service": "Demo System",
- "type": scenario_name,
- "severity": 3,
- "description": f"Demo incident: {scenario_name}",
- "id": f"inc_{len(self.incident_history):03d}"
- }
- self.incident_history.insert(0, entry)
- return entry
+ sample_executions = [
+ self._create_execution_entry(
+ base_time - datetime.timedelta(minutes=90),
+ "Cache Miss Storm", 4, 7200, "✅ Executed", "Auto-scaled cache"
+ ),
+ self._create_execution_entry(
+ base_time - datetime.timedelta(minutes=75),
+ "Memory Leak", 3, 5200, "✅ Executed", "Fixed memory leak"
+ ),
+ self._create_execution_entry(
+ base_time - datetime.timedelta(minutes=60),
+ "API Rate Limit", 4, 2800, "✅ Executed", "Increased rate limits"
+ ),
+ ]
- def get_execution_history_table(self, limit: int = 10) -> List[List]:
- """Get execution history for table display"""
- return [
- [entry["time_str"], entry["scenario"], entry["actions"],
- entry["status"], entry["savings"], entry["details"]]
- for entry in self.execution_history[:limit]
- ]
+ self.execution_history = sample_executions
- def get_incident_history_table(self, limit: int = 15) -> List[List]:
- """Get incident history for table display"""
- return [
- [entry["time_str"], entry["service"], entry["type"],
- f"{entry['severity']}/3", entry["description"]]
- for entry in self.incident_history[:limit]
- ]
+ services = ["API Gateway", "Database", "Redis Cache", "Auth Service"]
- def export_audit_trail(self) -> str:
- """Export audit trail as JSON"""
- total_savings = sum(
- int(e["savings"].replace("$", "").replace(",", ""))
- for e in self.execution_history
- if "$" in e["savings"]
- )
-
- return json.dumps({
- "executions": self.execution_history,
- "incidents": self.incident_history,
- "exported_at": datetime.datetime.now().isoformat(),
- "total_executions": len(self.execution_history),
- "total_incidents": len(self.incident_history),
- "total_savings": total_savings,
- "arf_version": OSS_VERSION,
- "oss_available": ARF_OSS_AVAILABLE
- }, indent=2, default=str)
+ for i in range(8):
+ incident_time = base_time - datetime.timedelta(minutes=i * 15)
+ self.incident_history.append({
+ "timestamp": incident_time,
+ "time_str": incident_time.strftime("%H:%M"),
+ "service": services[i % len(services)],
+ "type": "Cache Miss Storm" if i % 3 == 0 else "Memory Leak",
+ "severity": 3 if i % 3 == 0 else 2,
+ "description": f"High latency on {services[i % len(services)]}",
+ "id": f"inc_{i:03d}"
+ })
- # ===========================================
- # INCIDENT SCENARIOS
- # ===========================================
+ def _create_execution_entry(self, timestamp, scenario, actions, savings, status, details):
+ return {
+ "timestamp": timestamp,
+ "time_str": timestamp.strftime("%H:%M"),
+ "scenario": scenario,
+ "actions": str(actions),
+ "savings": f"${savings:,}",
+ "status": status,
+ "details": details,
+ "id": f"exec_{len(self.execution_history):03d}"
+ }
- INCIDENT_SCENARIOS = {
- "Cache Miss Storm": {
- "description": "Redis cluster experiencing 80% cache miss rate causing database overload",
- "severity": "CRITICAL",
- "metrics": {
- "Cache Hit Rate": "18.5% (Critical)",
- "Database Load": "92% (Overloaded)",
- "Response Time": "1850ms (Slow)",
- "Affected Users": "45,000",
- "Eviction Rate": "125/sec"
- },
- "impact": {
- "Revenue Loss": "$8,500/hour",
- "Page Load Time": "+300%",
- "Users Impacted": "45,000",
- "SLA Violation": "Yes",
- "Customer Sat": "-40%"
- },
- "oss_analysis": {
- "status": "✅ ARF OSS Analysis Complete",
- "recommendations": [
- "Increase Redis cache memory allocation",
- "Implement cache warming strategy",
- "Optimize key patterns (TTL adjustments)",
- "Add circuit breaker for database fallback",
- "Deploy monitoring for cache hit rate trends"
- ],
- "estimated_time": "60+ minutes",
- "engineers_needed": "2-3 SREs + 1 DBA",
- "manual_effort": "High",
- "total_cost": "$8,500",
- "healing_intent": "scale_out_cache"
- },
- "enterprise_results": {
- "actions_completed": [
- "✅ Auto-scaled Redis cluster: 4GB → 8GB",
- "✅ Deployed intelligent cache warming service",
- "✅ Optimized 12 key patterns with ML recommendations",
- "✅ Implemented circuit breaker with 95% success rate",
- "✅ Validated recovery with automated testing"
- ],
- "metrics_improvement": {
- "Cache Hit Rate": "18.5% → 72%",
- "Response Time": "1850ms → 450ms",
- "Database Load": "92% → 45%",
- "Throughput": "1250 → 2450 req/sec"
- },
- "business_impact": {
- "Recovery Time": "60 min → 12 min",
- "Cost Saved": "$7,200",
- "Users Impacted": "45,000 → 0",
- "Revenue Protected": "$1,700",
- "MTTR Improvement": "80% reduction"
- }
- }
- },
- "Database Connection Pool Exhaustion": {
- "description": "Database connection pool exhausted causing API timeouts and user failures",
- "severity": "HIGH",
- "metrics": {
- "Active Connections": "98/100 (Critical)",
- "API Latency": "2450ms",
- "Error Rate": "15.2%",
- "Queue Depth": "1250",
- "Connection Wait": "45s"
- },
- "impact": {
- "Revenue Loss": "$4,200/hour",
- "Affected Services": "API Gateway, User Service, Payment",
- "SLA Violation": "Yes",
- "Partner Impact": "3 external APIs"
- }
- },
- "Memory Leak in Production": {
- "description": "Java service memory leak causing gradual performance degradation",
- "severity": "HIGH",
- "metrics": {
- "Memory Usage": "96% (Critical)",
- "GC Pause Time": "4500ms",
- "Error Rate": "28.5%",
- "Restart Frequency": "12/hour",
- "Heap Fragmentation": "42%"
- },
- "impact": {
- "Revenue Loss": "$5,500/hour",
- "Session Loss": "8,500 users",
- "Customer Impact": "High",
- "Support Tickets": "+300%"
- }
+ def add_execution(self, scenario: str, actions: List[str],
+ savings: int, approval_required: bool, details: str = ""):
+ entry = self._create_execution_entry(
+ datetime.datetime.now(),
+ scenario,
+ len(actions),
+ savings,
+ "✅ Approved & Executed" if approval_required else "✅ Auto-Executed",
+ details
+ )
+ self.execution_history.insert(0, entry)
+ return entry
+
+ def add_incident(self, scenario_name: str, metrics: Dict):
+ entry = {
+ "timestamp": datetime.datetime.now(),
+ "time_str": datetime.datetime.now().strftime("%H:%M"),
+ "service": "Demo System",
+ "type": scenario_name,
+ "severity": 3,
+ "description": f"Demo incident: {scenario_name}",
+ "id": f"inc_{len(self.incident_history):03d}"
}
- }
+ self.incident_history.insert(0, entry)
+ return entry
+
+ def get_execution_history_table(self, limit: int = 10) -> List[List]:
+ return [
+ [entry["time_str"], entry["scenario"], entry["actions"],
+ entry["status"], entry["savings"], entry["details"]]
+ for entry in self.execution_history[:limit]
+ ]
- # ===========================================
- # BUSINESS LOGIC
- # ===========================================
+ def get_incident_history_table(self, limit: int = 15) -> List[List]:
+ return [
+ [entry["time_str"], entry["service"], entry["type"],
+ f"{entry['severity']}/3", entry["description"]]
+ for entry in self.incident_history[:limit]
+ ]
- class BusinessLogic:
- """Business logic for the demo"""
+ def export_audit_trail(self) -> str:
+ total_savings = sum(
+ int(e["savings"].replace("$", "").replace(",", ""))
+ for e in self.execution_history
+ if "$" in e["savings"]
+ )
- def __init__(self, audit_manager: AuditTrailManager, oss_client):
- self.audit_manager = audit_manager
- self.oss_client = oss_client
- self.license_info = {
- "valid": True,
- "customer_name": "Demo Enterprise Corp",
- "customer_email": "demo@enterprise.com",
- "tier": "ENTERPRISE",
- "expires_at": "2024-12-31T23:59:59",
- "features": ["autonomous_healing", "compliance", "audit_trail", "multi_cloud"],
- "max_services": 100,
- "max_incidents_per_month": 1000,
- "status": "✅ Active"
- }
+ return json.dumps({
+ "executions": self.execution_history,
+ "incidents": self.incident_history,
+ "exported_at": datetime.datetime.now().isoformat(),
+ "total_executions": len(self.execution_history),
+ "total_incidents": len(self.incident_history),
+ "total_savings": total_savings,
+ "arf_version": OSS_VERSION,
+ "oss_available": ARF_OSS_AVAILABLE
+ }, indent=2, default=str)
+
+# ===========================================
+# CREATE DEMO INTERFACE - CORRECTED
+# ===========================================
+def create_demo_interface():
+ """Create the 5-tab demo interface - CORRECTED VERSION"""
+
+ # Import gradio here to avoid issues
+ import gradio as gr
+
+ # Initialize components
+ audit_manager = AuditTrailManager()
+
+ # Create OSS client
+ oss_client = OSSMCPClient() if OSSMCPClient else None
+
+ # Create orchestrator
+ orchestrator = DemoOrchestrator(arf_client=oss_client)
+
+ # Create visualization engine
+ try:
+ viz_engine = EnhancedVisualizationEngine()
+ except:
+ # Fallback to simple visualizations
+ class SimpleVizEngine:
+ def create_interactive_timeline(self, scenario):
+ import plotly.graph_objects as go
+ fig = go.Figure()
+ fig.add_trace(go.Scatter(x=[1,2,3], y=[1,2,1]))
+ fig.update_layout(title="Timeline", height=400)
+ return fig
+ def create_executive_dashboard(self, roi=None):
+ import plotly.graph_objects as go
+ fig = go.Figure()
+ fig.add_trace(go.Bar(x=['A','B','C'], y=[1,2,3]))
+ fig.update_layout(title="Dashboard", height=400)
+ return fig
+ viz_engine = SimpleVizEngine()
+
+ # Custom CSS
+ custom_css = """
+ .gradio-container {
+ max-width: 1800px !important;
+ margin: auto !important;
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif !important;
+ }
+ h1 {
+ background: linear-gradient(90deg, #1a365d 0%, #2d3748 100%);
+ -webkit-background-clip: text;
+ -webkit-text-fill-color: transparent;
+ background-clip: text;
+ font-weight: 800 !important;
+ font-size: 2.5rem !important;
+ margin-bottom: 0.5rem !important;
+ }
+ .critical { color: #FF6B6B !important; font-weight: 900 !important; }
+ .success { color: #4ECDC4 !important; font-weight: 900 !important; }
+ .tab-nav { background: linear-gradient(90deg, #f8fafc 0%, #ffffff 100%) !important; }
+ .metric-card {
+ background: white !important;
+ border-radius: 10px !important;
+ padding: 20px !important;
+ box-shadow: 0 2px 8px rgba(0,0,0,0.06) !important;
+ border-left: 4px solid #4ECDC4 !important;
+ margin-bottom: 15px !important;
+ }
+ """
+
+ with gr.Blocks(
+ title=f"š ARF Investor Demo v3.8.0",
+ theme=gr.themes.Soft(
+ primary_hue="blue",
+ secondary_hue="teal"
+ ),
+ css=custom_css
+ ) as demo:
- async def run_oss_analysis(self, scenario_name: str) -> Dict:
- """Run OSS analysis using actual ARF framework"""
- scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
+ # ============ HEADER ============
+ create_header(OSS_VERSION, ARF_OSS_AVAILABLE)
+
+ # ============ STATUS BAR ============
+ create_status_bar()
+
+ # ============ 5 TABS ============
+ with gr.Tabs():
+
+ # TAB 1: LIVE INCIDENT DEMO
+ with gr.TabItem("š„ Live Incident Demo"):
+ # Get components from UI module
+ (scenario_dropdown, scenario_description, metrics_display, impact_display,
+ timeline_output, oss_btn, enterprise_btn, approval_toggle, demo_btn,
+ approval_display, config_display, results_display) = create_tab1_incident_demo(
+ INCIDENT_SCENARIOS, "Cache Miss Storm"
+ )
- # Create HealingIntent using OSS client
- healing_intent = await self.oss_client.analyze_and_recommend(
- tool_name="scale_out",
- component="redis_cache",
- parameters={"scale_factor": 2.0, "resource_type": "memory"},
- context={
- "incident_type": scenario_name,
- "metrics": scenario.get("metrics", {}),
- "severity": scenario.get("severity", "HIGH")
- }
+ # TAB 2: BUSINESS IMPACT & ROI
+ with gr.TabItem("š° Business Impact & ROI"):
+ (dashboard_output, monthly_slider, impact_slider, team_slider,
+ calculate_btn, roi_output) = create_tab2_business_roi()
+
+ # TAB 3: AUDIT TRAIL & HISTORY
+ with gr.TabItem("š Audit Trail & History"):
+ (refresh_btn, clear_btn, export_btn, execution_table, savings_chart,
+ incident_table, memory_graph, export_text) = create_tab3_audit_trail()
+
+ # TAB 4: ENTERPRISE FEATURES
+ with gr.TabItem("š¢ Enterprise Features"):
+ (license_display, validate_btn, trial_btn, upgrade_btn, features_table,
+ compliance_display, integrations_table, mcp_mode) = create_tab4_enterprise_features()
+
+ # TAB 5: LEARNING ENGINE
+ with gr.TabItem("š§ Learning Engine"):
+ (learning_graph, graph_type, show_labels, search_query, search_btn,
+ clear_btn_search, search_results, stats_display, patterns_display,
+ performance_display) = create_tab5_learning_engine()
+
+ # ============ FOOTER ============
+ create_footer()
+
+ # ============ EVENT HANDLERS ============
+
+ # Scenario dropdown change
+ def update_scenario(scenario_name):
+ scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
+ timeline = viz_engine.create_interactive_timeline(scenario)
+ return (
+ f"### {scenario_name}\n{scenario.get('description', 'No description')}",
+ scenario.get("metrics", {}),
+ scenario.get("impact", {}),
+ timeline
)
+
+ scenario_dropdown.change(
+ fn=update_scenario,
+ inputs=[scenario_dropdown],
+ outputs=[scenario_description, metrics_display, impact_display, timeline_output]
+ )
+
+ # OSS Analysis button
+ async def run_oss_analysis(scenario_name):
+ scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
+ analysis = await orchestrator.analyze_incident(scenario_name, scenario)
- # Build analysis response
- analysis = scenario.get("oss_analysis", {}).copy()
- analysis["healing_intent"] = healing_intent.to_enterprise_request()
- analysis["arf_oss_version"] = OSS_VERSION
- analysis["analysis_timestamp"] = datetime.datetime.now().isoformat()
+ # Add to audit trail
+ audit_manager.add_incident(scenario_name, scenario.get("metrics", {}))
- # Add to incident history
- self.audit_manager.add_incident(scenario_name, scenario.get("metrics", {}))
+ # Update tables
+ incident_table_data = audit_manager.get_incident_history_table()
- return analysis
+ return analysis, incident_table_data
- def execute_enterprise_healing(self, scenario_name: str, approval_required: bool) -> Tuple[Dict, Dict]:
- """Execute enterprise healing"""
+ oss_btn.click(
+ fn=run_oss_analysis,
+ inputs=[scenario_dropdown],
+ outputs=[results_display, incident_table]
+ )
+
+ # Enterprise Healing button
+ def execute_healing(scenario_name, approval_required):
scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
- results = scenario.get("enterprise_results", {}).copy()
- # Add enterprise context
- results["enterprise_context"] = {
- "approval_required": approval_required,
- "execution_mode": "autonomous" if not approval_required else "approval",
- "timestamp": datetime.datetime.now().isoformat(),
- "license_tier": self.license_info["tier"]
+ # Create mock healing intent
+ healing_intent = {
+ "action": "scale_out",
+ "component": scenario.get("component", "unknown"),
+ "parameters": {"scale_factor": 2},
+ "justification": f"Healing {scenario_name}"
}
- # Calculate savings
- savings = 7200 if scenario_name == "Cache Miss Storm" else 4200
+ # Execute through orchestrator
+ execution = orchestrator.execute_healing(
+ scenario_name,
+ healing_intent,
+ mode="approval" if approval_required else "autonomous"
+ )
# Add to audit trail
- self.audit_manager.add_execution(
+ audit_manager.add_execution(
scenario_name,
- results.get("actions_completed", []),
- savings,
+ ["scale_out", "circuit_breaker", "monitoring"],
+ 7200,
approval_required,
- f"Healed {scenario_name} incident"
+ f"Healed {scenario_name}"
)
# Create approval HTML
- approval_html = self._create_approval_html(scenario_name, approval_required)
-
- return approval_html, results
-
- def _create_approval_html(self, scenario_name: str, approval_required: bool) -> str:
- """Create approval workflow HTML"""
if approval_required:
- return f"""
-
+ approval_html = """
+
-
- š”ļø
-
-
Approval Required
-
-
-
-
Action: Scale resources for {scenario_name}
-
Risk Level: Low
-
Blast Radius: Limited to affected service
-
Auto-rollback: Available
+
+ ā
+
Approved & Executed
-
-
-
Status
-
ā
Approved & Executed
-
-
- {datetime.datetime.now().strftime("%H:%M:%S")}
-
+
+ Action approved by system administrator and executed successfully.
"""
else:
- return f"""
-
+ approval_html = """
+
-
-
-
-
Action: Autonomous healing for {scenario_name}
-
Mode: Fully autonomous
-
Guardrails: Safety limits active
-
Rollback: Ready if needed
-
-
-
-
-
Status
-
ā
Successfully completed
-
-
- {datetime.datetime.now().strftime("%H:%M:%S")}
-
+
+ Action executed autonomously by ARF Enterprise.
"""
-
- def calculate_roi(self, monthly_incidents: int, avg_impact: int, team_size: int) -> Dict:
- """Calculate ROI"""
- try:
- annual_impact = monthly_incidents * 12 * avg_impact
- team_cost = team_size * 150000 # $150k per engineer
- savings = annual_impact * 0.82 # 82% savings with ARF
-
- roi_multiplier = savings / team_cost if team_cost > 0 else 0
-
- if roi_multiplier >= 5.0:
- recommendation = "š Excellent fit for ARF Enterprise"
- icon = "š"
- color = "#28a745"
- elif roi_multiplier >= 2.0:
- recommendation = "ā
Good ROI with ARF Enterprise"
- icon = "ā
"
- color = "#20c997"
- elif roi_multiplier >= 1.0:
- recommendation = "ā ļø Consider ARF OSS edition first"
- icon = "ā¹ļø"
- color = "#ffc107"
- else:
- recommendation = "š Start with ARF OSS (free)"
- icon = "š"
- color = "#6c757d"
-
- payback = (team_cost / (savings / 12)) if savings > 0 else 0
-
- return {
- "analysis": {
- "your_annual_impact": f"${annual_impact:,.0f}",
- "your_team_cost": f"${team_cost:,.0f}",
- "potential_savings": f"${savings:,.0f}",
- "your_roi_multiplier": f"{roi_multiplier:.1f}Ć",
- "vs_industry_average": "5.2Ć average ROI",
- "recommendation": f"{icon} {recommendation}",
- "recommendation_color": color,
- "payback_period": f"{payback:.1f} months" if savings > 0 else "N/A",
- "annual_savings_potential": f"${savings - team_cost:,.0f}" if savings > team_cost else "$0"
- }
- }
- except Exception as e:
- return {"error": f"Calculation error: {str(e)}"}
-
- # ===========================================
- # VISUALIZATION ENGINE
- # ===========================================
-
- class VisualizationEngine:
- """Enhanced visualization engine"""
-
- @staticmethod
- def create_incident_timeline() -> go.Figure:
- """Create interactive incident timeline"""
- fig = go.Figure()
-
- # Create timeline events
- now = datetime.datetime.now()
- events = [
- {"time": now - datetime.timedelta(minutes=25), "event": "š Cache hit rate drops to 18.5%", "type": "problem"},
- {"time": now - datetime.timedelta(minutes=22), "event": "ā ļø Alert: Database load hits 92%", "type": "alert"},
- {"time": now - datetime.timedelta(minutes=20), "event": "š¤ ARF detects pattern", "type": "detection"},
- {"time": now - datetime.timedelta(minutes=18), "event": "š§ Analysis: Cache Miss Storm identified", "type": "analysis"},
- {"time": now - datetime.timedelta(minutes=15), "event": "ā” Healing actions executed", "type": "action"},
- {"time": now - datetime.timedelta(minutes=12), "event": "ā
Cache hit rate recovers to 72%", "type": "recovery"},
- {"time": now - datetime.timedelta(minutes=10), "event": "š System stabilized", "type": "stable"}
- ]
-
- color_map = {
- "problem": "#FF6B6B", "alert": "#FFA726", "detection": "#42A5F5",
- "analysis": "#AB47BC", "action": "#66BB6A", "recovery": "#26A69A",
- "stable": "#2E7D32"
- }
- # Add events
- for event in events:
- fig.add_trace(go.Scatter(
- x=[event["time"]],
- y=[1],
- mode='markers+text',
- marker=dict(
- size=20,
- color=color_map[event["type"]],
- symbol='circle' if event["type"] in ['problem', 'alert'] else 'diamond',
- line=dict(width=2, color='white')
- ),
- text=[event["event"]],
- textposition="top center",
- hoverinfo="text",
- hovertemplate="
%{text}%{x|%H:%M:%S}
",
- showlegend=False
- ))
+ # Update execution table
+ execution_table_data = audit_manager.get_execution_history_table()
- # Add connecting lines
- times = [event["time"] for event in events]
- fig.add_trace(go.Scatter(
- x=times,
- y=[1] * len(times),
- mode='lines',
- line=dict(color='rgba(100, 100, 100, 0.3)', width=2, dash='dash'),
- hoverinfo='none',
- showlegend=False
- ))
-
- fig.update_layout(
- title="
Incident Timeline - Cache Miss Storm Resolution",
- xaxis_title="Time ā",
- yaxis=dict(
- showticklabels=False,
- range=[0.5, 1.5]
- ),
- height=450,
- showlegend=False,
- paper_bgcolor='white',
- plot_bgcolor='white',
- hovermode='closest',
- xaxis=dict(
- tickformat='%H:%M',
- gridcolor='rgba(200,200,200,0.2)',
- showgrid=True
- ),
- margin=dict(l=50, r=50, t=80, b=50)
- )
-
- return fig
+ return approval_html, execution, execution_table_data
- @staticmethod
- def create_business_dashboard() -> go.Figure:
- """Create executive business dashboard"""
- fig = make_subplots(
- rows=2, cols=2,
- subplot_titles=('Annual Cost Impact', 'Team Capacity Shift',
- 'MTTR Comparison', 'ROI Analysis'),
- vertical_spacing=0.15,
- horizontal_spacing=0.15
- )
-
- # 1. Cost Impact
- categories = ['Without ARF', 'With ARF Enterprise', 'Net Savings']
- values = [2960000, 1000000, 1960000]
-
- fig.add_trace(
- go.Bar(
- x=categories,
- y=values,
- marker_color=['#FF6B6B', '#4ECDC4', '#45B7D1'],
- text=[f'${v/1000000:.1f}M' for v in values],
- textposition='auto',
- name='Cost Impact'
- ),
- row=1, col=1
- )
-
- # 2. Team Capacity Shift
- labels = ['Firefighting', 'Innovation', 'Strategic Work']
- before = [60, 20, 20]
- after = [10, 60, 30]
-
- fig.add_trace(
- go.Bar(
- x=labels,
- y=before,
- name='Before ARF',
- marker_color='#FF6B6B'
- ),
- row=1, col=2
- )
-
- fig.add_trace(
- go.Bar(
- x=labels,
- y=after,
- name='After ARF Enterprise',
- marker_color='#4ECDC4'
- ),
- row=1, col=2
- )
-
- # 3. MTTR Comparison
- mttr_categories = ['Manual', 'Traditional', 'ARF OSS', 'ARF Enterprise']
- mttr_values = [120, 45, 25, 8]
-
- fig.add_trace(
- go.Bar(
- x=mttr_categories,
- y=mttr_values,
- marker_color=['#FF6B6B', '#FFE66D', '#45B7D1', '#4ECDC4'],
- text=[f'{v} min' for v in mttr_values],
- textposition='auto',
- name='MTTR'
- ),
- row=2, col=1
- )
-
- # 4. ROI Gauge
- fig.add_trace(
- go.Indicator(
- mode="gauge+number+delta",
- value=5.2,
- title={'text': "ROI Multiplier"},
- delta={'reference': 1.0, 'increasing': {'color': "green"}},
- gauge={
- 'axis': {'range': [0, 10], 'tickwidth': 1},
- 'bar': {'color': "#4ECDC4"},
- 'steps': [
- {'range': [0, 2], 'color': "lightgray"},
- {'range': [2, 4], 'color': "gray"},
- {'range': [4, 6], 'color': "lightgreen"},
- {'range': [6, 10], 'color': "green"}
- ],
- 'threshold': {
- 'line': {'color': "red", 'width': 4},
- 'thickness': 0.75,
- 'value': 5.2
- }
- }
- ),
- row=2, col=2
- )
-
- fig.update_layout(
- height=700,
- showlegend=True,
- paper_bgcolor='white',
- plot_bgcolor='white',
- title_text="
Executive Business Dashboard",
- barmode='group',
- margin=dict(l=50, r=50, t=100, b=50)
- )
-
- return fig
+ enterprise_btn.click(
+ fn=execute_healing,
+ inputs=[scenario_dropdown, approval_toggle],
+ outputs=[approval_display, results_display, execution_table]
+ )
- @staticmethod
- def create_execution_history_chart(audit_manager: AuditTrailManager) -> go.Figure:
- """Create execution history visualization"""
- executions = audit_manager.execution_history[:10]
-
- if not executions:
- fig = go.Figure()
- fig.update_layout(
- title="
No execution history yet",
- height=400,
- paper_bgcolor='white',
- plot_bgcolor='white',
- xaxis_showgrid=True,
- yaxis_showgrid=True
- )
- return fig
-
- # Extract data
- scenarios = [e["scenario"] for e in executions]
- savings = []
- for e in executions:
- try:
- savings.append(int(e["savings"].replace("$", "").replace(",", "")))
- except:
- savings.append(0)
-
- fig = go.Figure(data=[
- go.Bar(
- x=scenarios,
- y=savings,
- marker_color='#4ECDC4',
- text=[f'${s:,.0f}' for s in savings],
- textposition='outside',
- name='Cost Saved',
- hovertemplate="
%{x}Savings: %{text}
"
- )
- ])
-
- fig.update_layout(
- title="
Execution History - Cost Savings",
- xaxis_title="Scenario",
- yaxis_title="Cost Saved ($)",
- height=500,
- paper_bgcolor='white',
- plot_bgcolor='white',
- showlegend=False,
- xaxis_showgrid=True,
- yaxis_showgrid=True,
- margin=dict(l=50, r=50, t=80, b=100)
+ # Quick Demo button
+ async def run_quick_demo():
+ # Run OSS analysis
+ scenario = INCIDENT_SCENARIOS["Cache Miss Storm"]
+ analysis = await orchestrator.analyze_incident("Cache Miss Storm", scenario)
+
+ # Execute healing
+ execution = orchestrator.execute_healing(
+ "Cache Miss Storm",
+ {"action": "scale_out", "component": "redis_cache"},
+ mode="autonomous"
)
- return fig
-
- @staticmethod
- def create_memory_graph(audit_manager: AuditTrailManager) -> go.Figure:
- """Create incident memory graph"""
- incidents = audit_manager.incident_history[:15]
-
- if not incidents:
- # Create sample graph
- fig = go.Figure()
-
- # Create nodes in a circle
- angles = np.linspace(0, 2*np.pi, 5, endpoint=False)
- radius = 1
- x = radius * np.cos(angles)
- y = radius * np.sin(angles)
-
- fig.add_trace(go.Scatter(
- x=x,
- y=y,
- mode='markers+text',
- marker=dict(size=30, color=['#FF6B6B', '#4ECDC4', '#45B7D1', '#96CEB4', '#FFE66D']),
- text=['Cache', 'DB', 'API', 'Auth', 'Payment'],
- textposition="top center"
- ))
-
- fig.update_layout(
- title="
Incident Memory Graph (Sample)",
- showlegend=False,
- height=600,
- paper_bgcolor='white',
- plot_bgcolor='white',
- xaxis=dict(showgrid=False, zeroline=False, showticklabels=False, range=[-1.5, 1.5]),
- yaxis=dict(showgrid=False, zeroline=False, showticklabels=False, range=[-1.5, 1.5]),
- margin=dict(l=20, r=20, t=60, b=20)
- )
- return fig
-
- # Create actual graph from incidents
- nodes = []
- for i, incident in enumerate(incidents):
- nodes.append({
- "x": np.cos(2 * np.pi * i / len(incidents)),
- "y": np.sin(2 * np.pi * i / len(incidents)),
- "size": 15 + incident["severity"] * 5,
- "color": "#FF6B6B" if incident["severity"] == 3 else "#FFA726" if incident["severity"] == 2 else "#42A5F5",
- "label": incident["type"][:15],
- "service": incident["service"]
- })
-
- fig = go.Figure()
-
- # Add nodes
- fig.add_trace(go.Scatter(
- x=[node["x"] for node in nodes],
- y=[node["y"] for node in nodes],
- mode='markers+text',
- marker=dict(
- size=[node["size"] for node in nodes],
- color=[node["color"] for node in nodes],
- line=dict(width=2, color='white')
- ),
- text=[node["label"] for node in nodes],
- textposition="top center",
- hovertext=[f"Service: {node['service']}" for node in nodes],
- hoverinfo="text",
- name="Incidents"
- ))
-
- # Add edges (connect similar types)
- for i in range(len(nodes)):
- for j in range(i + 1, len(nodes)):
- if incidents[i]["type"] == incidents[j]["type"]:
- fig.add_trace(go.Scatter(
- x=[nodes[i]["x"], nodes[j]["x"], None],
- y=[nodes[i]["y"], nodes[j]["y"], None],
- mode='lines',
- line=dict(width=1, color='rgba(100, 100, 100, 0.2)'),
- hoverinfo='none',
- showlegend=False
- ))
-
- fig.update_layout(
- title=f"
Incident Memory Graph ({len(incidents)} incidents)",
- showlegend=False,
- height=600,
- paper_bgcolor='white',
- plot_bgcolor='white',
- xaxis=dict(showgrid=False, zeroline=False, showticklabels=False, range=[-1.5, 1.5]),
- yaxis=dict(showgrid=False, zeroline=False, showticklabels=False, range=[-1.5, 1.5]),
- margin=dict(l=50, r=50, t=80, b=50)
+ # Update audit trail
+ audit_manager.add_incident("Cache Miss Storm", scenario.get("metrics", {}))
+ audit_manager.add_execution(
+ "Cache Miss Storm",
+ ["scale_out", "circuit_breaker"],
+ 7200,
+ False,
+ "Quick demo execution"
)
- return fig
-
- # ===========================================
- # CREATE DEMO INTERFACE
- # ===========================================
-
- def create_demo_interface():
- """Create the 5-tab demo interface"""
-
- # Initialize components
- audit_manager = AuditTrailManager()
- business_logic = BusinessLogic(audit_manager, oss_client)
- viz_engine = VisualizationEngine()
-
- # Custom CSS
- custom_css = """
- .gradio-container {
- max-width: 1800px !important;
- margin: auto !important;
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, sans-serif !important;
- }
- h1 {
- background: linear-gradient(90deg, #1a365d 0%, #2d3748 100%);
- -webkit-background-clip: text;
- -webkit-text-fill-color: transparent;
- background-clip: text;
- font-weight: 800 !important;
- font-size: 2.5rem !important;
- margin-bottom: 0.5rem !important;
- }
- .critical {
- color: #FF6B6B !important;
- font-weight: 900 !important;
- }
- .success {
- color: #4ECDC4 !important;
- font-weight: 900 !important;
- }
- .tab-nav {
- background: linear-gradient(90deg, #f8fafc 0%, #ffffff 100%) !important;
- border-radius: 10px !important;
- padding: 5px !important;
- margin-bottom: 20px !important;
- }
- .metric-card {
- background: white !important;
- border-radius: 10px !important;
- padding: 20px !important;
- box-shadow: 0 2px 8px rgba(0,0,0,0.06) !important;
- border-left: 4px solid #4ECDC4 !important;
- margin-bottom: 15px !important;
- }
- .enterprise-badge {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- color: white !important;
- padding: 8px 16px !important;
- border-radius: 20px !important;
- font-weight: 700 !important;
- font-size: 0.85rem !important;
- display: inline-block !important;
- margin: 5px 0 !important;
- }
- .oss-badge {
- background: linear-gradient(135deg, #4299e1 0%, #38b2ac 100%) !important;
- color: white !important;
- padding: 8px 16px !important;
- border-radius: 20px !important;
- font-weight: 700 !important;
- font-size: 0.85rem !important;
- display: inline-block !important;
- margin: 5px 0 !important;
- }
- """
-
- with gr.Blocks(
- title=f"š ARF Investor Demo v3.8.0",
- theme=gr.themes.Soft(
- primary_hue="blue",
- secondary_hue="teal",
- font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
- ),
- css=custom_css
- ) as demo:
+ # Update all tables
+ execution_table_data = audit_manager.get_execution_history_table()
+ incident_table_data = audit_manager.get_incident_history_table()
- # ============ HEADER ============
- gr.Markdown(f"""
-
-
š Agentic Reliability Framework
-
Investor Demo v3.8.0 - Enterprise Edition
-
-
-
š¢ Enterprise Features
-
š OSS v{OSS_VERSION}
-
- š 5.2Ć Average ROI
-
-
- ā” 85% MTTR Reduction
+ # Create approval HTML
+ approval_html = """
+
+
+
+ ā”
+
Quick Demo Completed
-
-
- Experience the full journey from
OSS Advisory
- to
Enterprise Autonomous Healing.
- See how ARF transforms reliability operations.
+
+ OSS analysis ā Enterprise execution completed successfully.
- """)
-
- # ============ SYSTEM STATUS ============
- with gr.Row():
- with gr.Column(scale=1):
- status_html = f"""
-
-
-
-
-
ARF OSS Integration
-
- {"ā
Connected v" + OSS_VERSION if ARF_OSS_AVAILABLE else "ā ļø Mock Mode"}
-
-
-
-
- """
- gr.HTML(status_html)
-
- with gr.Column(scale=2):
- performance_html = """
-
-
-
-
Performance Metrics
-
-
-
Auto-Heal Rate
-
81.7%
-
-
-
Avg Resolution
-
8.2 min
-
-
-
Cost Savings
-
$6.2M/yr
-
-
-
-
-
- """
- gr.HTML(performance_html)
-
- with gr.Column(scale=1):
- license_html = """
-
-
License Status
-
-
-
ENTERPRISE
-
Active ⢠Expires 2024-12-31
-
-
- ā
Valid
-
-
-
- """
- gr.HTML(license_html)
-
- # ============ 5 TABS ============
- with gr.Tabs():
-
- # TAB 1: LIVE INCIDENT DEMO
- with gr.TabItem("š„ Live Incident Demo"):
- with gr.Row():
- # Left Panel
- with gr.Column(scale=1):
- gr.Markdown("### š¬ Incident Scenario")
- scenario_dropdown = gr.Dropdown(
- choices=list(INCIDENT_SCENARIOS.keys()),
- value="Cache Miss Storm",
- label="Select critical incident:",
- interactive=True
- )
-
- scenario_description = gr.Markdown(
- value=INCIDENT_SCENARIOS["Cache Miss Storm"]["description"]
- )
-
- gr.Markdown("### š Current Crisis Metrics")
- metrics_display = gr.JSON(
- value=INCIDENT_SCENARIOS["Cache Miss Storm"]["metrics"],
- label="Live Metrics",
- show_label=True
- )
-
- gr.Markdown("### š° Business Impact")
- impact_display = gr.JSON(
- value=INCIDENT_SCENARIOS["Cache Miss Storm"]["impact"],
- label="Impact Analysis",
- show_label=True
- )
-
- # Right Panel
- with gr.Column(scale=2):
- gr.Markdown("### š Incident Timeline")
- timeline_output = gr.Plot(
- value=viz_engine.create_incident_timeline(),
- label="",
- show_label=False
- )
-
- gr.Markdown("### ā” Take Action")
- with gr.Row():
- oss_btn = gr.Button(
- "š Run OSS Analysis",
- variant="secondary",
- size="lg",
- elem_id="oss_btn"
- )
- enterprise_btn = gr.Button(
- "š Execute Enterprise Healing",
- variant="primary",
- size="lg",
- elem_id="enterprise_btn"
- )
-
- with gr.Row():
- approval_toggle = gr.Checkbox(
- label="š Require Manual Approval",
- value=True,
- info="Toggle to show approval workflow vs auto-execution",
- interactive=True
- )
- demo_mode_btn = gr.Button(
- "ā” Quick Demo",
- variant="secondary",
- size="sm",
- elem_id="demo_btn"
- )
-
- approval_display = gr.HTML(
- value="
Approval status will appear here after execution
"
- )
-
- config_display = gr.JSON(
- label="āļø Enterprise Configuration",
- value={"approval_required": True, "compliance_mode": "strict"},
- show_label=True
- )
-
- results_display = gr.JSON(
- label="šÆ Execution Results",
- value={"status": "Ready for execution..."},
- show_label=True
- )
-
- # TAB 2: BUSINESS IMPACT & ROI
- with gr.TabItem("š° Business Impact & ROI"):
- with gr.Column():
- gr.Markdown("### š Executive Business Dashboard")
- dashboard_output = gr.Plot(
- value=viz_engine.create_business_dashboard(),
- label="",
- show_label=False
- )
-
- gr.Markdown("### š§® Interactive ROI Calculator")
- with gr.Row():
- with gr.Column(scale=1):
- monthly_slider = gr.Slider(
- 1, 100, value=15, step=1,
- label="Monthly incidents",
- interactive=True
- )
- impact_slider = gr.Slider(
- 1000, 50000, value=8500, step=500,
- label="Average incident impact ($)",
- interactive=True
- )
- team_slider = gr.Slider(
- 1, 20, value=5, step=1,
- label="Reliability team size",
- interactive=True
- )
- calculate_btn = gr.Button(
- "Calculate My ROI",
- variant="primary",
- size="lg"
- )
-
- with gr.Column(scale=2):
- roi_output = gr.JSON(
- label="Your ROI Analysis",
- value={"analysis": "Adjust sliders and click Calculate"},
- show_label=True
- )
-
- with gr.Row():
- with gr.Column():
- gr.Markdown("""
- **š ARF Enterprise ROI Metrics**
- - **Average ROI:** 5.2Ć first year
- - **Payback Period:** 2-3 months
- - **Auto-Heal Rate:** 81.7%
- - **MTTR Reduction:** 85%
- - **Cost Savings:** $6.2M average annually
- """)
- with gr.Column():
- gr.Markdown("""
- **šÆ Business Impact**
- - **Engineer Time:** 325+ hours reclaimed annually
- - **SLA Compliance:** 99.9% maintained
- - **Customer Satisfaction:** +40% improvement
- - **Revenue Protection:** $8,500+/hour saved
- - **Innovation Capacity:** 60% increase
- """)
-
- # TAB 3: AUDIT TRAIL & HISTORY
- with gr.TabItem("š Audit Trail & History"):
- with gr.Row():
- # Left Column - Execution History
- with gr.Column(scale=1):
- gr.Markdown("### š Execution History (Audit Trail)")
-
- with gr.Row():
- refresh_btn = gr.Button("š Refresh", variant="secondary", size="sm")
- clear_btn = gr.Button("šļø Clear", variant="stop", size="sm")
- export_btn = gr.Button("š„ Export", variant="secondary", size="sm")
-
- execution_table = gr.Dataframe(
- headers=["Time", "Scenario", "Actions", "Status", "Savings", "Details"],
- value=audit_manager.get_execution_history_table(),
- label="",
- interactive=False,
- wrap=True
- )
-
- gr.Markdown("### š Visual History")
- execution_chart = gr.Plot(
- value=viz_engine.create_execution_history_chart(audit_manager),
- label="",
- show_label=False
- )
-
- # Right Column - Incident History
- with gr.Column(scale=1):
- gr.Markdown("### š Incident History")
-
- incident_table = gr.Dataframe(
- headers=["Time", "Service", "Type", "Severity", "Description"],
- value=audit_manager.get_incident_history_table(),
- label="",
- interactive=False,
- wrap=True
- )
-
- gr.Markdown("### š§ Memory Graph")
- memory_graph = gr.Plot(
- value=viz_engine.create_memory_graph(audit_manager),
- label="",
- show_label=False
- )
-
- gr.Markdown("### š¤ Export & Analytics")
- export_text = gr.Textbox(
- label="Full Audit Trail (JSON)",
- value=audit_manager.export_audit_trail(),
- lines=8,
- interactive=False
- )
-
- # TAB 4: ENTERPRISE FEATURES
- with gr.TabItem("š¢ Enterprise Features"):
- with gr.Row():
- # Left Column
- with gr.Column(scale=1):
- gr.Markdown("### š License Management")
-
- license_display = gr.JSON(
- value=business_logic.license_info,
- label="License Information",
- show_label=True
- )
-
- with gr.Row():
- validate_btn = gr.Button("š Validate", variant="secondary")
- trial_btn = gr.Button("š Start Trial", variant="primary")
- upgrade_btn = gr.Button("š Upgrade", variant="secondary")
-
- gr.Markdown("### ā” Feature Matrix")
-
- features_data = [
- ["š¤ Autonomous Healing", "ā", "ā
Auto", "ā
AI-Driven"],
- ["š Executive Dashboards", "Basic", "Advanced", "ā
Comprehensive"],
- ["š Compliance Automation", "ā", "ā
", "ā
SOC2/GDPR"],
- ["š Predictive Analytics", "ā", "Basic", "ā
ML-Powered"],
- ["š Auto-Remediation", "Manual", "ā
Auto", "ā
Continuous"],
- ["šÆ SLA Guarantees", "ā", "ā", "ā
99.9%"],
- ["š Cost Optimization", "Basic", "Advanced", "ā
AI-Optimized"],
- ["š Role-Based Access", "ā", "ā
", "ā
Granular"],
- ["š Audit Trail", "Basic", "ā
", "ā
Comprehensive"],
- ["š Multi-Cloud", "ā", "ā", "ā
Native"],
- ]
-
- features_table = gr.Dataframe(
- value=features_data,
- headers=["Feature", "OSS", "Starter", "Enterprise"],
- label="",
- interactive=False,
- wrap=True
- )
-
- # Right Column
- with gr.Column(scale=1):
- gr.Markdown("### š Compliance Status")
-
- compliance_status = gr.JSON(
- value={
- "SOC2": {"status": "ā
Certified", "expires": "2025-06-30"},
- "GDPR": {"status": "ā
Compliant", "last_audit": "2024-10-15"},
- "HIPAA": {"status": "š” In Progress", "eta": "2024-12-31"},
- "ISO27001": {"status": "ā
Certified", "cert_id": "ISO-2024-001"},
- "CCPA": {"status": "ā
Compliant", "verified": True}
- },
- label="Compliance Certifications",
- show_label=True
- )
-
- gr.Markdown("### š Integration Hub")
-
- integrations_data = [
- ["AWS", "CloudWatch, S3, Lambda", "ā
Connected"],
- ["Azure", "Monitor, Log Analytics", "ā
Connected"],
- ["GCP", "Operations, BigQuery", "ā
Connected"],
- ["Datadog", "Metrics, Logs, APM", "ā
Connected"],
- ["New Relic", "Full-stack", "ā
Connected"],
- ["PagerDuty", "Incident Response", "ā
Connected"],
- ["ServiceNow", "ITSM & CMDB", "ā
Connected"],
- ["Slack", "Notifications", "ā
Connected"],
- ]
-
- integrations_table = gr.Dataframe(
- value=integrations_data,
- headers=["Platform", "Services", "Status"],
- label="",
- interactive=False,
- wrap=True
- )
-
- # TAB 5: LEARNING ENGINE
- with gr.TabItem("š§ Learning Engine"):
- with gr.Row():
- # Left Column
- with gr.Column(scale=2):
- gr.Markdown("### š§ Incident Memory Graph")
-
- memory_graph_plot = gr.Plot(
- value=viz_engine.create_memory_graph(audit_manager),
- label="",
- show_label=False
- )
-
- with gr.Row():
- graph_type = gr.Radio(
- choices=["Force Directed", "Hierarchical", "Timeline"],
- value="Force Directed",
- label="Graph Type",
- interactive=True
- )
- show_weights = gr.Checkbox(label="Show Edge Weights", value=True, interactive=True)
-
- gr.Markdown("### š Similarity Search")
-
- search_query = gr.Textbox(
- label="Search for similar incidents",
- placeholder="Describe incident or paste metrics...",
- lines=2,
- interactive=True
- )
-
- with gr.Row():
- search_btn = gr.Button("š Search", variant="primary")
- clear_search_btn = gr.Button("Clear", variant="secondary")
-
- search_results = gr.Dataframe(
- headers=["Incident", "Similarity", "Resolution", "Actions"],
- value=[],
- label="",
- interactive=False,
- wrap=True
- )
-
- # Right Column
- with gr.Column(scale=1):
- gr.Markdown("### š Learning Statistics")
-
- learning_stats = gr.JSON(
- value={
- "total_incidents": len(audit_manager.incident_history),
- "resolved_automatically": len([e for e in audit_manager.execution_history if "Executed" in e["status"]]),
- "patterns_detected": 5,
- "confidence_threshold": 0.85,
- "memory_size": f"{len(audit_manager.incident_history) * 0.5:.1f} KB",
- "similar_incidents_found": 12
- },
- label="Learning Engine Statistics",
- show_label=True
- )
-
- gr.Markdown("### šÆ Pattern Detection")
-
- pattern_analysis = gr.JSON(
- value={
- "most_common": "Cache Miss Storm",
- "frequency": "45% of incidents",
- "avg_resolution_time": "8.2 minutes",
- "success_rate": "92%",
- "recommendations": [
- "Implement proactive cache monitoring",
- "Add circuit breaker for database fallback",
- "Optimize cache TTL settings"
- ]
- },
- label="Pattern Analysis",
- show_label=True
- )
-
- # ============ EVENT HANDLERS ============
-
- # Scenario dropdown change
- def update_scenario(scenario_name):
- scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
- return (
- f"### {scenario_name}\n{scenario.get('description', 'No description')}",
- scenario.get("metrics", {}),
- scenario.get("impact", {}),
- viz_engine.create_incident_timeline()
- )
-
- scenario_dropdown.change(
- fn=update_scenario,
- inputs=[scenario_dropdown],
- outputs=[scenario_description, metrics_display, impact_display, timeline_output]
- )
-
- # OSS Analysis button
- async def run_oss_analysis(scenario_name):
- analysis = await business_logic.run_oss_analysis(scenario_name)
- incident_table_data = audit_manager.get_incident_history_table()
- memory_plot = viz_engine.create_memory_graph(audit_manager)
- return analysis, incident_table_data, memory_plot
-
- oss_btn.click(
- fn=run_oss_analysis,
- inputs=[scenario_dropdown],
- outputs=[results_display, incident_table, memory_graph]
- )
-
- # Enterprise Healing button
- def execute_healing(scenario_name, approval_required):
- approval_html, results = business_logic.execute_enterprise_healing(scenario_name, approval_required)
- execution_table_data = audit_manager.get_execution_history_table()
- execution_chart_plot = viz_engine.create_execution_history_chart(audit_manager)
- return approval_html, results, execution_table_data, execution_chart_plot
-
- enterprise_btn.click(
- fn=execute_healing,
- inputs=[scenario_dropdown, approval_toggle],
- outputs=[approval_display, results_display, execution_table, execution_chart]
- )
-
- # Quick Demo button
- async def run_quick_demo():
- # Run OSS analysis
- analysis = await business_logic.run_oss_analysis("Cache Miss Storm")
-
- # Execute enterprise healing
- approval_html, results = business_logic.execute_enterprise_healing("Cache Miss Storm", False)
-
- # Update all displays
- execution_table_data = audit_manager.get_execution_history_table()
- incident_table_data = audit_manager.get_incident_history_table()
- execution_chart_plot = viz_engine.create_execution_history_chart(audit_manager)
- memory_plot = viz_engine.create_memory_graph(audit_manager)
-
- return (
- analysis,
- approval_html,
- results,
- execution_table_data,
- incident_table_data,
- execution_chart_plot,
- memory_plot,
- gr.Checkbox.update(value=False)
- )
-
- demo_mode_btn.click(
- fn=run_quick_demo,
- outputs=[
- results_display,
- approval_display,
- results_display,
- execution_table,
- incident_table,
- execution_chart,
- memory_graph,
- approval_toggle
- ]
- )
-
- # ROI Calculator
- def calculate_roi(monthly, impact, team):
- return business_logic.calculate_roi(monthly, impact, team)
-
- calculate_btn.click(
- fn=calculate_roi,
- inputs=[monthly_slider, impact_slider, team_slider],
- outputs=[roi_output]
- )
-
- # Audit Trail Refresh
- def refresh_audit_trail():
- execution_table_data = audit_manager.get_execution_history_table()
- incident_table_data = audit_manager.get_incident_history_table()
- execution_chart_plot = viz_engine.create_execution_history_chart(audit_manager)
- memory_plot = viz_engine.create_memory_graph(audit_manager)
- export_data = audit_manager.export_audit_trail()
- return execution_table_data, incident_table_data, execution_chart_plot, memory_plot, export_data
-
- refresh_btn.click(
- fn=refresh_audit_trail,
- outputs=[execution_table, incident_table, execution_chart, memory_graph, export_text]
- )
-
- # Clear History
- def clear_audit_trail():
- audit_manager.execution_history = []
- audit_manager.incident_history = []
- audit_manager._initialize_sample_data()
- return refresh_audit_trail()
-
- clear_btn.click(
- fn=clear_audit_trail,
- outputs=[execution_table, incident_table, execution_chart, memory_graph, export_text]
- )
-
- # Export Audit Trail
- def update_export():
- return audit_manager.export_audit_trail()
-
- export_btn.click(
- fn=update_export,
- outputs=[export_text]
- )
-
- # License Management
- def validate_license():
- business_logic.license_info["last_validated"] = datetime.datetime.now().isoformat()
- business_logic.license_info["validation_code"] = "VAL-2024-001"
- return business_logic.license_info
-
- validate_btn.click(
- fn=validate_license,
- outputs=[license_display]
- )
-
- def start_trial():
- business_logic.license_info["tier"] = "TRIAL"
- business_logic.license_info["expires_at"] = (datetime.datetime.now() + datetime.timedelta(days=30)).isoformat()
- business_logic.license_info["status"] = "š Trial Active (30 days)"
- return business_logic.license_info
-
- trial_btn.click(
- fn=start_trial,
- outputs=[license_display]
- )
-
- def upgrade_license():
- business_logic.license_info["tier"] = "PLATFORM"
- business_logic.license_info["status"] = "š Upgraded to Platform Edition"
- return business_logic.license_info
-
- upgrade_btn.click(
- fn=upgrade_license,
- outputs=[license_display]
- )
-
- # Learning Engine Search
- def search_similar_incidents(query):
- if not query.strip():
- return []
-
- # Mock search results
- results = [
- ["Cache Miss Storm", "92%", "ā
Resolved", "Scale cache + circuit breaker"],
- ["Database Connection Pool", "78%", "ā
Resolved", "Increase pool size"],
- ["Memory Leak", "65%", "ā ļø Pending", "Restart + monitoring"],
- ["API Rate Limit", "58%", "ā
Resolved", "Increase limits + caching"],
- ]
-
- return results
-
- search_btn.click(
- fn=search_similar_incidents,
- inputs=[search_query],
- outputs=[search_results]
- )
-
- clear_search_btn.click(
- fn=lambda: [],
- outputs=[search_results]
- )
-
- # Graph type change
- def update_graph_view(graph_type, show_weights):
- return viz_engine.create_memory_graph(audit_manager)
-
- graph_type.change(
- fn=update_graph_view,
- inputs=[graph_type, show_weights],
- outputs=[memory_graph_plot]
- )
-
- show_weights.change(
- fn=update_graph_view,
- inputs=[graph_type, show_weights],
- outputs=[memory_graph_plot]
+ """
+
+ return (
+ analysis,
+ approval_html,
+ execution,
+ execution_table_data,
+ incident_table_data,
+ gr.Checkbox.update(value=False)
)
+
+ demo_btn.click(
+ fn=run_quick_demo,
+ outputs=[
+ results_display,
+ approval_display,
+ results_display,
+ execution_table,
+ incident_table,
+ approval_toggle
+ ]
+ )
+
+ # ROI Calculator
+ def calculate_roi(monthly, impact, team):
+ company_data = {
+ "monthly_incidents": monthly,
+ "avg_cost_per_incident": impact,
+ "team_size": team
+ }
+ roi_result = orchestrator.calculate_roi(company_data)
+
+ # Format for display
+ formatted_result = {
+ "annual_impact": f"${roi_result['annual_impact']:,.0f}",
+ "team_cost": f"${roi_result['team_cost']:,.0f}",
+ "potential_savings": f"${roi_result['potential_savings']:,.0f}",
+ "roi_multiplier": f"{roi_result['roi_multiplier']:.1f}Ć",
+ "payback_months": f"{roi_result['payback_months']:.1f} months",
+ "recommendation": roi_result['recommendation']
+ }
- # Initialize with default plots
- demo.load(
- fn=lambda: (
- viz_engine.create_business_dashboard(),
- viz_engine.create_incident_timeline(),
- viz_engine.create_execution_history_chart(audit_manager),
- viz_engine.create_memory_graph(audit_manager),
- audit_manager.export_audit_trail()
- ),
- outputs=[dashboard_output, timeline_output, execution_chart, memory_graph, export_text]
- )
+ # Update dashboard with user-specific ROI
+ dashboard = viz_engine.create_executive_dashboard(roi_result)
- # ============ FOOTER ============
- gr.Markdown("""
-
-
-
-
š Experience the Journey
-
- - 1. Start with OSS - Get recommendations
- - 2. Calculate ROI - See your savings
- - 3. Execute Healing - Experience autonomy
- - 4. View Audit Trail - Track everything
- - 5. Explore Features - See enterprise power
-
-
-
-
š Get Started
-
- - š§ Contact: sales@arfinvestor.com
- - š Docs: docs.arfinvestor.com
- - š¬ Slack: Join 2,500+ engineers
- - š Trial: 30-day enterprise trial
- - š Demo: Live demo available
-
-
-
-
š”ļø Trust & Security
-
- - ā
SOC 2 Type II Certified
- - ā
GDPR & CCPA Compliant
- - ā
ISO 27001 Certified
- - ā
HIPAA Ready
- - ā
Enterprise-grade Security
-
-
-
-
-
-
Ā© 2024 Agentic Reliability Framework. Demo v3.8.0 Enterprise Edition.
-
This demonstration uses ARF OSS v{OSS_VERSION}. Actual enterprise features require license activation.
-
-
- """.format(OSS_VERSION=OSS_VERSION))
+ return formatted_result, dashboard
- return demo
-
- return create_demo_interface()
-
-except Exception as e:
- logger.error(f"Failed to create demo: {e}")
- logger.error(traceback.format_exc())
-
- # Minimal fallback
- import gradio as gr
-
- with gr.Blocks(title="š ARF Demo - Error") as demo:
- gr.Markdown(f"""
- # ā ļø ARF Demo Initialization Error
+ calculate_btn.click(
+ fn=calculate_roi,
+ inputs=[monthly_slider, impact_slider, team_slider],
+ outputs=[roi_output, dashboard_output]
+ )
+
+ # Audit Trail Refresh
+ def refresh_audit_trail():
+ execution_table_data = audit_manager.get_execution_history_table()
+ incident_table_data = audit_manager.get_incident_history_table()
+ export_data = audit_manager.export_audit_trail()
+ return execution_table_data, incident_table_data, export_data
- Failed to initialize the demo:
+ refresh_btn.click(
+ fn=refresh_audit_trail,
+ outputs=[execution_table, incident_table, export_text]
+ )
+
+ # Clear History
+ def clear_audit_trail():
+ audit_manager.execution_history = []
+ audit_manager.incident_history = []
+ audit_manager._initialize_sample_data()
+ return refresh_audit_trail()
+
+ clear_btn.click(
+ fn=clear_audit_trail,
+ outputs=[execution_table, incident_table, export_text]
+ )
+
+ # Export Audit Trail
+ def update_export():
+ return audit_manager.export_audit_trail()
+
+ export_btn.click(
+ fn=update_export,
+ outputs=[export_text]
+ )
+
+ # Search similar incidents
+ def search_incidents(query):
+ if not query.strip():
+ return []
+ results = orchestrator.get_similar_incidents(query)
+ return [[r["id"], f"{r['similarity']:.0%}", r["scenario"], r["resolution"]]
+ for r in results]
+
+ search_btn.click(
+ fn=search_incidents,
+ inputs=[search_query],
+ outputs=[search_results]
+ )
- ```python
- {str(e)}
- ```
+ clear_btn_search.click(
+ fn=lambda: [],
+ outputs=[search_results]
+ )
- Please check the logs for details.
- """)
+ # Initialize dashboard on load
+ demo.load(
+ fn=lambda: viz_engine.create_executive_dashboard(),
+ outputs=[dashboard_output]
+ )
return demo
-
+# ===========================================
+# MAIN EXECUTION - CORRECTED
+# ===========================================
def main():
- """Main entry point"""
+ """Main entry point - CORRECTED"""
print("š Starting ARF Ultimate Investor Demo v3.8.0...")
print("=" * 70)
print("š Features:")
@@ -1685,18 +827,26 @@ def main():
print(" ⢠Enterprise Features & License Management")
print(" ⢠Learning Engine with Pattern Detection")
print("=" * 70)
- print(f"\nš¦ Using ARF OSS v{OSS_VERSION if 'OSS_VERSION' in locals() else '3.3.6'}")
- print("š Opening web interface at http://localhost:7860...")
+ print(f"\nš¦ Using ARF OSS v{OSS_VERSION}")
+ print(f"š§ ARF Available: {ARF_OSS_AVAILABLE}")
+ print("š Opening web interface...")
+ # Create and launch the demo interface
demo = create_demo_interface()
- demo.launch(
- server_name="0.0.0.0",
- server_port=7860,
- share=False,
- debug=False,
- show_error=True
- )
-
+
+ # Launch configuration
+ launch_config = {
+ "server_name": "0.0.0.0",
+ "server_port": 7860,
+ "share": False,
+ "debug": False,
+ "show_error": True
+ }
+
+ demo.launch(**launch_config)
+# ===========================================
+# EXECUTION GUARD - CORRECTED
+# ===========================================
if __name__ == "__main__":
main()
\ No newline at end of file