| """ |
| 🚀 ARF Ultimate Investor Demo v3.8.0 - ENTERPRISE EDITION |
| With Audit Trail, Incident History, Memory Graph, and Enterprise Features |
| """ |
|
|
| import logging |
| import datetime |
| import random |
| import uuid |
| import json |
| import tempfile |
| from typing import Dict, List, Optional, Any, Tuple |
| from collections import deque |
| import gradio as gr |
| import plotly.graph_objects as go |
| import pandas as pd |
| import numpy as np |
| from plotly.subplots import make_subplots |
|
|
| |
try:
    # Prefer the real ARF OSS implementations when the package is installed.
    from agentic_reliability_framework.arf_core.models.healing_intent import (
        HealingIntent,
        create_scale_out_intent
    )
    from agentic_reliability_framework.arf_core.engine.simple_mcp_client import OSSMCPClient
    ARF_OSS_AVAILABLE = True
except ImportError:
    # ARF OSS is not importable: provide lightweight stand-ins with the same
    # surface so the rest of the demo keeps working in "simulation mode".
    ARF_OSS_AVAILABLE = False

    class HealingIntent:
        """Minimal healing-intent record mirroring the ARF OSS interface."""

        def __init__(self, **kwargs):
            # Same defaults the demo relies on: scale-out with empty params.
            self.intent_type = kwargs.get("intent_type", "scale_out")
            self.parameters = kwargs.get("parameters", {})

        def to_dict(self) -> Dict[str, Any]:
            """Serialize the intent, stamping the current wall-clock time."""
            payload = {
                "intent_type": self.intent_type,
                "parameters": self.parameters,
                "created_at": datetime.datetime.now().isoformat()
            }
            return payload

    def create_scale_out_intent(resource_type: str, scale_factor: float = 2.0) -> HealingIntent:
        """Build a scale-out HealingIntent for the given resource type."""
        params = {
            "resource_type": resource_type,
            "scale_factor": scale_factor,
            "action": "Increase capacity"
        }
        return HealingIntent(intent_type="scale_out", parameters=params)

    class OSSMCPClient:
        """Stub MCP client that returns canned analysis results."""

        def analyze_incident(self, metrics: Dict, pattern: str = "") -> Dict[str, Any]:
            # Fixed advisory payload; `metrics`/`pattern` are accepted for
            # interface compatibility but intentionally unused here.
            recommendations = [
                "Increase resource allocation",
                "Implement monitoring",
                "Add circuit breakers",
                "Optimize configuration"
            ]
            return {
                "status": "analysis_complete",
                "recommendations": recommendations,
                "confidence": 0.92
            }
|
|
| |
# Configure root logging once at import time (demo convenience) and grab a
# module-scoped logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
|
| |
| |
| |
|
|
# Canonical demo scenario catalog, keyed by display name.
# NOTE: only "Cache Miss Storm" carries full "oss_analysis" and
# "enterprise_results" payloads; the other scenarios omit them, and consumers
# fall back to generated defaults when those keys are absent.
INCIDENT_SCENARIOS = {
    "Cache Miss Storm": {
        "description": "Redis cluster experiencing 80% cache miss rate causing database overload",
        "severity": "CRITICAL",
        "metrics": {
            "Cache Hit Rate": "18.5% (Critical)",
            "Database Load": "92% (Overloaded)",
            "Response Time": "1850ms (Slow)",
            "Affected Users": "45,000",
            "Eviction Rate": "125/sec"
        },
        "impact": {
            "Revenue Loss": "$8,500/hour",
            "Page Load Time": "+300%",
            "Users Impacted": "45,000",
            "SLA Violation": "Yes",
            "Customer Sat": "-40%"
        },
        "oss_analysis": {
            "status": "✅ ARF OSS Analysis Complete",
            "recommendations": [
                "Increase Redis cache memory allocation",
                "Implement cache warming strategy",
                "Optimize key patterns (TTL adjustments)",
                "Add circuit breaker for database fallback",
                "Deploy monitoring for cache hit rate trends"
            ],
            "estimated_time": "60+ minutes",
            "engineers_needed": "2-3 SREs + 1 DBA",
            "manual_effort": "High",
            "total_cost": "$8,500",
            "healing_intent": "scale_out_cache"
        },
        "enterprise_results": {
            "actions_completed": [
                "✅ Auto-scaled Redis cluster: 4GB → 8GB",
                "✅ Deployed intelligent cache warming service",
                "✅ Optimized 12 key patterns with ML recommendations",
                "✅ Implemented circuit breaker with 95% success rate",
                "✅ Validated recovery with automated testing"
            ],
            "metrics_improvement": {
                "Cache Hit Rate": "18.5% → 72%",
                "Response Time": "1850ms → 450ms",
                "Database Load": "92% → 45%",
                "Throughput": "1250 → 2450 req/sec"
            },
            "business_impact": {
                "Recovery Time": "60 min → 12 min",
                "Cost Saved": "$7,200",
                "Users Impacted": "45,000 → 0",
                "Revenue Protected": "$1,700",
                "MTTR Improvement": "80% reduction"
            }
        }
    },
    "Database Connection Pool Exhaustion": {
        "description": "Database connection pool exhausted causing API timeouts and user failures",
        "severity": "HIGH",
        "metrics": {
            "Active Connections": "98/100 (Critical)",
            "API Latency": "2450ms",
            "Error Rate": "15.2%",
            "Queue Depth": "1250",
            "Connection Wait": "45s"
        },
        "impact": {
            "Revenue Loss": "$4,200/hour",
            "Affected Services": "API Gateway, User Service, Payment",
            "SLA Violation": "Yes",
            "Partner Impact": "3 external APIs"
        }
    },
    "Memory Leak in Production": {
        "description": "Java service memory leak causing gradual performance degradation",
        "severity": "HIGH",
        "metrics": {
            "Memory Usage": "96% (Critical)",
            "GC Pause Time": "4500ms",
            "Error Rate": "28.5%",
            "Restart Frequency": "12/hour",
            "Heap Fragmentation": "42%"
        },
        "impact": {
            "Revenue Loss": "$5,500/hour",
            "Session Loss": "8,500 users",
            "Customer Impact": "High",
            "Support Tickets": "+300%"
        }
    },
    "API Rate Limit Exceeded": {
        "description": "Global API rate limit exceeded causing 429 errors for external clients",
        "severity": "MEDIUM",
        "metrics": {
            "429 Error Rate": "42.5%",
            "Successful Requests": "58.3%",
            "API Latency": "120ms",
            "Queue Depth": "1250",
            "Client Satisfaction": "65/100"
        },
        "impact": {
            "Revenue Loss": "$1,800/hour",
            "Affected Partners": "8",
            "Partner SLA Violations": "3",
            "Business Impact": "Medium"
        }
    },
    "Microservice Cascading Failure": {
        "description": "Order service failure causing cascading failures in dependent services",
        "severity": "CRITICAL",
        "metrics": {
            "Order Failure Rate": "68.2%",
            "Circuit Breakers Open": "4",
            "Retry Storm Intensity": "425",
            "Error Propagation": "85%",
            "System Stability": "15/100"
        },
        "impact": {
            "Revenue Loss": "$25,000/hour",
            "Abandoned Carts": "12,500",
            "Affected Users": "75,000",
            "Brand Damage": "High"
        }
    }
}
|
|
| |
| |
| |
|
|
class AuditTrailManager:
    """In-memory audit trail for the demo.

    Keeps two bounded ring buffers: healing executions (newest first, seeded
    oldest-to-newest) and raw incidents. Both are pre-populated with sample
    history so the UI never starts empty.
    """

    def __init__(self) -> None:
        # maxlen makes the deques self-trimming: oldest records fall off.
        self.execution_history = deque(maxlen=50)
        self.incident_history = deque(maxlen=100)
        self._initialize_sample_data()

    def _initialize_sample_data(self) -> None:
        """Seed both histories with sample data anchored two hours ago."""
        base_time = datetime.datetime.now() - datetime.timedelta(hours=2)

        # (minutes before base_time, scenario, action count, $ saved, note)
        seed_rows = [
            (90, "Cache Miss Storm", 4, 7200, "Auto-scaled cache"),
            (75, "Memory Leak", 3, 5200, "Fixed memory leak"),
            (60, "API Rate Limit", 4, 2800, "Increased rate limits"),
            (45, "DB Connection Pool", 4, 3800, "Scaled connection pool"),
            (30, "Cascading Failure", 5, 12500, "Isolated services"),
            (15, "Cache Miss Storm", 4, 7200, "Optimized cache"),
        ]
        for minutes_ago, scenario, action_count, saved, note in seed_rows:
            self.execution_history.append(self._create_execution_entry(
                base_time - datetime.timedelta(minutes=minutes_ago),
                scenario, action_count, saved, "✅ Executed", note
            ))

        services = ["API Gateway", "Database", "Cache", "Auth Service", "Payment Service",
                    "Order Service", "User Service", "Session Service"]

        # 25 randomized incidents spread over the last two hours.
        for _ in range(25):
            incident_time = base_time - datetime.timedelta(minutes=random.randint(5, 120))
            self.incident_history.append({
                "timestamp": incident_time,
                "time_str": incident_time.strftime("%H:%M"),
                "service": random.choice(services),
                "type": random.choice(list(INCIDENT_SCENARIOS.keys())),
                "severity": random.randint(1, 3),
                "description": f"{random.choice(['High latency', 'Connection failed', 'Memory spike', 'Timeout'])} on {random.choice(services)}",
                "id": str(uuid.uuid4())[:8]
            })

    def _create_execution_entry(self, timestamp: datetime.datetime, scenario: str,
                                actions: int, savings: int, status: str, details: str) -> Dict[str, Any]:
        """Normalize one execution record for storage and display."""
        entry: Dict[str, Any] = {
            "timestamp": timestamp,
            "time_str": timestamp.strftime("%H:%M"),
            "scenario": scenario,
            "actions": str(actions),
            "savings": f"${savings:,}",
            "status": status,
            "details": details,
            "id": str(uuid.uuid4())[:8],
        }
        return entry

    def add_execution(self, scenario: str, actions: List[str],
                      savings: int, approval_required: bool, details: str = "") -> Dict[str, Any]:
        """Record a just-finished healing run at the head of the history."""
        status = "✅ Approved & Executed" if approval_required else "✅ Auto-Executed"
        entry = self._create_execution_entry(
            datetime.datetime.now(), scenario, len(actions), savings, status, details
        )
        self.execution_history.appendleft(entry)
        return entry

    def add_incident(self, scenario_name: str, metrics: Dict) -> Dict[str, Any]:
        """Record a demo incident; MEDIUM scenarios get severity 2, else 3."""
        scenario_severity = INCIDENT_SCENARIOS.get(scenario_name, {}).get("severity", "")
        entry = {
            "timestamp": datetime.datetime.now(),
            "time_str": datetime.datetime.now().strftime("%H:%M"),
            "service": "Demo System",
            "type": scenario_name,
            "severity": 2 if "MEDIUM" in scenario_severity else 3,
            "description": f"Demo incident: {scenario_name}",
            "id": str(uuid.uuid4())[:8]
        }
        self.incident_history.appendleft(entry)
        return entry

    def get_execution_history_table(self, limit: int = 10) -> List[List[str]]:
        """Most recent executions as display rows for the UI table."""
        rows: List[List[str]] = []
        for entry in list(self.execution_history)[:limit]:
            rows.append([entry["time_str"], entry["scenario"], entry["actions"],
                         entry["status"], entry["savings"], entry["details"]])
        return rows

    def get_incident_history_table(self, limit: int = 15) -> List[List[str]]:
        """Most recent incidents as display rows for the UI table."""
        rows: List[List[str]] = []
        for entry in list(self.incident_history)[:limit]:
            rows.append([entry["time_str"], entry["service"], entry["type"],
                         f"{entry['severity']}/3", entry["description"]])
        return rows

    def clear_history(self) -> Tuple[List[List[str]], List[List[str]]]:
        """Reset both histories back to freshly-seeded sample data."""
        self.execution_history.clear()
        self.incident_history.clear()
        self._initialize_sample_data()
        return self.get_execution_history_table(), self.get_incident_history_table()

    def export_audit_trail(self) -> str:
        """Export the full audit trail (plus totals) as pretty-printed JSON."""
        total_savings = 0
        for record in self.execution_history:
            raw = record["savings"]
            if "$" not in raw:
                continue
            try:
                total_savings += int(raw.replace("$", "").replace(",", ""))
            except ValueError:
                # Skip unparseable amounts rather than failing the export.
                pass

        payload = {
            "executions": list(self.execution_history),
            "incidents": list(self.incident_history),
            "exported_at": datetime.datetime.now().isoformat(),
            "total_executions": len(self.execution_history),
            "total_incidents": len(self.incident_history),
            "total_savings": total_savings
        }
        # default=str covers the datetime objects stored in the entries.
        return json.dumps(payload, indent=2, default=str)
|
|
| |
| |
| |
|
|
class EnhancedVisualizationEngine:
    """Enhanced visualization engine with memory graph support.

    All methods are static factories that return Plotly Figures. They read
    demo/audit data but never mutate it; figures use transparent backgrounds
    to blend into the Gradio theme.
    """

    @staticmethod
    def create_incident_timeline() -> go.Figure:
        """Create interactive incident timeline.

        Builds a fixed, synthetic 7-event narrative (problem → detection →
        healing → recovery) anchored at "now"; it does not read real data.
        """
        fig = go.Figure()

        # Hard-coded demo storyline, newest events closest to the present.
        now = datetime.datetime.now()
        events = [
            {"time": now - datetime.timedelta(minutes=25), "event": "📉 Cache hit rate drops to 18.5%", "type": "problem"},
            {"time": now - datetime.timedelta(minutes=22), "event": "⚠️ Alert: Database load hits 92%", "type": "alert"},
            {"time": now - datetime.timedelta(minutes=20), "event": "🤖 ARF detects pattern", "type": "detection"},
            {"time": now - datetime.timedelta(minutes=18), "event": "🧠 Analysis: Cache Miss Storm identified", "type": "analysis"},
            {"time": now - datetime.timedelta(minutes=15), "event": "⚡ Healing actions executed", "type": "action"},
            {"time": now - datetime.timedelta(minutes=12), "event": "✅ Cache hit rate recovers to 72%", "type": "recovery"},
            {"time": now - datetime.timedelta(minutes=10), "event": "📊 System stabilized", "type": "stable"}
        ]

        # Event category → marker color; red/orange problems, greens recovery.
        color_map = {
            "problem": "red", "alert": "orange", "detection": "blue",
            "analysis": "purple", "action": "green", "recovery": "lightgreen",
            "stable": "darkgreen"
        }

        # One single-point trace per event so each gets its own legend entry.
        for event in events:
            fig.add_trace(go.Scatter(
                x=[event["time"]],
                y=[1],  # all events on one horizontal track
                mode='markers+text',
                marker=dict(
                    size=15,
                    color=color_map[event["type"]],
                    # Circles for problems/alerts, diamonds for everything else.
                    symbol='circle' if event["type"] in ['problem', 'alert'] else 'diamond',
                    line=dict(width=2, color='white')
                ),
                text=[event["event"]],
                textposition="top center",
                name=event["type"].capitalize(),
                hovertemplate="<b>%{text}</b><br>%{x|%H:%M:%S}<extra></extra>"
            ))

        fig.update_layout(
            title="<b>Incident Timeline - Cache Miss Storm Resolution</b>",
            xaxis_title="Time →",
            yaxis_title="Event Type",
            height=450,
            showlegend=True,
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            hovermode='closest',
            xaxis=dict(
                tickformat='%H:%M',
                gridcolor='rgba(200,200,200,0.2)'
            ),
            yaxis=dict(
                # The y value is a layout device only; hide its tick labels.
                showticklabels=False,
                gridcolor='rgba(200,200,200,0.1)'
            )
        )

        return fig

    @staticmethod
    def create_business_dashboard() -> go.Figure:
        """Create executive business dashboard.

        2x2 grid of hard-coded demo figures: cost impact, team capacity
        before/after, MTTR comparison, and an ROI gauge.
        """
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=('Annual Cost Impact', 'Team Capacity Shift',
                            'MTTR Comparison', 'ROI Analysis'),
            vertical_spacing=0.15,
            horizontal_spacing=0.15
        )

        # (1,1) Annual cost impact — static demo dollars.
        categories = ['Without ARF', 'With ARF Enterprise', 'Net Savings']
        values = [2960000, 1000000, 1960000]

        fig.add_trace(
            go.Bar(
                x=categories,
                y=values,
                marker_color=['#FF6B6B', '#4ECDC4', '#45B7D1'],
                text=[f'${v/1000000:.1f}M' for v in values],
                textposition='auto',
                name='Cost Impact'
            ),
            row=1, col=1
        )

        # (1,2) Team capacity split (%) before vs after, grouped bars.
        labels = ['Firefighting', 'Innovation', 'Strategic Work']
        before = [60, 20, 20]
        after = [10, 60, 30]

        fig.add_trace(
            go.Bar(
                x=labels,
                y=before,
                name='Before ARF',
                marker_color='#FF6B6B'
            ),
            row=1, col=2
        )

        fig.add_trace(
            go.Bar(
                x=labels,
                y=after,
                name='After ARF Enterprise',
                marker_color='#4ECDC4'
            ),
            row=1, col=2
        )

        # (2,1) MTTR in minutes across approaches.
        mttr_categories = ['Manual', 'Traditional', 'ARF OSS', 'ARF Enterprise']
        mttr_values = [120, 45, 25, 8]

        fig.add_trace(
            go.Bar(
                x=mttr_categories,
                y=mttr_values,
                marker_color=['#FF6B6B', '#FFE66D', '#45B7D1', '#4ECDC4'],
                text=[f'{v} min' for v in mttr_values],
                textposition='auto',
                name='MTTR'
            ),
            row=2, col=1
        )

        # (2,2) ROI gauge; delta vs break-even (1.0x), threshold at 5.2.
        fig.add_trace(
            go.Indicator(
                mode="gauge+number+delta",
                value=5.2,
                title={'text': "ROI Multiplier"},
                delta={'reference': 1.0, 'increasing': {'color': "green"}},
                gauge={
                    'axis': {'range': [0, 10], 'tickwidth': 1},
                    'bar': {'color': "#4ECDC4"},
                    'steps': [
                        {'range': [0, 2], 'color': "lightgray"},
                        {'range': [2, 4], 'color': "gray"},
                        {'range': [4, 6], 'color': "lightgreen"},
                        {'range': [6, 10], 'color': "green"}
                    ],
                    'threshold': {
                        'line': {'color': "red", 'width': 4},
                        'thickness': 0.75,
                        'value': 5.2
                    }
                }
            ),
            row=2, col=2
        )

        fig.update_layout(
            height=700,
            showlegend=True,
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            title_text="<b>Executive Business Dashboard</b>",
            barmode='group'
        )

        return fig

    @staticmethod
    def create_execution_history_chart(audit_manager: AuditTrailManager) -> go.Figure:
        """Create execution history visualization.

        Bar chart of the savings recorded for the 10 most recent executions;
        returns a labeled empty figure when there is no history.
        """
        executions = list(audit_manager.execution_history)[:10]

        if not executions:
            fig = go.Figure()
            fig.update_layout(
                title="No execution history yet",
                height=400,
                paper_bgcolor='rgba(0,0,0,0)',
                plot_bgcolor='rgba(0,0,0,0)'
            )
            return fig

        # Parse "$7,200"-style strings back into ints; unparseable → 0.
        scenarios = [e["scenario"] for e in executions]
        savings = []
        for e in executions:
            try:
                savings.append(int(e["savings"].replace("$", "").replace(",", "")))
            except ValueError:
                savings.append(0)

        fig = go.Figure(data=[
            go.Bar(
                x=scenarios,
                y=savings,
                marker_color='#4ECDC4',
                text=[f'${s:,.0f}' for s in savings],
                textposition='outside',
                name='Cost Saved',
                hovertemplate="<b>%{x}</b><br>Savings: %{text}<extra></extra>"
            )
        ])

        fig.update_layout(
            title="<b>Execution History - Cost Savings</b>",
            xaxis_title="Scenario",
            yaxis_title="Cost Saved ($)",
            height=500,
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            showlegend=False
        )

        return fig

    @staticmethod
    def create_memory_graph(audit_manager: AuditTrailManager, graph_type: str = "Force Directed",
                            show_weights: bool = True, auto_layout: bool = True) -> go.Figure:
        """Create interactive memory graph visualization.

        Nodes are laid out on a circle regardless of the arguments:
        `graph_type`, `show_weights`, and `auto_layout` are accepted for UI
        wiring but currently unused — TODO confirm whether they should alter
        the layout.
        """
        fig = go.Figure()

        # Up to 20 most recent incidents become graph nodes.
        incidents = list(audit_manager.incident_history)[:20]

        if not incidents:
            # Static placeholder graph so the view is never empty.
            nodes = [
                {"id": "Incident_1", "label": "Cache Miss", "type": "incident", "size": 20},
                {"id": "Action_1", "label": "Scale Cache", "type": "action", "size": 15},
                {"id": "Outcome_1", "label": "Resolved", "type": "outcome", "size": 15},
                {"id": "Component_1", "label": "Redis", "type": "component", "size": 18},
            ]

            edges = [
                {"source": "Incident_1", "target": "Action_1", "weight": 0.9, "label": "resolved_by"},
                {"source": "Action_1", "target": "Outcome_1", "weight": 1.0, "label": "leads_to"},
                {"source": "Incident_1", "target": "Component_1", "weight": 0.8, "label": "affects"},
            ]
        else:
            # Build one node per incident; size scales with severity.
            nodes = []
            edges = []

            for i, incident in enumerate(incidents):
                node_id = f"Incident_{i}"
                nodes.append({
                    "id": node_id,
                    "label": incident["type"][:20],  # truncate long type names
                    "type": "incident",
                    "size": 15 + (incident.get("severity", 2) * 5),
                    "severity": incident.get("severity", 2)
                })

                # Chain consecutive incidents with a fixed-weight edge.
                if i > 0:
                    prev_id = f"Incident_{i-1}"
                    edges.append({
                        "source": prev_id,
                        "target": node_id,
                        "weight": 0.7,
                        "label": "related_to"
                    })

        # Node category → fill color.
        color_map = {
            "incident": "#FF6B6B",
            "action": "#4ECDC4",
            "outcome": "#45B7D1",
            "component": "#96CEB4"
        }

        # Circular layout: evenly space nodes on a unit circle.
        node_x = []
        node_y = []
        node_text = []
        node_color = []
        node_size = []

        for i, node in enumerate(nodes):
            # nodes is never empty here (placeholder branch guarantees ≥ 4).
            angle = 2 * np.pi * i / len(nodes)
            radius = 1.0

            node_x.append(radius * np.cos(angle))
            node_y.append(radius * np.sin(angle))
            node_text.append(f"{node['label']}<br>Type: {node['type']}")
            node_color.append(color_map.get(node["type"], "#999999"))
            node_size.append(node.get("size", 15))

        fig.add_trace(go.Scatter(
            x=node_x,
            y=node_y,
            mode='markers+text',
            marker=dict(
                size=node_size,
                color=node_color,
                line=dict(width=2, color='white')
            ),
            text=[node["label"] for node in nodes],
            textposition="top center",
            hovertext=node_text,
            hoverinfo="text",
            name="Nodes"
        ))

        # Draw each edge as its own line trace; None breaks the line segment.
        # NOTE(review): edges are added after nodes so they render on top.
        for edge in edges:
            try:
                source_idx = next(i for i, n in enumerate(nodes) if n["id"] == edge["source"])
                target_idx = next(i for i, n in enumerate(nodes) if n["id"] == edge["target"])

                fig.add_trace(go.Scatter(
                    x=[node_x[source_idx], node_x[target_idx], None],
                    y=[node_y[source_idx], node_y[target_idx], None],
                    mode='lines',
                    line=dict(
                        # Line width encodes edge weight.
                        width=2 * edge.get("weight", 1.0),
                        color='rgba(100, 100, 100, 0.5)'
                    ),
                    hoverinfo='none',
                    showlegend=False
                ))
            except StopIteration:
                # Edge references a node id that was not emitted; skip it.
                continue

        fig.update_layout(
            title="<b>Incident Memory Graph</b>",
            showlegend=True,
            height=600,
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            hovermode='closest',
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            margin=dict(l=20, r=20, t=40, b=20)
        )

        return fig

    @staticmethod
    def create_pattern_analysis_chart(analysis_data: Dict[str, Any]) -> go.Figure:
        """Create pattern analysis visualization.

        2x2 grid of canned pattern statistics. `analysis_data` is accepted
        but currently unused — the chart always shows the same demo numbers.
        """
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=('Incident Frequency', 'Resolution Times',
                            'Success Rates', 'Pattern Correlation'),
            vertical_spacing=0.15
        )

        # Hard-coded demo data, parallel lists indexed by pattern.
        patterns = ['Cache Issues', 'DB Connections', 'Memory Leaks', 'API Limits', 'Cascading']
        frequencies = [12, 8, 5, 7, 3]
        resolution_times = [8.2, 15.5, 45.2, 5.1, 32.8]
        success_rates = [92, 85, 78, 96, 65]

        # (1,1) How often each pattern occurs.
        fig.add_trace(
            go.Bar(x=patterns, y=frequencies, name='Frequency'),
            row=1, col=1
        )

        # (1,2) Mean resolution time per pattern.
        fig.add_trace(
            go.Bar(x=patterns, y=resolution_times, name='Resolution Time (min)'),
            row=1, col=2
        )

        # (2,1) Automated-resolution success rate per pattern.
        fig.add_trace(
            go.Bar(x=patterns, y=success_rates, name='Success Rate %'),
            row=2, col=1
        )

        # (2,2) Symmetric pattern-correlation matrix (demo values).
        corr_matrix = np.array([
            [1.0, 0.3, 0.1, 0.2, 0.05],
            [0.3, 1.0, 0.4, 0.1, 0.25],
            [0.1, 0.4, 1.0, 0.05, 0.6],
            [0.2, 0.1, 0.05, 1.0, 0.1],
            [0.05, 0.25, 0.6, 0.1, 1.0]
        ])

        fig.add_trace(
            go.Heatmap(z=corr_matrix, x=patterns, y=patterns),
            row=2, col=2
        )

        fig.update_layout(
            height=700,
            showlegend=False,
            title_text="<b>Pattern Analysis Dashboard</b>"
        )

        return fig
|
|
| |
| |
| |
|
|
class EnhancedBusinessLogic:
    """Enhanced business logic with enterprise features.

    Wires scenario analysis, simulated healing, licensing, ROI math, and
    learning statistics together on top of an AuditTrailManager and the
    EnhancedVisualizationEngine.
    """

    def __init__(self, audit_manager: "AuditTrailManager"):
        # String annotation keeps this class importable independently of
        # definition order for AuditTrailManager.
        self.audit_manager = audit_manager
        self.viz_engine = EnhancedVisualizationEngine()
        # Demo license record; mutated in place by the license methods below.
        self.license_info = {
            "valid": True,
            "customer_name": "Demo Enterprise Corp",
            "customer_email": "demo@enterprise.com",
            "tier": "ENTERPRISE",
            "expires_at": "2024-12-31T23:59:59",
            "features": ["autonomous_healing", "compliance", "audit_trail", "multi_cloud"],
            "max_services": 100,
            "max_incidents_per_month": 1000,
            "status": "✅ Active"
        }
        self.mcp_mode = "approval"
        # Headline stats for the UI; partially recomputed from the audit
        # trail by get_learning_stats().
        self.learning_stats = {
            "total_incidents": 127,
            "resolved_automatically": 89,
            "average_resolution_time": "8.2 min",
            "success_rate": "92.1%",
            "patterns_detected": 24,
            "confidence_threshold": 0.85,
            "memory_size": "4.7 MB",
            "embeddings": 127,
            "graph_nodes": 89,
            "graph_edges": 245
        }

    def run_oss_analysis(self, scenario_name: str) -> Dict[str, Any]:
        """Run OSS (advisory) analysis for a named scenario.

        Returns a fresh dict and records the incident in the audit trail.
        FIX: previously the scenario's "oss_analysis" sub-dict was mutated in
        place, leaking "arf_context" into the shared INCIDENT_SCENARIOS
        catalog; we now copy before adding keys.
        """
        scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
        # Shallow copy is sufficient: only top-level keys are added below.
        analysis = dict(scenario.get("oss_analysis", {}))

        if not analysis:
            # Generic fallback for scenarios without a canned analysis.
            analysis = {
                "status": "✅ Analysis Complete",
                "recommendations": [
                    "Increase resource allocation",
                    "Implement monitoring",
                    "Add circuit breakers",
                    "Optimize configuration"
                ],
                "estimated_time": "45-60 minutes",
                "engineers_needed": "2-3",
                "manual_effort": "Required",
                "total_cost": "$3,000 - $8,000"
            }

        analysis["arf_context"] = {
            "oss_available": ARF_OSS_AVAILABLE,
            "version": "3.3.6",
            "mode": "advisory_only",
            "healing_intent": True
        }

        # Record this analysis as an incident for the history views.
        self.audit_manager.add_incident(scenario_name, scenario.get("metrics", {}))

        return analysis

    def execute_enterprise_healing(self, scenario_name: str, approval_required: bool) -> Tuple[Any, ...]:
        """Simulate enterprise healing and record it in the audit trail.

        Returns (approval_html, config, results, execution_chart,
        execution_table, incident_table) for the Gradio outputs.
        FIX: the scenario's "enterprise_results" sub-dict was mutated in
        place ("status"/"enterprise_context" leaked into the catalog); we now
        copy before adding keys.
        """
        scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
        # Shallow copy: nested dicts are only read, never modified.
        results = dict(scenario.get("enterprise_results", {}))

        if not results:
            # Generic fallback with randomized demo figures.
            results = {
                "actions_completed": [
                    "✅ Auto-scaled resources based on ARF healing intent",
                    "✅ Implemented optimization recommendations",
                    "✅ Deployed monitoring and alerting",
                    "✅ Validated recovery with automated testing"
                ],
                "metrics_improvement": {
                    "Performance": "Dramatically improved",
                    "Stability": "Restored",
                    "Recovery": "Complete"
                },
                "business_impact": {
                    "Recovery Time": f"60 min → {random.randint(5, 15)} min",
                    "Cost Saved": f"${random.randint(2000, 10000):,}",
                    "Users Impacted": "45,000 → 0",
                    "Revenue Protected": f"${random.randint(1000, 5000):,}"
                }
            }

        # Pull the numeric savings out of e.g. "$7,200" (digits only).
        savings = 0
        if "Cost Saved" in results["business_impact"]:
            try:
                savings_str = results["business_impact"]["Cost Saved"]
                savings = int(''.join(filter(str.isdigit, savings_str)))
            except (ValueError, TypeError):
                savings = random.randint(2000, 10000)

        if approval_required:
            results["status"] = "✅ Approved and Executed"
            approval_html = self._create_approval_html(scenario_name, True)
        else:
            results["status"] = "✅ Auto-Executed"
            approval_html = self._create_approval_html(scenario_name, False)

        # Persist the run in the audit trail.
        details = f"{len(results['actions_completed'])} actions executed"
        self.audit_manager.add_execution(
            scenario_name,
            results["actions_completed"],
            savings,
            approval_required,
            details
        )

        results["enterprise_context"] = {
            "approval_required": approval_required,
            "compliance_mode": "strict",
            "audit_trail": "created",
            "learning_applied": True,
            "roi_measured": True
        }

        execution_chart = self.viz_engine.create_execution_history_chart(self.audit_manager)

        return (
            approval_html,
            {"approval_required": approval_required, "compliance_mode": "strict"},
            results,
            execution_chart,
            self.audit_manager.get_execution_history_table(),
            self.audit_manager.get_incident_history_table()
        )

    def _create_approval_html(self, scenario_name: str, approval_required: bool) -> str:
        """Render the approval-workflow status card as an HTML snippet."""
        if approval_required:
            return f"""
            <div style='padding: 20px; background: #f8f9fa; border-radius: 10px; border-left: 4px solid #007bff; margin: 10px 0;'>
                <h4 style='margin: 0 0 10px 0;'>🛡️ Approval Required</h4>
                <p><b>Action:</b> Scale resources for {scenario_name}</p>
                <p><b>Risk Level:</b> Low (auto-rollback available)</p>
                <p><b>Blast Radius:</b> Limited to affected service</p>
                <p><b>Status:</b> ✅ <span style='color: green;'>Approved & Executed</span></p>
            </div>
            """
        else:
            return f"""
            <div style='padding: 20px; background: #e8f5e8; border-radius: 10px; border-left: 4px solid #28a745; margin: 10px 0;'>
                <h4 style='margin: 0 0 10px 0;'>⚡ Auto-Executed</h4>
                <p><b>Action:</b> Autonomous healing for {scenario_name}</p>
                <p><b>Mode:</b> Fully autonomous (safety guardrails active)</p>
                <p><b>Guardrails:</b> Blast radius limits, rollback ready, compliance logging</p>
                <p><b>Status:</b> ✅ <span style='color: green;'>Successfully completed</span></p>
            </div>
            """

    def calculate_roi(self, monthly_incidents: int, avg_impact: int, team_size: int) -> Dict[str, Any]:
        """Calculate ROI of adopting ARF for the given incident load.

        Assumes $150k fully-loaded engineer cost and an 82% incident-cost
        reduction (demo constants). Returns {"analysis": {...}} on success or
        {"error": ...} on bad input.
        """
        try:
            annual_impact = monthly_incidents * 12 * avg_impact
            team_cost = team_size * 150000
            savings = annual_impact * 0.82  # assumed 82% cost reduction

            roi_multiplier = savings / team_cost if team_cost > 0 else 0

            # Tiered recommendation based on the multiplier.
            if roi_multiplier >= 5.0:
                recommendation = "🚀 Excellent fit for ARF Enterprise"
                icon = "🚀"
            elif roi_multiplier >= 2.0:
                recommendation = "✅ Good ROI with ARF Enterprise"
                icon = "✅"
            elif roi_multiplier >= 1.0:
                recommendation = "⚠️ Consider ARF OSS edition first"
                icon = "ℹ️"
            else:
                recommendation = "🆓 Start with ARF OSS (free)"
                icon = "🆓"

            # Months until cumulative monthly savings cover the team cost.
            payback = (team_cost / (savings / 12)) if savings > 0 else 0

            return {
                "analysis": {
                    "your_annual_impact": f"${annual_impact:,.0f}",
                    "your_team_cost": f"${team_cost:,.0f}",
                    "potential_savings": f"${savings:,.0f}",
                    "your_roi_multiplier": f"{roi_multiplier:.1f}×",
                    "vs_industry_average": "5.2× average ROI",
                    "recommendation": f"{icon} {recommendation}",
                    "payback_period": f"{payback:.1f} months" if savings > 0 else "N/A",
                    "annual_savings_potential": f"${savings - team_cost:,.0f}" if savings > team_cost else "$0"
                }
            }
        except Exception as e:
            # Defensive boundary for UI inputs; surface the message instead
            # of crashing the Gradio callback.
            return {"error": f"Calculation error: {str(e)}"}

    def validate_license(self) -> Dict[str, Any]:
        """Validate current license (demo: always succeeds, stamps metadata)."""
        self.license_info["last_validated"] = datetime.datetime.now().isoformat()
        self.license_info["validation_code"] = f"VAL-{random.randint(1000, 9999)}"
        return self.license_info

    def start_trial(self) -> Dict[str, Any]:
        """Start a 30-day trial, downgrading limits and feature set."""
        expires = datetime.datetime.now() + datetime.timedelta(days=30)

        self.license_info.update({
            "tier": "TRIAL",
            "expires_at": expires.isoformat(),
            "features": ["autonomous_healing", "compliance", "audit_trail"],
            "max_services": 10,
            "max_incidents_per_month": 100,
            "status": "🆓 Trial Active",
            "trial_started": datetime.datetime.now().isoformat(),
            "days_remaining": 30
        })

        return self.license_info

    def upgrade_license(self) -> Dict[str, Any]:
        """Upgrade to the next license tier; no-op at the top (PLATFORM)."""
        tiers = ["STARTER", "PROFESSIONAL", "ENTERPRISE", "PLATFORM"]
        current_tier = self.license_info.get("tier", "STARTER")
        # Unknown tiers (e.g. TRIAL) restart the ladder from STARTER.
        current_idx = tiers.index(current_tier) if current_tier in tiers else 0

        if current_idx < len(tiers) - 1:
            new_tier = tiers[current_idx + 1]
            self.license_info["tier"] = new_tier
            self.license_info["status"] = f"✅ Upgraded to {new_tier}"

        return self.license_info

    def update_mcp_mode(self, mode: str) -> Dict[str, Any]:
        """Set the MCP execution mode and return the resulting config.

        Expected values are "approval" or "autonomous"; any other string
        yields a config with both flags False.
        """
        self.mcp_mode = mode

        config = {
            "mcp_mode": mode,
            "approval_required": mode == "approval",
            "autonomous": mode == "autonomous",
            "safety_guardrails": True,
            "rollback_enabled": True,
            "compliance_mode": "strict"
        }

        return config

    def get_learning_stats(self) -> Dict[str, Any]:
        """Refresh learning stats from the audit trail and return them.

        NOTE: success_rate is executed-healings / incidents, which can exceed
        100% when more executions than incidents are recorded.
        """
        total_incidents = len(self.audit_manager.incident_history)
        resolved = len([e for e in self.audit_manager.execution_history
                        if "Executed" in e.get("status", "")])

        if total_incidents > 0:
            success_rate = (resolved / total_incidents) * 100
            self.learning_stats.update({
                "total_incidents": total_incidents,
                "resolved_automatically": resolved,
                "success_rate": f"{success_rate:.1f}%",
                "graph_nodes": total_incidents,
                "graph_edges": total_incidents * 2
            })

        return self.learning_stats

    def update_learning_stats(self) -> Dict[str, Any]:
        """Update and return learning stats (alias kept for UI wiring)."""
        return self.get_learning_stats()

    def analyze_patterns(self) -> Dict[str, Any]:
        """Analyze patterns in incident history.

        FIX: the previous implementation computed average severity as
        sum(counts)/total, which is always 1.0; severity is now weighted by
        its frequency. Out-of-range severities no longer raise KeyError.
        """
        incidents = list(self.audit_manager.incident_history)

        if not incidents:
            return {"status": "No incidents to analyze"}

        type_counts: Dict[str, int] = {}
        severity_counts = {1: 0, 2: 0, 3: 0}

        for incident in incidents:
            inc_type = incident.get("type", "Unknown")
            type_counts[inc_type] = type_counts.get(inc_type, 0) + 1
            sev = incident.get("severity", 2)
            # Tolerate severities outside 1-3 instead of raising KeyError.
            severity_counts[sev] = severity_counts.get(sev, 0) + 1

        # Top-5 incident types by frequency.
        most_common = sorted(type_counts.items(), key=lambda x: x[1], reverse=True)[:5]

        total_incidents = len(incidents)
        # Weight each severity level by how often it occurred.
        avg_severity = (
            sum(sev * count for sev, count in severity_counts.items()) / total_incidents
            if total_incidents > 0 else 0
        )

        return {
            "total_incidents_analyzed": total_incidents,
            "most_common_incidents": dict(most_common),
            "severity_distribution": severity_counts,
            "average_severity": f"{avg_severity:.2f}",
            "time_period_covered": "Last 24 hours",
            "recommendations": [
                f"Focus on resolving {most_common[0][0]} incidents (most frequent)" if most_common else "No patterns found",
                "Implement proactive monitoring for high-severity patterns",
                "Review incident resolution times for optimization",
                "Update runbooks based on frequent patterns"
            ]
        }

    def search_similar_incidents(self, query: str) -> List[List[str]]:
        """Search for similar incidents (demo: similarity scores are mocked)."""
        if not query.strip():
            return []

        incidents = list(self.audit_manager.incident_history)[:10]

        results = []
        for i, incident in enumerate(incidents[:5]):
            # Synthetic, monotonically-increasing similarity for the demo.
            similarity = 0.7 + (i * 0.05)
            results.append([
                incident.get("type", "Unknown"),
                f"{similarity:.0%}",
                "✅ Resolved" if i % 2 == 0 else "⚠️ Pending",
                f"{random.randint(1, 5)} actions"
            ])

        return results

    def export_graph_data(self) -> str:
        """Write a JSON snapshot of the (mock) memory graph to a temp file.

        Returns the file path; delete=False so Gradio can serve the download.
        """
        payload = json.dumps({
            "nodes": len(self.audit_manager.incident_history),
            "edges": len(self.audit_manager.incident_history) * 2,
            "exported_at": datetime.datetime.now().isoformat(),
            "incidents": list(self.audit_manager.incident_history)[:10]
        }, default=str)
        # Context manager guarantees the handle closes even on write errors.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.json') as temp_file:
            temp_file.write(payload.encode())
        return temp_file.name

    def export_embeddings(self) -> str:
        """Write mock embeddings to a temp file and return its path."""
        with tempfile.NamedTemporaryFile(delete=False, suffix='.bin') as temp_file:
            temp_file.write(b"Mock embeddings data")
        return temp_file.name
|
|
| |
| |
| |
|
|
def create_interface() -> gr.Blocks:
    """Create the comprehensive Gradio interface.

    Builds the five-tab investor demo (incident demo, ROI, audit trail,
    enterprise features, memory/learning), wires every button and control to
    the shared business-logic and visualization engines, and returns the
    Blocks app for the caller to launch.
    """

    # Shared state captured by every event handler below via closure.
    audit_manager = AuditTrailManager()
    business_logic = EnhancedBusinessLogic(audit_manager)
    viz_engine = EnhancedVisualizationEngine()

    custom_css = """
    .gradio-container { max-width: 1400px !important; margin: auto !important; }
    h1, h2, h3 { color: #1a365d !important; }
    .critical { color: #FF6B6B !important; font-weight: bold; }
    .success { color: #4ECDC4 !important; font-weight: bold; }
    """

    with gr.Blocks(
        title="🚀 ARF Investor Demo v3.8.0",
        theme=gr.themes.Soft(),
        css=custom_css
    ) as demo:

        # Banner reflects whether the real ARF OSS package imported at module load.
        arf_status = "✅ ARF OSS v3.3.6" if ARF_OSS_AVAILABLE else "⚠️ Simulation Mode"

        gr.Markdown(f"""
        # 🚀 Agentic Reliability Framework - Investor Demo v3.8.0
        ## From Cost Center to Profit Engine: 5.2× ROI with Autonomous Reliability

        <div style='color: #666; font-size: 16px; margin-top: 10px;'>
        {arf_status} | Experience: <b>OSS (Advisory)</b> ↔ <b>Enterprise (Autonomous)</b>
        </div>
        """)

        with gr.Tabs():

            # ---------------- Tab 1: live incident demo ----------------
            with gr.TabItem("🔥 Live Incident Demo"):
                with gr.Row():

                    # Left column: scenario selection and its metrics/impact.
                    with gr.Column(scale=1):
                        gr.Markdown("### 🎬 Incident Scenario")
                        scenario_dropdown = gr.Dropdown(
                            choices=list(INCIDENT_SCENARIOS.keys()),
                            value="Cache Miss Storm",
                            label="Select critical incident:"
                        )

                        scenario_description = gr.Markdown(
                            value=INCIDENT_SCENARIOS["Cache Miss Storm"]["description"]
                        )

                        gr.Markdown("### 📊 Current Crisis Metrics")
                        metrics_display = gr.JSON(
                            value=INCIDENT_SCENARIOS["Cache Miss Storm"]["metrics"],
                            label="Live Metrics"
                        )

                        gr.Markdown("### 💰 Business Impact")
                        impact_display = gr.JSON(
                            value=INCIDENT_SCENARIOS["Cache Miss Storm"]["impact"],
                            label="Impact Analysis"
                        )

                    # Right column: timeline, action buttons, results.
                    with gr.Column(scale=2):

                        gr.Markdown("### 📈 Incident Timeline")
                        timeline_output = gr.Plot()

                        gr.Markdown("### ⚡ Take Action")
                        with gr.Row():
                            oss_btn = gr.Button("🆓 Run OSS Analysis", variant="secondary")
                            enterprise_btn = gr.Button("🚀 Execute Enterprise Healing", variant="primary")

                        with gr.Row():
                            approval_toggle = gr.Checkbox(
                                label="🔐 Require Manual Approval",
                                value=True,
                                info="Toggle to show approval workflow vs auto-execution"
                            )
                            demo_mode_btn = gr.Button("⚡ Quick Demo", variant="secondary", size="sm")

                        approval_display = gr.HTML(
                            value="<div style='padding: 10px; background: #f8f9fa; border-radius: 5px;'>Approval status will appear here</div>"
                        )

                        config_display = gr.JSON(
                            label="⚙️ Enterprise Configuration",
                            value={"approval_required": True, "compliance_mode": "strict"}
                        )

                        results_display = gr.JSON(
                            label="🎯 Execution Results",
                            value={"status": "Ready for execution..."}
                        )

            # ---------------- Tab 2: business impact & ROI ----------------
            with gr.TabItem("💰 Business Impact & ROI"):
                with gr.Column():

                    gr.Markdown("### 📊 Executive Business Dashboard")
                    dashboard_output = gr.Plot()

                    gr.Markdown("### 🧮 Interactive ROI Calculator")
                    with gr.Row():
                        with gr.Column(scale=1):
                            monthly_slider = gr.Slider(
                                1, 100, value=15, step=1,
                                label="Monthly incidents"
                            )
                            impact_slider = gr.Slider(
                                1000, 50000, value=8500, step=500,
                                label="Average incident impact ($)"
                            )
                            team_slider = gr.Slider(
                                1, 20, value=5, step=1,
                                label="Reliability team size"
                            )
                            calculate_btn = gr.Button("Calculate My ROI", variant="primary")

                        with gr.Column(scale=2):
                            roi_output = gr.JSON(
                                label="Your ROI Analysis",
                                value={"analysis": "Adjust sliders and click Calculate"}
                            )

                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("""
                            **📈 ARF Enterprise ROI Metrics**
                            - **Average ROI:** 5.2× first year
                            - **Payback Period:** 2-3 months
                            - **Auto-Heal Rate:** 81.7%
                            - **MTTR Reduction:** 85%
                            - **Cost Savings:** $6.2M average annually
                            """)
                        with gr.Column():
                            gr.Markdown("""
                            **🎯 Business Impact**
                            - **Engineer Time:** 325+ hours reclaimed annually
                            - **SLA Compliance:** 99.9% maintained
                            - **Customer Satisfaction:** +40% improvement
                            - **Revenue Protection:** $8,500+/hour saved
                            - **Innovation Capacity:** 60% increase
                            """)

            # ---------------- Tab 3: audit trail & history ----------------
            with gr.TabItem("📜 Audit Trail & History"):
                with gr.Row():

                    # Left column: execution (healing-action) history.
                    with gr.Column(scale=1):
                        gr.Markdown("### 📋 Execution History (Audit Trail)")

                        with gr.Row():
                            refresh_btn = gr.Button("🔄 Refresh", variant="secondary", size="sm")
                            clear_btn = gr.Button("🗑️ Clear History", variant="stop", size="sm")
                            export_btn = gr.Button("📥 Export JSON", variant="secondary", size="sm")
                            analyze_btn = gr.Button("🧠 AI Analysis", variant="primary", size="sm")

                        execution_table = gr.Dataframe(
                            headers=["Time", "Scenario", "Actions", "Status", "Savings", "Details"],
                            value=audit_manager.get_execution_history_table(),
                            label="📋 Execution History",
                            interactive=False,
                            wrap=True,
                            datatype=["str", "str", "str", "str", "str", "str"]
                        )

                        gr.Markdown("### 📈 Visual History")
                        execution_chart = gr.Plot()

                        gr.Markdown("### 🧠 AI Pattern Analysis")
                        ai_analysis = gr.JSON(
                            value={"status": "Click 'AI Analysis' to run pattern detection"},
                            label="Pattern Recognition Results"
                        )

                    # Right column: incident history, memory graph, export.
                    with gr.Column(scale=1):
                        gr.Markdown("### 📊 Incident History")

                        incident_table = gr.Dataframe(
                            headers=["Time", "Service", "Type", "Severity", "Description"],
                            value=audit_manager.get_incident_history_table(),
                            label="📊 Incident History",
                            interactive=False,
                            wrap=True
                        )

                        gr.Markdown("### 🧠 Memory Graph")
                        memory_graph = gr.Plot()

                        gr.Markdown("### 📤 Export & Analytics")
                        export_text = gr.Textbox(
                            label="Full Audit Trail (JSON)",
                            value=audit_manager.export_audit_trail(),
                            lines=10,
                            max_lines=15
                        )

                        # NOTE(review): these two buttons have no click handlers
                        # wired below — they render but do nothing yet.
                        with gr.Row():
                            download_btn = gr.Button("💾 Download JSON", variant="secondary")
                            compliance_btn = gr.Button("📋 Generate Compliance Report", variant="secondary")

            # ---------------- Tab 4: enterprise features ----------------
            with gr.TabItem("🏢 Enterprise Features"):
                with gr.Row():

                    # Left column: licensing and feature matrix.
                    with gr.Column(scale=1):
                        gr.Markdown("### 🔐 License Management")

                        license_display = gr.JSON(
                            value=business_logic.license_info,
                            label="License Information"
                        )

                        with gr.Row():
                            validate_btn = gr.Button("🔍 Validate License", variant="secondary")
                            trial_btn = gr.Button("🆓 Start 30-Day Trial", variant="primary")
                            upgrade_btn = gr.Button("🚀 Upgrade Tier", variant="secondary")

                        gr.Markdown("### ⚡ Feature Matrix")

                        features_data = [
                            ["🤖 Autonomous Healing", "❌", "✅ Auto", "Enterprise Only"],
                            ["📊 Executive Dashboards", "Basic", "Advanced", "✅ Comprehensive"],
                            ["🔐 Compliance Automation", "❌", "✅", "✅ SOC2/GDPR"],
                            ["📈 Predictive Analytics", "❌", "Basic", "✅ ML-Powered"],
                            ["🔄 Auto-Remediation", "Manual", "✅ Auto", "✅ AI-Driven"],
                            ["🎯 SLA Guarantees", "❌", "❌", "✅ 99.9%"],
                            ["📊 Cost Optimization", "Basic", "Advanced", "✅ AI-Optimized"],
                            ["🔒 Role-Based Access", "❌", "✅", "✅ Granular"],
                            ["📝 Audit Trail", "Basic", "✅", "✅ Comprehensive"],
                            ["🔄 Multi-Cloud", "❌", "❌", "✅ Native"],
                            ["🧠 Learning Engine", "❌", "Basic", "✅ Continuous"],
                            ["📋 Compliance Reports", "❌", "❌", "✅ Automated"],
                        ]

                        features_table = gr.Dataframe(
                            value=features_data,
                            headers=["Feature", "OSS", "Starter", "Enterprise"],
                            label="Feature Comparison Matrix",
                            interactive=False,
                            wrap=True
                        )

                    # Right column: compliance, integrations, MCP mode.
                    with gr.Column(scale=1):

                        gr.Markdown("### 📋 Compliance Status")

                        compliance_status = gr.JSON(
                            value={
                                "SOC2": {"status": "✅ Certified", "expires": "2025-06-30"},
                                "GDPR": {"status": "✅ Compliant", "last_audit": "2024-10-15"},
                                "HIPAA": {"status": "🟡 In Progress", "eta": "2024-12-31"},
                                "ISO27001": {"status": "✅ Certified", "cert_id": "ISO-2024-001"},
                                "FedRAMP": {"status": "🔄 Moderate Pending", "phase": "Assessment"},
                                "CCPA": {"status": "✅ Compliant", "verified": True}
                            },
                            label="Compliance Certifications"
                        )

                        gr.Markdown("### 🔗 Integration Hub")

                        integrations_data = [
                            ["AWS", "CloudWatch, S3, Lambda", "✅ Connected", "Last sync: 5min ago"],
                            ["Azure", "Monitor, Log Analytics", "✅ Connected", "Last sync: 8min ago"],
                            ["GCP", "Operations, BigQuery", "✅ Connected", "Last sync: 3min ago"],
                            ["Datadog", "Metrics, Logs, APM", "✅ Connected", "Active"],
                            ["New Relic", "Full-stack", "✅ Connected", "Active"],
                            ["PagerDuty", "Incident Response", "✅ Connected", "On-call active"],
                            ["ServiceNow", "ITSM & CMDB", "✅ Connected", "Last sync: 15min ago"],
                            ["Slack", "Notifications", "✅ Connected", "#arf-alerts"],
                            ["Teams", "Notifications", "✅ Connected", "General channel"],
                            ["Jira", "Issue Tracking", "✅ Connected", "ARF project"],
                            ["GitHub", "CI/CD", "✅ Connected", "Webhooks active"],
                            ["GitLab", "CI/CD", "✅ Connected", "Pipelines active"],
                        ]

                        integrations_table = gr.Dataframe(
                            value=integrations_data,
                            headers=["Platform", "Services", "Status", "Details"],
                            label="Active Integrations",
                            interactive=False,
                            wrap=True
                        )

                        gr.Markdown("### ⚙️ MCP Execution Mode")
                        mcp_mode = gr.Radio(
                            choices=["advisory", "approval", "autonomous"],
                            value="approval",
                            label="Select MCP Mode:",
                            info="Controls how healing actions are executed"
                        )

                        update_mode_btn = gr.Button("🔄 Update Mode", variant="secondary")

            # ---------------- Tab 5: memory & learning ----------------
            with gr.TabItem("🧠 Memory & Learning"):
                with gr.Row():

                    # Left column: memory graph plus view controls.
                    with gr.Column(scale=2):
                        gr.Markdown("### 🧠 Incident Memory Graph")

                        with gr.Row():
                            graph_type = gr.Radio(
                                choices=["Force Directed", "Hierarchical", "Timeline"],
                                value="Force Directed",
                                label="Graph Type"
                            )
                            show_weights = gr.Checkbox(label="Show Edge Weights", value=True)
                            auto_layout = gr.Checkbox(label="Auto-Layout", value=True)

                        memory_graph_plot = gr.Plot()

                        gr.Markdown("### 🔍 Selected Node Details")
                        node_details = gr.JSON(
                            value={"select": "a node in the graph to see details"},
                            label="Node Information"
                        )

                    # Right column: similarity search, stats, exports.
                    with gr.Column(scale=1):

                        gr.Markdown("### 🔎 Similarity Search")

                        search_query = gr.Textbox(
                            label="Search for similar incidents",
                            placeholder="Describe incident or paste metrics...",
                            lines=3
                        )

                        with gr.Row():
                            search_btn = gr.Button("🔍 Search Memory", variant="primary")
                            clear_search_btn = gr.Button("Clear", variant="secondary")

                        search_results = gr.Dataframe(
                            headers=["Incident", "Similarity", "Resolution", "Actions"],
                            value=[],
                            label="Search Results",
                            interactive=False
                        )

                        gr.Markdown("### 📊 Learning Engine Stats")

                        learning_stats = gr.JSON(
                            value=business_logic.learning_stats,
                            label="Learning Engine Statistics"
                        )

                        gr.Markdown("### 📤 Export Learning Data")
                        with gr.Row():
                            export_graph_btn = gr.Button("💾 Export Graph", variant="secondary")
                            export_embeddings_btn = gr.Button("💾 Export Embeddings", variant="secondary")

                        export_status = gr.Textbox(
                            label="Export Status",
                            value="Ready for export...",
                            interactive=False
                        )

        # ================= Event wiring =================

        def update_scenario(scenario_name: str) -> Tuple[str, Dict, Dict, go.Figure, Dict]:
            """Refresh description, metrics, impact, and timeline for a scenario."""
            scenario = INCIDENT_SCENARIOS.get(scenario_name, {})
            return (
                f"### {scenario_name}\n{scenario.get('description', 'No description')}",
                scenario.get("metrics", {}),
                scenario.get("impact", {}),
                viz_engine.create_incident_timeline(),
                {"status": "Ready to analyze..."}
            )

        scenario_dropdown.change(
            fn=update_scenario,
            inputs=[scenario_dropdown],
            outputs=[scenario_description, metrics_display, impact_display, timeline_output, results_display]
        )

        def run_oss_analysis(scenario_name: str) -> Tuple[Dict, List[List[str]], go.Figure]:
            """Run the advisory (OSS) analysis and refresh incident views."""
            analysis = business_logic.run_oss_analysis(scenario_name)
            incident_table_data = audit_manager.get_incident_history_table()
            graph_plot = viz_engine.create_memory_graph(audit_manager)

            return analysis, incident_table_data, graph_plot

        oss_btn.click(
            fn=run_oss_analysis,
            inputs=[scenario_dropdown],
            outputs=[results_display, incident_table, memory_graph]
        )

        def execute_healing(scenario_name: str, approval_required: bool) -> Tuple[str, Dict, Dict, go.Figure, List[List[str]], List[List[str]], Dict]:
            """Execute enterprise healing and append refreshed learning stats."""
            # execute_enterprise_healing returns the first six output values;
            # the learning-stats dict is appended as the seventh.
            results = business_logic.execute_enterprise_healing(scenario_name, approval_required)
            new_stats = business_logic.update_learning_stats()

            return results + (new_stats,)

        enterprise_btn.click(
            fn=execute_healing,
            inputs=[scenario_dropdown, approval_toggle],
            outputs=[
                approval_display, config_display, results_display,
                execution_chart, execution_table, incident_table,
                learning_stats
            ]
        )

        def run_quick_demo() -> Tuple[str, Dict, str, Dict, go.Figure, List[List[str]], List[List[str]], go.Figure, Dict, Dict, go.Figure]:
            """One-click demo: OSS analysis + autonomous healing + refreshed views."""
            # The OSS analysis is run for its audit-trail side effects; the
            # results panel shows the final healing outcome (results[2]).
            business_logic.run_oss_analysis("Cache Miss Storm")
            results = business_logic.execute_enterprise_healing("Cache Miss Storm", False)
            new_stats = business_logic.update_learning_stats()

            return (
                "<div style='padding: 15px; background: #d4edda; border-radius: 8px; border-left: 4px solid #28a745;'>"
                "⚡ Quick Demo Completed!<br>"
                "1. ✅ OSS Analysis Completed<br>"
                "2. ✅ Enterprise Healing Executed<br>"
                "3. ✅ Audit Trail Updated<br>"
                "4. ✅ ROI Calculated<br>"
                "5. ✅ Learning Engine Updated"
                "</div>",
                {"status": "demo_completed", "mode": "autonomous"},
                results[0],
                results[2],
                viz_engine.create_execution_history_chart(audit_manager),
                audit_manager.get_execution_history_table(),
                audit_manager.get_incident_history_table(),
                viz_engine.create_incident_timeline(),
                # gr.update() works on both Gradio 3.x and 4.x; the instance
                # method Component.update() was removed in 4.x.
                gr.update(value=False),
                new_stats,
                viz_engine.create_memory_graph(audit_manager)
            )

        # results_display appears exactly once in outputs: its previous first
        # value (the raw analysis dict) was immediately overwritten anyway.
        demo_mode_btn.click(
            fn=run_quick_demo,
            outputs=[
                scenario_description,
                config_display,
                approval_display,
                results_display,
                execution_chart,
                execution_table,
                incident_table,
                timeline_output,
                approval_toggle,
                learning_stats,
                memory_graph
            ]
        )

        def calculate_roi(monthly_incidents: int, avg_impact: int, team_size: int) -> Dict:
            """Delegate ROI math to the business-logic engine."""
            return business_logic.calculate_roi(monthly_incidents, avg_impact, team_size)

        calculate_btn.click(
            fn=calculate_roi,
            inputs=[monthly_slider, impact_slider, team_slider],
            outputs=[roi_output]
        )

        # Live recalculation whenever any slider moves.
        for slider in [monthly_slider, impact_slider, team_slider]:
            slider.change(
                fn=calculate_roi,
                inputs=[monthly_slider, impact_slider, team_slider],
                outputs=[roi_output]
            )

        def refresh_history() -> Tuple[List[List[str]], List[List[str]], go.Figure, str, go.Figure]:
            """Re-read all history-backed views from the audit manager."""
            return (
                audit_manager.get_execution_history_table(),
                audit_manager.get_incident_history_table(),
                viz_engine.create_execution_history_chart(audit_manager),
                audit_manager.export_audit_trail(),
                viz_engine.create_memory_graph(audit_manager)
            )

        refresh_btn.click(
            fn=refresh_history,
            outputs=[execution_table, incident_table, execution_chart, export_text, memory_graph]
        )

        def clear_history() -> Tuple[List[List[str]], List[List[str]], go.Figure, str, go.Figure]:
            """Wipe audit history and rebuild the (now empty) views."""
            execution_table_data, incident_table_data = audit_manager.clear_history()
            return (
                execution_table_data,
                incident_table_data,
                viz_engine.create_execution_history_chart(audit_manager),
                audit_manager.export_audit_trail(),
                viz_engine.create_memory_graph(audit_manager)
            )

        clear_btn.click(
            fn=clear_history,
            outputs=[execution_table, incident_table, execution_chart, export_text, memory_graph]
        )

        def run_ai_analysis() -> Tuple[Dict, go.Figure]:
            """Run pattern analysis and render its chart into the history plot."""
            analysis = business_logic.analyze_patterns()
            pattern_chart = viz_engine.create_pattern_analysis_chart(analysis)
            return analysis, pattern_chart

        analyze_btn.click(
            fn=run_ai_analysis,
            outputs=[ai_analysis, execution_chart]
        )

        def update_export() -> str:
            """Refresh the JSON audit-trail textbox."""
            return audit_manager.export_audit_trail()

        export_btn.click(
            fn=update_export,
            outputs=[export_text]
        )

        # --- License management handlers ---

        def validate_license() -> Dict:
            """Validate the current license."""
            return business_logic.validate_license()

        validate_btn.click(
            fn=validate_license,
            outputs=[license_display]
        )

        def start_trial() -> Dict:
            """Start a 30-day trial."""
            return business_logic.start_trial()

        trial_btn.click(
            fn=start_trial,
            outputs=[license_display]
        )

        def upgrade_tier() -> Dict:
            """Upgrade the license tier."""
            return business_logic.upgrade_license()

        upgrade_btn.click(
            fn=upgrade_tier,
            outputs=[license_display]
        )

        def update_mcp_mode(mode: str) -> Dict:
            """Switch MCP execution mode (advisory/approval/autonomous)."""
            return business_logic.update_mcp_mode(mode)

        update_mode_btn.click(
            fn=update_mcp_mode,
            inputs=[mcp_mode],
            outputs=[config_display]
        )

        # --- Memory-graph view controls ---

        def update_graph_view(graph_type: str, show_weights: bool, auto_layout: bool) -> go.Figure:
            """Re-render the memory graph with the selected view options."""
            return viz_engine.create_memory_graph(
                audit_manager,
                graph_type=graph_type,
                show_weights=show_weights,
                auto_layout=auto_layout
            )

        for control in [graph_type, show_weights, auto_layout]:
            control.change(
                fn=update_graph_view,
                inputs=[graph_type, show_weights, auto_layout],
                outputs=[memory_graph_plot]
            )

        def search_memory(query: str) -> Tuple[List[List[str]], str]:
            """Similarity-search the incident memory; status goes to export box."""
            if not query.strip():
                return [], "Enter a search query"

            results = business_logic.search_similar_incidents(query)
            return results, f"Found {len(results)} similar incidents"

        search_btn.click(
            fn=search_memory,
            inputs=[search_query],
            outputs=[search_results, export_status]
        )

        def clear_search() -> Tuple[List, str]:
            """Reset the search results table."""
            return [], "Search cleared"

        clear_search_btn.click(
            fn=clear_search,
            outputs=[search_results, export_status]
        )

        def export_graph_data() -> str:
            """Export graph data and report the file path."""
            export_path = business_logic.export_graph_data()
            return f"Graph exported to: {export_path}"

        export_graph_btn.click(
            fn=export_graph_data,
            outputs=[export_status]
        )

        def export_embeddings() -> str:
            """Export embeddings and report the file path."""
            export_path = business_logic.export_embeddings()
            return f"Embeddings exported to: {export_path}"

        export_embeddings_btn.click(
            fn=export_embeddings,
            outputs=[export_status]
        )

        # Populate all plots and stats once the page loads.
        demo.load(
            fn=lambda: (
                viz_engine.create_business_dashboard(),
                viz_engine.create_incident_timeline(),
                viz_engine.create_execution_history_chart(audit_manager),
                viz_engine.create_memory_graph(audit_manager),
                business_logic.get_learning_stats()
            ),
            outputs=[dashboard_output, timeline_output, execution_chart, memory_graph_plot, learning_stats]
        )

        gr.Markdown("""
        ---
        ### 🚀 ARF Enterprise Platform

        **Tabs Overview:**
        1. **🔥 Live Incident Demo** - Experience OSS vs Enterprise healing
        2. **💰 Business Impact & ROI** - Calculate your savings potential
        3. **📜 Audit Trail & History** - Complete compliance logging
        4. **🏢 Enterprise Features** - License & compliance management
        5. **🧠 Memory & Learning** - AI-powered incident memory

        **Get Started:**
        • **Free Trial:** 30-day enterprise trial
        • **Contact:** sales@arfinvestor.com
        • **Docs:** docs.arfinvestor.com/enterprise
        • **Slack:** Join 2,500+ engineers

        <div style="text-align: center; color: #666; margin-top: 20px;">
        <small>© 2024 Agentic Reliability Framework. Demo v3.8.0 Enterprise Edition</small>
        </div>
        """)

    return demo
|
|
| |
| |
| |
|
|
# Script entry point: build the Gradio app and serve it locally.
if __name__ == "__main__":
    print("🚀 Starting ARF Ultimate Investor Demo v3.8.0 (Enterprise Edition)...")
    print("📊 Features: 5 Tabs, Memory Graph, License Management, Compliance")
    print("🧠 Learning Engine: Pattern Analysis, Similarity Search")
    print("🌐 Opening web interface...")
    
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",  # bind on all interfaces — NOTE(review): exposes the demo to the whole network; confirm this is intended outside a container
        server_port=7860,  # Gradio's conventional default port
        share=False,  # no public Gradio share tunnel
        debug=True  # NOTE(review): debug mode left on; consider disabling for non-demo deployments
    )