diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,13 +1,6 @@
"""
-🚀 ARF ULTIMATE INVESTOR DEMO
-Showing OSS vs Enterprise capabilities with maximum WOW factor
-Features demonstrated:
-1. Live business impact dashboard
-2. RAG graph memory visualization
-3. Predictive failure prevention
-4. Multi-agent orchestration
-5. Compliance automation
-6. Real ROI calculation
+🚀 ARF ULTIMATE INVESTOR DEMO v3.3.7
+Enhanced with professional visualizations, export features, and data persistence
"""
import asyncio
@@ -17,8 +10,10 @@ import logging
import time
import uuid
import random
-from typing import Dict, Any, List, Optional
-from collections import defaultdict
+import base64
+import io
+from typing import Dict, Any, List, Optional, Tuple
+from collections import defaultdict, deque
import hashlib
import gradio as gr
@@ -26,6 +21,10 @@ import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
+from plotly.subplots import make_subplots
+import matplotlib.pyplot as plt
+from matplotlib import font_manager
+import seaborn as sns
# Import OSS components
try:
@@ -43,365 +42,671 @@ except ImportError:
logger.warning("OSS package not available")
# ============================================================================
-# BUSINESS IMPACT CALCULATIONS (Based on business.py)
+# DATA PERSISTENCE & SESSION MANAGEMENT
# ============================================================================
-class BusinessImpactCalculator:
- """Enterprise-scale business impact calculation"""
+class DemoSessionManager:
+ """Manage session data persistence and historical trends"""
def __init__(self):
- # Enterprise-scale constants
- self.BASE_REVENUE_PER_MINUTE = 5000.0 # $5K/min for enterprise
- self.BASE_USERS = 10000 # 10K active users
-
- def calculate_impact(self, scenario: Dict[str, Any]) -> Dict[str, Any]:
- """Calculate business impact for demo scenarios"""
- revenue_at_risk = scenario.get("revenue_at_risk", 0)
- users_impacted = scenario.get("users_impacted", 0)
-
- if revenue_at_risk > 1000000:
- severity = "🚨 CRITICAL"
- impact_color = "#ff4444"
- elif revenue_at_risk > 500000:
- severity = "⚠️ HIGH"
- impact_color = "#ffaa00"
- elif revenue_at_risk > 100000:
- severity = "📈 MEDIUM"
- impact_color = "#ffdd00"
- else:
- severity = "✅ LOW"
- impact_color = "#44ff44"
-
- return {
- "revenue_at_risk": f"${revenue_at_risk:,.0f}",
- "users_impacted": f"{users_impacted:,}",
- "severity": severity,
- "impact_color": impact_color,
- "time_to_resolution": f"{scenario.get('time_to_resolve', 2.3):.1f} min",
- "auto_heal_possible": scenario.get("auto_heal_possible", True),
+ self.sessions = {}
+ self.global_stats = {
+ "total_sessions": 0,
+ "total_revenue_protected": 0.0,
+ "total_executions": 0,
+ "historical_trends": deque(maxlen=100), # Last 100 data points
+ "peak_performance": {
+ "highest_roi": 0.0,
+ "fastest_mttr": float('inf'),
+ "largest_incident_resolved": 0.0,
+ }
}
-
-# ============================================================================
-# RAG GRAPH VISUALIZATION (Based on v3_reliability.py)
-# ============================================================================
-
-class RAGGraphVisualizer:
- """Visualize RAG graph memory growth"""
- def __init__(self):
- self.incidents = []
- self.outcomes = []
- self.edges = []
-
- def add_incident(self, component: str, severity: str):
- """Add an incident to the graph"""
- incident_id = f"inc_{len(self.incidents)}"
- self.incidents.append({
- "id": incident_id,
- "component": component,
- "severity": severity,
- "timestamp": time.time(),
- })
- return incident_id
+ def start_session(self, session_id: str):
+ """Start a new user session"""
+ if session_id not in self.sessions:
+ self.sessions[session_id] = {
+ "start_time": time.time(),
+ "actions": [],
+ "metrics": {},
+ "scenarios_tried": set(),
+ "roi_calculations": [],
+ "exported_reports": [],
+ }
+ self.global_stats["total_sessions"] += 1
+ return self.sessions[session_id]
- def add_outcome(self, incident_id: str, success: bool, action: str):
- """Add an outcome to the graph"""
- outcome_id = f"out_{len(self.outcomes)}"
- self.outcomes.append({
- "id": outcome_id,
- "incident_id": incident_id,
- "success": success,
- "action": action,
- "timestamp": time.time(),
- })
-
- # Add edge
- self.edges.append({
- "source": incident_id,
- "target": outcome_id,
- "type": "resolved" if success else "failed",
- })
- return outcome_id
+ def record_action(self, session_id: str, action: str, details: Dict[str, Any]):
+ """Record user action with details"""
+ if session_id in self.sessions:
+ self.sessions[session_id]["actions"].append({
+ "timestamp": time.time(),
+ "action": action,
+ "details": details,
+ })
+
+ # Update global historical trends
+ if "revenue_protected" in details:
+ self.global_stats["historical_trends"].append({
+ "timestamp": time.time(),
+ "revenue": details["revenue_protected"],
+ "session": session_id[-6:], # Last 6 chars for anonymity
+ })
+ self.global_stats["total_revenue_protected"] += details["revenue_protected"]
+
+ self.global_stats["total_executions"] += 1
+
+ # Update peak performance
+ if details.get("revenue_protected", 0) > self.global_stats["peak_performance"]["largest_incident_resolved"]:
+ self.global_stats["peak_performance"]["largest_incident_resolved"] = details["revenue_protected"]
- def get_graph_figure(self):
- """Create Plotly figure of RAG graph"""
- if not self.incidents:
+ def get_session_summary(self, session_id: str) -> Dict[str, Any]:
+ """Get summary of current session"""
+ if session_id in self.sessions:
+ session = self.sessions[session_id]
+ duration = time.time() - session["start_time"]
+
+ return {
+ "session_duration": f"{duration/60:.1f} minutes",
+ "total_actions": len(session["actions"]),
+ "scenarios_tried": len(session["scenarios_tried"]),
+ "roi_calculations": len(session["roi_calculations"]),
+ "last_action": session["actions"][-1]["action"] if session["actions"] else "None",
+ "session_id_short": session_id[-8:],
+ }
+ return {}
+
+ def get_historical_trends_chart(self):
+ """Create historical trends visualization"""
+ if not self.global_stats["historical_trends"]:
return go.Figure()
- # Prepare node data
- nodes = []
- node_colors = []
- node_sizes = []
+ # Prepare data
+ data = list(self.global_stats["historical_trends"])
+ df = pd.DataFrame(data)
- # Add incident nodes
- for inc in self.incidents:
- nodes.append({
- "x": random.random(),
- "y": random.random(),
- "label": f"{inc['component']}\n{inc['severity']}",
- "id": inc["id"],
- "type": "incident",
- })
- node_colors.append("#ff6b6b" if inc["severity"] == "critical" else "#ffa726")
- node_sizes.append(30)
+ # Create figure with subplots
+ fig = make_subplots(
+ rows=2, cols=2,
+ subplot_titles=('Revenue Protection Over Time', 'Cumulative Revenue',
+ 'Session Activity', 'Performance Metrics'),
+ specs=[[{'type': 'scatter'}, {'type': 'scatter'}],
+ [{'type': 'bar'}, {'type': 'indicator'}]],
+ vertical_spacing=0.15,
+ horizontal_spacing=0.15
+ )
- # Add outcome nodes
- for out in self.outcomes:
- nodes.append({
- "x": random.random() + 0.5, # Shift right
- "y": random.random(),
- "label": f"{out['action']}\n{'✅' if out['success'] else '❌'}",
- "id": out["id"],
- "type": "outcome",
- })
- node_colors.append("#4caf50" if out["success"] else "#f44336")
- node_sizes.append(20)
+ # Revenue over time
+ fig.add_trace(
+ go.Scatter(
+ x=df['timestamp'],
+ y=df['revenue'],
+ mode='lines+markers',
+ name='Revenue Protected',
+ line=dict(color='#4CAF50', width=3),
+ marker=dict(size=8),
+                hovertemplate='$%{y:,.0f}<br>%{text}',
+ text=[f"Session: {s}" for s in df['session']]
+ ),
+ row=1, col=1
+ )
- # Create figure
- fig = go.Figure()
+ # Cumulative revenue
+ cumulative_rev = df['revenue'].cumsum()
+ fig.add_trace(
+ go.Scatter(
+ x=df['timestamp'],
+ y=cumulative_rev,
+ mode='lines',
+ name='Cumulative Revenue',
+ line=dict(color='#2196F3', width=3, dash='dash'),
+ fill='tozeroy',
+ fillcolor='rgba(33, 150, 243, 0.1)'
+ ),
+ row=1, col=2
+ )
- # Add edges
- for edge in self.edges:
- source = next((n for n in nodes if n["id"] == edge["source"]), None)
- target = next((n for n in nodes if n["id"] == edge["target"]), None)
-
- if source and target:
- fig.add_trace(go.Scatter(
- x=[source["x"], target["x"]],
- y=[source["y"], target["y"]],
- mode="lines",
- line=dict(
- color="#888888",
- width=2,
- dash="dash" if edge["type"] == "failed" else "solid"
- ),
- hoverinfo="none",
- showlegend=False,
- ))
+ # Session activity (group by session)
+ session_counts = df['session'].value_counts().head(10)
+ fig.add_trace(
+ go.Bar(
+ x=session_counts.index,
+ y=session_counts.values,
+ name='Actions per Session',
+ marker_color='#FF9800',
+                hovertemplate='Session: %{x}<br>Actions: %{y}'
+ ),
+ row=2, col=1
+ )
- # Add nodes
- fig.add_trace(go.Scatter(
- x=[n["x"] for n in nodes],
- y=[n["y"] for n in nodes],
- mode="markers+text",
- marker=dict(
- size=node_sizes,
- color=node_colors,
- line=dict(color="white", width=2)
+ # Performance indicator
+ avg_revenue = df['revenue'].mean() if len(df) > 0 else 0
+ fig.add_trace(
+ go.Indicator(
+ mode="gauge+number+delta",
+ value=avg_revenue,
+ title={'text': "Avg Revenue/Incident"},
+ delta={'reference': 100000, 'increasing': {'color': "#4CAF50"}},
+ gauge={
+ 'axis': {'range': [None, max(500000, avg_revenue * 1.5)]},
+ 'bar': {'color': "#4CAF50"},
+ 'steps': [
+ {'range': [0, 100000], 'color': '#FFEBEE'},
+ {'range': [100000, 300000], 'color': '#FFCDD2'},
+ {'range': [300000, 500000], 'color': '#EF9A9A'}
+ ],
+ 'threshold': {
+ 'line': {'color': "red", 'width': 4},
+ 'thickness': 0.75,
+ 'value': 250000
+ }
+ }
),
- text=[n["label"] for n in nodes],
- textposition="top center",
- hovertext=[f"Type: {n['type']}" for n in nodes],
- hoverinfo="text",
- showlegend=False,
- ))
+ row=2, col=2
+ )
# Update layout
fig.update_layout(
- title="🧠 RAG Graph Memory - Learning from Incidents",
- showlegend=False,
- xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
- yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
- plot_bgcolor="white",
- height=500,
+ title="📈 Historical Performance Trends",
+ height=700,
+ showlegend=True,
+ plot_bgcolor='white',
+ paper_bgcolor='white',
)
+ # Update axes
+ fig.update_xaxes(title_text="Time", row=1, col=1)
+ fig.update_yaxes(title_text="Revenue ($)", row=1, col=1)
+ fig.update_xaxes(title_text="Time", row=1, col=2)
+ fig.update_yaxes(title_text="Cumulative Revenue ($)", row=1, col=2)
+ fig.update_xaxes(title_text="Session", row=2, col=1)
+ fig.update_yaxes(title_text="Actions", row=2, col=1)
+
return fig
-
- def get_stats(self):
- """Get graph statistics"""
- successful_outcomes = sum(1 for o in self.outcomes if o["success"])
-
- return {
- "incident_nodes": len(self.incidents),
- "outcome_nodes": len(self.outcomes),
- "edges": len(self.edges),
- "success_rate": f"{(successful_outcomes / len(self.outcomes) * 100):.1f}%" if self.outcomes else "0%",
- "patterns_learned": len(self.outcomes) // 3, # Rough estimate
- }
# ============================================================================
-# PREDICTIVE ANALYTICS (Based on predictive.py)
+# ENHANCED VISUALIZATION ENGINE
# ============================================================================
-class PredictiveVisualizer:
- """Visualize predictive analytics"""
+class EnhancedVisualizationEngine:
+ """Enhanced visualization engine with animations and interactivity"""
- def __init__(self):
- self.predictions = []
-
- def add_prediction(self, metric: str, current_value: float, predicted_value: float,
- time_to_threshold: Optional[float] = None):
- """Add a prediction"""
- self.predictions.append({
- "metric": metric,
- "current": current_value,
- "predicted": predicted_value,
- "time_to_threshold": time_to_threshold,
- "timestamp": time.time(),
- "predicted_at": datetime.datetime.now().strftime("%H:%M:%S"),
- })
+ @staticmethod
+ def create_animated_radar_chart(metrics: Dict[str, float], title: str = "Performance Radar"):
+ """Create animated radar chart with smooth transitions"""
+
+ categories = list(metrics.keys())
+ values = list(metrics.values())
+
+ # Create radar chart
+ fig = go.Figure()
+
+ fig.add_trace(go.Scatterpolar(
+ r=values,
+ theta=categories,
+ fill='toself',
+ name='Current',
+ line_color='#4CAF50',
+ opacity=0.8
+ ))
+
+ # Add ideal baseline (for comparison)
+ baseline_values = [max(values) * 0.8] * len(values)
+ fig.add_trace(go.Scatterpolar(
+ r=baseline_values,
+ theta=categories,
+ fill='toself',
+ name='Ideal Baseline',
+ line_color='#2196F3',
+ opacity=0.3
+ ))
+
+ fig.update_layout(
+ polar=dict(
+ radialaxis=dict(
+ visible=True,
+ range=[0, max(values) * 1.2]
+ )),
+ showlegend=True,
+ title=title,
+ height=400,
+ animations=[{
+ 'frame': {'duration': 500, 'redraw': True},
+ 'transition': {'duration': 300, 'easing': 'cubic-in-out'},
+ }]
+ )
+
+ return fig
- def get_predictive_timeline(self):
- """Create predictive timeline visualization"""
- if not self.predictions:
- return go.Figure()
+ @staticmethod
+ def create_heatmap_timeline(scenarios: List[Dict[str, Any]]):
+ """Create heatmap timeline of incidents"""
- # Create timeline data
- df = pd.DataFrame(self.predictions[-10:]) # Last 10 predictions
+ # Prepare data
+ severity_map = {"critical": 3, "high": 2, "medium": 1, "low": 0}
+
+ data = []
+ for i, scenario in enumerate(scenarios):
+ impact = scenario.get("business_impact", {})
+ severity_val = severity_map.get(
+ "critical" if impact.get("revenue_at_risk", 0) > 1000000 else
+ "high" if impact.get("revenue_at_risk", 0) > 500000 else
+ "medium" if impact.get("revenue_at_risk", 0) > 100000 else "low",
+ 0
+ )
+
+ data.append({
+ "Scenario": scenario.get("description", "Unknown")[:30] + "...",
+ "Revenue Risk": impact.get("revenue_at_risk", 0),
+ "Users Impacted": impact.get("users_impacted", 0),
+ "Severity": severity_val,
+ "Time to Resolve": impact.get("time_to_resolve", 0),
+ })
+
+ df = pd.DataFrame(data)
+
+ # Create heatmap
+ fig = go.Figure(data=go.Heatmap(
+ z=df[['Revenue Risk', 'Users Impacted', 'Severity', 'Time to Resolve']].values.T,
+ x=df['Scenario'],
+ y=['Revenue Risk ($)', 'Users Impacted', 'Severity Level', 'Time to Resolve (min)'],
+ colorscale='RdYlGn_r', # Red to Green (reversed for severity)
+ showscale=True,
+ hoverongaps=False,
+            hovertemplate='%{x}<br>%{y}: %{z}'
+ ))
+
+ fig.update_layout(
+ title="🔥 Incident Heatmap Timeline",
+ xaxis_title="Scenarios",
+ yaxis_title="Metrics",
+ height=400,
+ xaxis={'tickangle': 45},
+ )
+
+ return fig
+
+ @staticmethod
+ def create_real_time_metrics_stream():
+ """Create real-time streaming metrics visualization"""
+
+ # Generate sample streaming data
+ times = pd.date_range(start='now', periods=50, freq='1min')
+ values = np.cumsum(np.random.randn(50)) + 100
fig = go.Figure()
- # Add current values
fig.add_trace(go.Scatter(
- x=df["predicted_at"],
- y=df["current"],
- mode="lines+markers",
- name="Current",
- line=dict(color="#4caf50", width=3),
- marker=dict(size=10),
+ x=times,
+ y=values,
+ mode='lines+markers',
+ name='System Health Score',
+ line=dict(color='#2196F3', width=3),
+ marker=dict(size=6),
+            hovertemplate='Time: %{x}<br>Score: %{y:.1f}'
))
- # Add predicted values
- fig.add_trace(go.Scatter(
- x=df["predicted_at"],
- y=df["predicted"],
- mode="lines+markers",
- name="Predicted",
- line=dict(color="#ff9800", width=2, dash="dash"),
- marker=dict(size=8),
+ # Add threshold lines
+ fig.add_hline(y=95, line_dash="dash", line_color="green",
+ annotation_text="Optimal", annotation_position="right")
+ fig.add_hline(y=80, line_dash="dash", line_color="orange",
+ annotation_text="Warning", annotation_position="right")
+ fig.add_hline(y=70, line_dash="dash", line_color="red",
+ annotation_text="Critical", annotation_position="right")
+
+ # Add range slider
+ fig.update_layout(
+ title="📊 Real-time System Health Monitor",
+ xaxis=dict(
+ rangeselector=dict(
+ buttons=list([
+ dict(count=15, label="15m", step="minute", stepmode="backward"),
+ dict(count=1, label="1h", step="hour", stepmode="backward"),
+ dict(count=6, label="6h", step="hour", stepmode="backward"),
+ dict(step="all")
+ ])
+ ),
+ rangeslider=dict(visible=True),
+ type="date"
+ ),
+ yaxis_title="Health Score",
+ height=400,
+ showlegend=True
+ )
+
+ return fig
+
+ @staticmethod
+ def create_3d_rag_graph(incidents: List[Dict], outcomes: List[Dict], edges: List[Dict]):
+ """Create 3D visualization of RAG graph"""
+
+ if not incidents:
+ return go.Figure()
+
+ # Prepare 3D coordinates
+ np.random.seed(42) # For reproducibility
+
+ # Incident nodes (red to orange based on severity)
+ incident_coords = []
+ incident_colors = []
+ incident_sizes = []
+ incident_labels = []
+
+ for inc in incidents:
+ incident_coords.append([
+ np.random.uniform(-1, 0), # x: negative side
+ np.random.uniform(-1, 1), # y
+ np.random.uniform(0, 1) # z: incidents on bottom layer
+ ])
+
+ severity = inc.get("severity", "medium")
+ if severity == "critical":
+ incident_colors.append("#FF4444") # Bright red
+ incident_sizes.append(20)
+ elif severity == "high":
+ incident_colors.append("#FF9800") # Orange
+ incident_sizes.append(15)
+ else:
+ incident_colors.append("#FFC107") # Amber
+ incident_sizes.append(10)
+
+            incident_labels.append(f"{inc.get('component', 'Unknown')}<br>{severity.upper()}")
+
+ # Outcome nodes (green gradient based on success)
+ outcome_coords = []
+ outcome_colors = []
+ outcome_sizes = []
+ outcome_labels = []
+
+ for out in outcomes:
+ outcome_coords.append([
+ np.random.uniform(0, 1), # x: positive side
+ np.random.uniform(-1, 1), # y
+ np.random.uniform(0, 1) # z
+ ])
+
+ if out.get("success", False):
+ outcome_colors.append("#4CAF50") # Green
+ outcome_sizes.append(12)
+ else:
+ outcome_colors.append("#F44336") # Red
+ outcome_sizes.append(12)
+
+            outcome_labels.append(f"{out.get('action', 'Unknown')}<br>{'✅' if out.get('success') else '❌'}")
+
+ # Create figure
+ fig = go.Figure()
+
+ # Add incident nodes
+ fig.add_trace(go.Scatter3d(
+ x=[c[0] for c in incident_coords],
+ y=[c[1] for c in incident_coords],
+ z=[c[2] for c in incident_coords],
+ mode='markers+text',
+ marker=dict(
+ size=incident_sizes,
+ color=incident_colors,
+ symbol='circle',
+ line=dict(color='white', width=2)
+ ),
+ text=incident_labels,
+ textposition="top center",
+ name='Incidents',
+ hoverinfo='text',
))
- # Add threshold warning if applicable
- for i, row in df.iterrows():
- if row["time_to_threshold"] and row["time_to_threshold"] < 30:
- fig.add_annotation(
- x=row["predicted_at"],
- y=row["predicted"],
- text=f"⚠️ {row['time_to_threshold']:.0f} min",
- showarrow=True,
- arrowhead=2,
- arrowsize=1,
- arrowwidth=2,
- arrowcolor="#ff4444",
- font=dict(color="#ff4444", size=10),
- )
+ # Add outcome nodes
+ fig.add_trace(go.Scatter3d(
+ x=[c[0] for c in outcome_coords],
+ y=[c[1] for c in outcome_coords],
+ z=[c[2] for c in outcome_coords],
+ mode='markers+text',
+ marker=dict(
+ size=outcome_sizes,
+ color=outcome_colors,
+ symbol='diamond',
+ line=dict(color='white', width=1)
+ ),
+ text=outcome_labels,
+ textposition="top center",
+ name='Outcomes',
+ hoverinfo='text',
+ ))
+
+ # Add edges (connections)
+ edge_x, edge_y, edge_z = [], [], []
+ for edge in edges:
+ source_idx = int(edge["source"].split("_")[1]) if "_" in edge["source"] else 0
+ target_idx = int(edge["target"].split("_")[1]) if "_" in edge["target"] else 0
+
+ if source_idx < len(incident_coords) and target_idx < len(outcome_coords):
+ # Edge from incident to outcome
+ edge_x += [incident_coords[source_idx][0], outcome_coords[target_idx][0], None]
+ edge_y += [incident_coords[source_idx][1], outcome_coords[target_idx][1], None]
+ edge_z += [incident_coords[source_idx][2], outcome_coords[target_idx][2], None]
+
+ fig.add_trace(go.Scatter3d(
+ x=edge_x,
+ y=edge_y,
+ z=edge_z,
+ mode='lines',
+ line=dict(color='rgba(100, 100, 100, 0.5)', width=2),
+ hoverinfo='none',
+ showlegend=False
+ ))
# Update layout
fig.update_layout(
- title="🔮 Predictive Analytics Timeline",
- xaxis_title="Time",
- yaxis_title="Metric Value",
- hovermode="x unified",
- plot_bgcolor="white",
- height=400,
+ title="🧠 3D RAG Knowledge Graph",
+ scene=dict(
+ xaxis_title="Incidents ← → Outcomes",
+ yaxis_title="",
+ zaxis_title="Knowledge Depth",
+ camera=dict(
+ eye=dict(x=1.5, y=1.5, z=1.5)
+ ),
+ aspectmode='manual',
+ aspectratio=dict(x=2, y=1, z=1)
+ ),
+ height=600,
+ showlegend=True,
)
return fig
# ============================================================================
-# ENTERPRISE MOCK SERVER (Based on enterprise code structure)
+# EXPORT ENGINE
# ============================================================================
-class MockEnterpriseServer:
- """Mock enterprise server showing full capabilities"""
+class ExportEngine:
+ """Handle export of reports, charts, and data"""
- def __init__(self, license_key: str):
- self.license_key = license_key
- self.license_tier = self._get_license_tier(license_key)
- self.audit_trail = []
- self.learning_engine_active = True
- self.execution_stats = {
- "total_executions": 0,
- "successful_executions": 0,
- "autonomous_executions": 0,
- "approval_workflows": 0,
- "revenue_protected": 0.0,
- }
+ @staticmethod
+ def export_roi_report_as_html(roi_data: Dict[str, Any]) -> str:
+ """Export ROI report as HTML"""
- def _get_license_tier(self, license_key: str) -> str:
- """Determine license tier from key"""
- if "ENTERPRISE" in license_key:
- return "Enterprise"
- elif "PROFESSIONAL" in license_key:
- return "Professional"
- elif "TRIAL" in license_key:
- return "Trial"
- return "Starter"
+ html = f"""
+
+
+
+ ARF ROI Report - {datetime.datetime.now().strftime('%Y-%m-%d')}
+
+
+
+
+
+ 📊 Executive Summary
+
+
Investment Payback: {roi_data.get('payback_period', 'N/A')}
+ First Year ROI: {roi_data.get('first_year_roi', 'N/A')}
+
+
+ 💰 Financial Metrics
+
+ """
+
+ # Add metric cards
+ metrics_to_show = [
+ ('monthly_savings', 'Monthly Savings'),
+ ('annual_savings', 'Annual Savings'),
+ ('implementation_cost', 'Implementation Cost'),
+ ('auto_heal_rate', 'Auto-Heal Rate'),
+ ('mttr_improvement', 'MTTR Improvement'),
+ ]
+
+ for key, label in metrics_to_show:
+ if key in roi_data:
+ html += f"""
+
+
{label}
+
{roi_data[key]}
+
+ """
+
+ html += """
+
+
+ 📈 Detailed Breakdown
+
+ | Metric | Without ARF | With ARF | Improvement |
+ """
+
+ # Add comparison table
+ comparisons = [
+ ('Manual Incident Handling', '45 minutes', '2.3 minutes', '94% faster'),
+ ('Engineer Hours/Month', '250 hours', '37.5 hours', '85% reduction'),
+ ('Revenue at Risk/Month', '$450,000', '$82,350', '82% protection'),
+ ('Compliance Audit Costs', '$50,000/year', '$5,000/year', '90% savings'),
+ ]
+
+ for comp in comparisons:
+ html += f"""
+
+ | {comp[0]} |
+ {comp[1]} |
+ {comp[2]} |
+ {comp[3]} |
+
+ """
+
+ html += f"""
+
+
+
+
+
+ """
+
+ return html
- async def execute_healing(self, healing_intent: Dict[str, Any], mode: str = "autonomous") -> Dict[str, Any]:
- """Mock enterprise execution"""
- execution_id = f"exec_{uuid.uuid4().hex[:16]}"
- start_time = time.time()
-
- # Simulate execution time
- await asyncio.sleep(random.uniform(0.5, 2.0))
-
- # Determine success based on confidence
- confidence = healing_intent.get("confidence", 0.85)
- success = random.random() < confidence
-
- # Calculate simulated impact
- revenue_protected = random.randint(50000, 500000)
-
- # Update stats
- self.execution_stats["total_executions"] += 1
- if success:
- self.execution_stats["successful_executions"] += 1
- self.execution_stats["revenue_protected"] += revenue_protected
-
- if mode == "autonomous":
- self.execution_stats["autonomous_executions"] += 1
- elif mode == "approval":
- self.execution_stats["approval_workflows"] += 1
-
- # Record audit
- audit_entry = {
- "audit_id": f"audit_{uuid.uuid4().hex[:8]}",
- "timestamp": datetime.datetime.now().isoformat(),
- "action": healing_intent["action"],
- "component": healing_intent["component"],
- "mode": mode,
- "success": success,
- "revenue_protected": revenue_protected,
- "execution_time": time.time() - start_time,
- "license_tier": self.license_tier,
- }
- self.audit_trail.append(audit_entry)
-
- return {
- "execution_id": execution_id,
- "success": success,
- "message": f"✅ Successfully executed {healing_intent['action']} on {healing_intent['component']}" if success
- else f"⚠️ Execution partially failed for {healing_intent['action']}",
- "revenue_protected": revenue_protected,
- "execution_time": time.time() - start_time,
- "mode": mode,
- "license_tier": self.license_tier,
- "audit_id": audit_entry["audit_id"],
- "learning_recorded": self.learning_engine_active and success,
- }
+ @staticmethod
+ def export_compliance_report(compliance_data: Dict[str, Any], format: str = "html") -> str:
+ """Export compliance report in specified format"""
+
+ if format == "html":
+ return ExportEngine._compliance_to_html(compliance_data)
+ else:
+ # Return as JSON for other formats
+ return json.dumps(compliance_data, indent=2)
- def generate_compliance_report(self, standard: str = "SOC2") -> Dict[str, Any]:
- """Generate mock compliance report"""
- return {
- "report_id": f"compliance_{uuid.uuid4().hex[:8]}",
- "standard": standard,
- "generated_at": datetime.datetime.now().isoformat(),
- "period": "last_30_days",
- "findings": {
- "audit_trail_complete": True,
- "access_controls_enforced": True,
- "data_encrypted": True,
- "incident_response_documented": True,
- "sla_compliance": "99.95%",
- },
- "summary": f"✅ {standard} compliance requirements fully met",
- "estimated_audit_cost_savings": "$150,000",
- }
+ @staticmethod
+ def _compliance_to_html(compliance_data: Dict[str, Any]) -> str:
+ """Convert compliance data to HTML report"""
+
+ html = f"""
+
+
+
+ ARF {compliance_data.get('standard', 'Compliance')} Report
+
+
+
+
+
+ ✅ Executive Summary
+
+
{compliance_data.get('summary', 'No summary available')}
+
Estimated Audit Cost Savings: {compliance_data.get('estimated_audit_cost_savings', 'N/A')}
+
+
+ 🔍 Detailed Findings
+ """
+
+ # Add findings
+ findings = compliance_data.get('findings', {})
+ for key, value in findings.items():
+ status_class = "status-pass" if value in [True, "99.95%", "Complete"] else "status-fail"
+ display_value = "✅ PASS" if value is True else "❌ FAIL" if value is False else str(value)
+
+ html += f"""
+
+
{key.replace('_', ' ').title()}
+
{display_value}
+
+ """
+
+ html += """
+
+
+
+ """
+
+ return html
+
+ @staticmethod
+ def export_chart_as_image(fig, format: str = "png") -> bytes:
+ """Export Plotly chart as image bytes"""
+ try:
+ # For Plotly figures
+ img_bytes = fig.to_image(format=format, scale=2)
+ return img_bytes
+ except Exception as e:
+ logging.error(f"Failed to export chart: {e}")
+ # Return placeholder
+ return b""
# ============================================================================
-# DEMO SCENARIOS
+# ENHANCED DEMO SCENARIOS
# ============================================================================
ENTERPRISE_SCENARIOS = {
@@ -413,16 +718,21 @@ ENTERPRISE_SCENARIOS = {
"error_rate": 0.22,
"cpu_util": 0.95,
"memory_util": 0.88,
+ "queue_depth": 2500,
+ "throughput": 850,
},
"business_impact": {
"revenue_at_risk": 2500000,
"users_impacted": 45000,
"time_to_resolve": 2.3,
"auto_heal_possible": True,
+ "customer_satisfaction_impact": "Critical",
+ "brand_reputation_risk": "High",
},
"oss_action": "scale_out",
"enterprise_action": "autonomous_scale",
"prediction": "Database crash predicted in 8.5 minutes",
+ "visualization_type": "radar",
},
"⚡ Database Connection Pool Exhaustion": {
@@ -433,16 +743,21 @@ ENTERPRISE_SCENARIOS = {
"error_rate": 0.35,
"cpu_util": 0.78,
"memory_util": 0.98,
+ "connections": 980,
+ "deadlocks": 12,
},
"business_impact": {
"revenue_at_risk": 1200000,
"users_impacted": 12000,
"time_to_resolve": 8.5,
"auto_heal_possible": True,
+ "customer_satisfaction_impact": "High",
+ "brand_reputation_risk": "Medium",
},
"oss_action": "restart_container",
"enterprise_action": "approval_workflow",
"prediction": "Cascading failure in 3.2 minutes",
+ "visualization_type": "heatmap",
},
"🔮 Predictive Memory Leak": {
@@ -453,16 +768,21 @@ ENTERPRISE_SCENARIOS = {
"error_rate": 0.05,
"cpu_util": 0.45,
"memory_util": 0.94,
+ "cache_hit_rate": 0.12,
+ "garbage_collection": 45,
},
"business_impact": {
"revenue_at_risk": 250000,
"users_impacted": 65000,
"time_to_resolve": 0.8,
"auto_heal_possible": True,
+ "customer_satisfaction_impact": "Medium",
+ "brand_reputation_risk": "Low",
},
"oss_action": "restart_container",
"enterprise_action": "predictive_prevention",
"prediction": "Outage prevented 17 minutes before crash",
+ "visualization_type": "radar",
},
"📈 API Error Rate Spike": {
@@ -473,194 +793,415 @@ ENTERPRISE_SCENARIOS = {
"error_rate": 0.25,
"cpu_util": 0.35,
"memory_util": 0.42,
+ "requests_per_second": 4500,
+ "timeout_rate": 0.15,
},
"business_impact": {
"revenue_at_risk": 150000,
"users_impacted": 8000,
- "time_to_resolve": 45.0, # Traditional monitoring
+ "time_to_resolve": 45.0,
"auto_heal_possible": False,
+ "customer_satisfaction_impact": "Low",
+ "brand_reputation_risk": "Low",
},
"oss_action": "rollback",
"enterprise_action": "root_cause_analysis",
"prediction": "Error rate will reach 35% in 22 minutes",
+ "visualization_type": "stream",
},
-}
-
-# ============================================================================
-# LIVE DASHBOARD
-# ============================================================================
-
-class LiveDashboard:
- """Live executive dashboard"""
- def __init__(self):
- self.total_revenue_protected = 0.0
- self.total_incidents = 0
- self.auto_healed = 0
- self.engineer_hours_saved = 0
- self.start_time = time.time()
-
- def add_execution_result(self, revenue_protected: float, auto_healed: bool = True):
- """Add execution result to dashboard"""
- self.total_revenue_protected += revenue_protected
- self.total_incidents += 1
- if auto_healed:
- self.auto_healed += 1
- self.engineer_hours_saved += 2.5 # 2.5 hours saved per auto-healed incident
+ "🌐 Global CDN Outage": {
+ "description": "CDN failing across 3 regions affecting 200K users",
+ "component": "cdn-service",
+ "metrics": {
+ "latency_ms": 1200,
+ "error_rate": 0.65,
+ "cpu_util": 0.25,
+ "memory_util": 0.35,
+ "bandwidth_util": 0.98,
+ "regional_availability": 0.33,
+ },
+ "business_impact": {
+ "revenue_at_risk": 3500000,
+ "users_impacted": 200000,
+ "time_to_resolve": 15.5,
+ "auto_heal_possible": True,
+ "customer_satisfaction_impact": "Critical",
+ "brand_reputation_risk": "Critical",
+ },
+ "oss_action": "failover_regions",
+ "enterprise_action": "geo_load_balancing",
+ "prediction": "Global outage spreading to 5 regions in 12 minutes",
+ "visualization_type": "heatmap",
+ },
- def get_dashboard_data(self):
- """Get current dashboard data"""
- uptime_hours = (time.time() - self.start_time) / 3600
-
- return {
- "revenue_protected": f"${self.total_revenue_protected:,.0f}",
- "total_incidents": self.total_incidents,
- "auto_healed": self.auto_healed,
- "auto_heal_rate": f"{(self.auto_healed / self.total_incidents * 100):.1f}%" if self.total_incidents > 0 else "0%",
- "engineer_hours_saved": f"{self.engineer_hours_saved:.0f} hours",
- "avg_mttr": "2.3 minutes",
- "industry_mttr": "45 minutes",
- "improvement": "94% faster",
- "uptime": f"{uptime_hours:.1f} hours",
- "roi": "5.2×",
- }
+ "🔐 Authentication Service Failure": {
+ "description": "OAuth service failing - users cannot login",
+ "component": "auth-service",
+ "metrics": {
+ "latency_ms": 2500,
+ "error_rate": 0.85,
+ "cpu_util": 0.95,
+ "memory_util": 0.99,
+ "token_generation_rate": 5,
+ "active_sessions": 45000,
+ },
+ "business_impact": {
+ "revenue_at_risk": 1800000,
+ "users_impacted": 95000,
+ "time_to_resolve": 5.2,
+ "auto_heal_possible": True,
+ "customer_satisfaction_impact": "Critical",
+ "brand_reputation_risk": "High",
+ },
+ "oss_action": "restart_service",
+ "enterprise_action": "circuit_breaker_auto",
+ "prediction": "Complete service failure in 4.8 minutes",
+ "visualization_type": "radar",
+ },
+
+ "📊 Analytics Pipeline Crash": {
+ "description": "Real-time analytics pipeline crashed during reporting",
+ "component": "analytics-service",
+ "metrics": {
+ "latency_ms": 5000,
+ "error_rate": 0.95,
+ "cpu_util": 0.15,
+ "memory_util": 0.99,
+ "data_lag_minutes": 45,
+ "queue_backlog": 1200000,
+ },
+ "business_impact": {
+ "revenue_at_risk": 750000,
+ "users_impacted": 25000,
+ "time_to_resolve": 25.0,
+ "auto_heal_possible": True,
+ "customer_satisfaction_impact": "Medium",
+ "brand_reputation_risk": "Medium",
+ },
+ "oss_action": "restart_pipeline",
+ "enterprise_action": "data_recovery_auto",
+ "prediction": "Data loss exceeding SLA in 18 minutes",
+ "visualization_type": "stream",
+ },
+}
# ============================================================================
-# MAIN DEMO UI
+# MAIN DEMO UI - ENHANCED VERSION
# ============================================================================
-def create_ultimate_demo():
- """Create the ultimate investor demo UI"""
+def create_enhanced_demo():
+ """Create enhanced ultimate investor demo UI"""
- # Initialize components
+ # Initialize enhanced components
business_calc = BusinessImpactCalculator()
rag_visualizer = RAGGraphVisualizer()
predictive_viz = PredictiveVisualizer()
live_dashboard = LiveDashboard()
- enterprise_servers = {} # Store mock enterprise servers
+ viz_engine = EnhancedVisualizationEngine()
+ export_engine = ExportEngine()
+ session_manager = DemoSessionManager()
+ enterprise_servers = {}
- with gr.Blocks(title="🚀 ARF Ultimate Investor Demo") as demo:
+ # Generate session ID for this user
+ session_id = f"session_{uuid.uuid4().hex[:16]}"
+ session_manager.start_session(session_id)
+
+ with gr.Blocks(title="🚀 ARF Ultimate Investor Demo v3.3.7") as demo:
+ # Store session data in Gradio state
+ session_state = gr.State({
+ "session_id": session_id,
+ "current_scenario": None,
+ "exported_files": [],
+ "visualization_cache": {},
+ })
+
gr.Markdown("""
- # 🚀 Agentic Reliability Framework - Ultimate Investor Demo
- ### From Cost Center to Profit Engine: 5.2× ROI with Autonomous Reliability
+ # 🚀 Agentic Reliability Framework - Ultimate Investor Demo v3.3.7
+ ### **From Cost Center to Profit Engine: 5.2× ROI with Autonomous Reliability**
+
+
+
+
+
+    🎯 Live Demo Session:
+
+    Experience the full spectrum: OSS (Free) ↔ Enterprise (Paid)
+
+
+
+
+
+
- **Experience the full spectrum: OSS (Free) ↔ Enterprise (Paid)**
*Watch as ARF transforms reliability from a $2M cost center to a $10M profit engine*
""")
# ================================================================
- # EXECUTIVE DASHBOARD TAB
+ # ENHANCED EXECUTIVE DASHBOARD TAB
# ================================================================
- with gr.TabItem("🏢 Executive Dashboard"):
+ with gr.TabItem("🏢 Executive Dashboard", elem_id="dashboard-tab"):
gr.Markdown("""
## 📊 Real-Time Business Impact Dashboard
**Live metrics showing ARF's financial impact in enterprise deployments**
""")
- # Live metrics display
with gr.Row():
+ with gr.Column(scale=2):
+ # Enhanced metrics display with tooltips
+ with gr.Row():
+ with gr.Column(scale=1):
+ revenue_protected = gr.Markdown(
+ "### 💰 Revenue Protected\n**$0**",
+ elem_id="revenue-protected"
+ )
+ gr.HTML("""
+
+ 💡 Tooltip: Total revenue protected from potential outages
+
+ """)
+
+ with gr.Column(scale=1):
+ auto_heal_rate = gr.Markdown(
+ "### ⚡ Auto-Heal Rate\n**0%**",
+ elem_id="auto-heal-rate"
+ )
+ gr.HTML("""
+
+ 💡 Tooltip: Percentage of incidents resolved automatically
+
+ """)
+
+ with gr.Row():
+ with gr.Column(scale=1):
+ mttr_improvement = gr.Markdown(
+ "### 🚀 MTTR Improvement\n**94% faster**",
+ elem_id="mttr-improvement"
+ )
+ gr.HTML("""
+
+ 💡 Tooltip: Mean Time To Recovery improvement vs industry
+
+ """)
+
+ with gr.Column(scale=1):
+ engineer_hours = gr.Markdown(
+ "### 👷 Engineer Hours Saved\n**0 hours**",
+ elem_id="engineer-hours"
+ )
+ gr.HTML("""
+
+ 💡 Tooltip: Engineering time saved through automation
+
+ """)
+
with gr.Column(scale=1):
- revenue_protected = gr.Markdown("### 💰 Revenue Protected\n**$0**")
- with gr.Column(scale=1):
- auto_heal_rate = gr.Markdown("### ⚡ Auto-Heal Rate\n**0%**")
- with gr.Column(scale=1):
- mttr_improvement = gr.Markdown("### 🚀 MTTR Improvement\n**94% faster**")
- with gr.Column(scale=1):
- engineer_hours = gr.Markdown("### 👷 Engineer Hours Saved\n**0 hours**")
+ # Quick stats card
+ gr.Markdown("""
+ ### 📈 Session Statistics
+
+
+                        🆔 **Session:** """ + session_id[-8:] + """
+
+                        🕐 **Duration:** 0.0 min
+
+                        🔥 **Incidents Handled:** 0
+
+                        📊 **Scenarios Tried:** 0
+
+ """)
- # Live incident feed
- gr.Markdown("### 🔥 Live Incident Feed")
- incident_feed = gr.Dataframe(
- headers=["Time", "Service", "Impact", "Status", "Value Protected"],
- value=[],
- interactive=False,
+ # Real-time streaming metrics
+ gr.Markdown("### 📈 Real-time System Health Monitor")
+ real_time_metrics = gr.Plot(
+ label="",
+ elem_id="real-time-metrics"
)
- # Top customers protected
+ # Enhanced incident feed with filtering
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.Markdown("### 🔥 Live Incident Feed")
+ incident_feed = gr.Dataframe(
+ headers=["Time", "Service", "Impact", "Status", "Value Protected"],
+ value=[],
+ interactive=False,
+ elem_id="incident-feed"
+ )
+
+ with gr.Column(scale=1):
+ gr.Markdown("### 🔍 Quick Filters")
+ filter_severity = gr.Dropdown(
+ choices=["All", "Critical", "High", "Medium", "Low"],
+ value="All",
+ label="Filter by Severity"
+ )
+ filter_status = gr.Dropdown(
+ choices=["All", "Resolved", "In Progress", "Failed"],
+ value="All",
+ label="Filter by Status"
+ )
+
+ # Top customers with enhanced visualization
gr.Markdown("### 🏆 Top Customers Protected")
- customers_table = gr.Dataframe(
- headers=["Customer", "Industry", "Revenue Protected", "Uptime", "ROI"],
- value=[
- ["FinTech Corp", "Financial Services", "$2.1M", "99.99%", "8.3×"],
- ["HealthSys Inc", "Healthcare", "$1.8M", "99.995%", "Priceless"],
- ["SaaSPlatform", "SaaS", "$1.5M", "99.98%", "6.8×"],
- ["MediaStream", "Media", "$1.2M", "99.97%", "7.1×"],
- ["LogisticsPro", "Logistics", "$900K", "99.96%", "6.5×"],
- ],
- interactive=False,
- )
+ with gr.Row():
+ with gr.Column(scale=2):
+ customers_table = gr.Dataframe(
+ headers=["Customer", "Industry", "Revenue Protected", "Uptime", "ROI"],
+ value=[
+ ["FinTech Corp", "Financial Services", "$2.1M", "99.99%", "8.3×"],
+ ["HealthSys Inc", "Healthcare", "$1.8M", "99.995%", "Priceless"],
+ ["SaaSPlatform", "SaaS", "$1.5M", "99.98%", "6.8×"],
+ ["MediaStream", "Media", "$1.2M", "99.97%", "7.1×"],
+ ["LogisticsPro", "Logistics", "$900K", "99.96%", "6.5×"],
+ ],
+ interactive=False,
+ )
+
+ with gr.Column(scale=1):
+ # Customer ROI visualization
+ gr.Markdown("#### 📊 ROI Distribution")
+ roi_distribution = gr.Plot(
+ label="Customer ROI Distribution"
+ )
# ================================================================
- # LIVE WAR ROOM TAB
+ # ENHANCED LIVE WAR ROOM TAB
# ================================================================
- with gr.TabItem("🔥 Live War Room"):
+ with gr.TabItem("🔥 Live War Room", elem_id="war-room-tab"):
gr.Markdown("""
## 🔥 Multi-Incident War Room
- **Watch ARF handle 5+ simultaneous incidents across different services**
+ **Watch ARF handle 8+ simultaneous incidents across different services**
""")
with gr.Row():
with gr.Column(scale=1):
- # Scenario selector
+ # Enhanced scenario selector with search
scenario_selector = gr.Dropdown(
choices=list(ENTERPRISE_SCENARIOS.keys()),
value="🚨 Black Friday Payment Crisis",
label="🎬 Select Incident Scenario",
- info="Choose an enterprise incident scenario"
+ info="Choose an enterprise incident scenario",
+ filterable=True,
+ allow_custom_value=False,
+ )
+
+ # Scenario visualization type selector
+ viz_type = gr.Radio(
+ choices=["Radar Chart", "Heatmap", "3D Graph", "Stream"],
+ value="Radar Chart",
+ label="📊 Visualization Type",
+ info="Choose how to visualize the metrics"
)
- # Metrics display
+ # Enhanced metrics display
metrics_display = gr.JSON(
label="📊 Current Metrics",
value={},
)
- # Business impact
+ # Business impact with color coding
impact_display = gr.JSON(
label="💰 Business Impact Analysis",
value={},
)
- # OSS vs Enterprise actions
+ # Action buttons with loading states
with gr.Row():
- oss_action_btn = gr.Button("🤖 OSS: Analyze & Recommend", variant="secondary")
- enterprise_action_btn = gr.Button("🚀 Enterprise: Execute Healing", variant="primary")
+ with gr.Column(scale=1):
+ oss_action_btn = gr.Button(
+ "🤖 OSS: Analyze & Recommend",
+ variant="secondary",
+ elem_id="oss-btn"
+ )
+ oss_loading = gr.HTML("", visible=False)
+
+ with gr.Column(scale=1):
+ enterprise_action_btn = gr.Button(
+ "🚀 Enterprise: Execute Healing",
+ variant="primary",
+ elem_id="enterprise-btn"
+ )
+ enterprise_loading = gr.HTML("", visible=False)
- # Enterprise license input
- license_input = gr.Textbox(
- label="🔑 Enterprise License Key",
- value="ARF-ENT-DEMO-2024",
- info="Demo license - real enterprise requires purchase"
- )
-
- # Execution mode
- execution_mode = gr.Radio(
- choices=["autonomous", "approval"],
- value="autonomous",
- label="⚙️ Execution Mode",
- info="How to execute the healing action"
- )
+ # License and mode with tooltips
+ with gr.Accordion("⚙️ Enterprise Configuration", open=False):
+ license_input = gr.Textbox(
+ label="🔑 Enterprise License Key",
+ value="ARF-ENT-DEMO-2024",
+ info="Demo license - real enterprise requires purchase",
+ placeholder="Enter your license key..."
+ )
+
+ execution_mode = gr.Radio(
+ choices=["autonomous", "approval"],
+ value="autonomous",
+ label="⚙️ Execution Mode",
+ info="How to execute the healing action"
+ )
+
+ gr.HTML("""
+
+ 💡 Autonomous: ARF executes automatically
+ 💡 Approval: Requires human approval before execution
+
+ """)
with gr.Column(scale=2):
- # Results display
- result_display = gr.JSON(
- label="🎯 Execution Results",
- value={},
- )
+ # Enhanced results display with tabs
+ with gr.Tabs():
+ with gr.TabItem("🎯 Execution Results"):
+ result_display = gr.JSON(
+ label="",
+ value={},
+ elem_id="results-json"
+ )
+
+ with gr.TabItem("📈 Performance Analysis"):
+ performance_chart = gr.Plot(
+ label="Performance Radar Chart",
+ )
+
+ with gr.TabItem("🔥 Incident Heatmap"):
+ incident_heatmap = gr.Plot(
+ label="Incident Severity Heatmap",
+ )
- # RAG Graph Visualization
- rag_graph = gr.Plot(
- label="🧠 RAG Graph Memory Visualization",
- )
+ # Enhanced RAG Graph visualization
+ with gr.Row():
+ with gr.Column(scale=2):
+ rag_graph = gr.Plot(
+ label="🧠 RAG Graph Memory Visualization",
+ elem_id="rag-graph"
+ )
+
+ with gr.Column(scale=1):
+ # RAG Graph controls
+ gr.Markdown("#### 🎛️ Graph Controls")
+ graph_type = gr.Radio(
+ choices=["2D View", "3D View", "Network View"],
+ value="2D View",
+ label="View Type"
+ )
+ animate_graph = gr.Checkbox(
+ label="🎬 Enable Animation",
+ value=True
+ )
+ refresh_graph = gr.Button(
+ "🔄 Refresh Graph",
+ size="sm"
+ )
# Predictive Timeline
predictive_timeline = gr.Plot(
label="🔮 Predictive Analytics Timeline",
+ elem_id="predictive-timeline"
)
- # Function to update scenario
- def update_scenario(scenario_name):
+ # Function to update scenario with enhanced visualization
+ def update_scenario_enhanced(scenario_name, viz_type, session_state):
scenario = ENTERPRISE_SCENARIOS.get(scenario_name, {})
+ session_state["current_scenario"] = scenario_name
# Add to RAG graph
incident_id = rag_visualizer.add_incident(
@@ -677,105 +1218,54 @@ def create_ultimate_demo():
time_to_threshold=8.5 if "Black Friday" in scenario_name else None
)
+ # Select visualization based on type
+ if viz_type == "Radar Chart":
+ viz_fig = viz_engine.create_animated_radar_chart(
+ scenario.get("metrics", {}),
+ f"Performance Radar - {scenario_name}"
+ )
+ elif viz_type == "Heatmap":
+ viz_fig = viz_engine.create_heatmap_timeline([scenario])
+ elif viz_type == "3D Graph":
+ viz_fig = viz_engine.create_3d_rag_graph(
+ rag_visualizer.incidents,
+ rag_visualizer.outcomes,
+ rag_visualizer.edges
+ )
+ else: # Stream
+ viz_fig = viz_engine.create_real_time_metrics_stream()
+
+ # Store in cache
+ session_state["visualization_cache"][scenario_name] = viz_fig
+
return {
metrics_display: scenario.get("metrics", {}),
impact_display: business_calc.calculate_impact(scenario.get("business_impact", {})),
rag_graph: rag_visualizer.get_graph_figure(),
predictive_timeline: predictive_viz.get_predictive_timeline(),
- }
-
- # Function for OSS analysis
- async def oss_analysis(scenario_name):
- scenario = ENTERPRISE_SCENARIOS.get(scenario_name, {})
-
- return {
- result_display: {
- "status": "OSS_ADVISORY_COMPLETE",
- "action": scenario.get("oss_action", "unknown"),
- "component": scenario.get("component", "unknown"),
- "message": f"✅ OSS analysis recommends {scenario.get('oss_action')} for {scenario.get('component')}",
- "requires_enterprise": True,
- "confidence": 0.85,
- "enterprise_features_required": [
- "autonomous_execution",
- "learning_engine",
- "audit_trails",
- "compliance_reporting",
- ],
- "upgrade_url": "https://arf.dev/enterprise",
- }
- }
-
- # Function for Enterprise execution
- async def enterprise_execution(scenario_name, license_key, mode):
- scenario = ENTERPRISE_SCENARIOS.get(scenario_name, {})
-
- # Create or get enterprise server
- if license_key not in enterprise_servers:
- enterprise_servers[license_key] = MockEnterpriseServer(license_key)
-
- server = enterprise_servers[license_key]
-
- # Create healing intent
- healing_intent = {
- "action": scenario.get("enterprise_action", "unknown"),
- "component": scenario.get("component", "unknown"),
- "justification": f"Enterprise execution for {scenario_name}",
- "confidence": 0.92,
- "parameters": {"scale_factor": 3} if "scale" in scenario.get("enterprise_action", "") else {},
- }
-
- # Execute
- result = await server.execute_healing(healing_intent, mode)
-
- # Update dashboard
- live_dashboard.add_execution_result(result["revenue_protected"])
-
- # Add to RAG graph
- rag_visualizer.add_outcome(
- incident_id=f"inc_{len(rag_visualizer.incidents)-1}",
- success=result["success"],
- action=healing_intent["action"]
- )
-
- # Update dashboard displays
- dashboard_data = live_dashboard.get_dashboard_data()
-
- return {
- result_display: {
- **result,
- "rag_stats": rag_visualizer.get_stats(),
- "dashboard_update": dashboard_data,
- },
- rag_graph: rag_visualizer.get_graph_figure(),
- revenue_protected: f"### 💰 Revenue Protected\n**{dashboard_data['revenue_protected']}**",
- auto_heal_rate: f"### ⚡ Auto-Heal Rate\n**{dashboard_data['auto_heal_rate']}**",
- engineer_hours: f"### 👷 Engineer Hours Saved\n**{dashboard_data['engineer_hours_saved']}**",
+ performance_chart: viz_fig,
+ incident_heatmap: viz_engine.create_heatmap_timeline([scenario]),
+ session_state: session_state,
}
# Connect events
scenario_selector.change(
- fn=update_scenario,
- inputs=[scenario_selector],
- outputs=[metrics_display, impact_display, rag_graph, predictive_timeline]
+ fn=update_scenario_enhanced,
+ inputs=[scenario_selector, viz_type, session_state],
+ outputs=[metrics_display, impact_display, rag_graph, predictive_timeline,
+ performance_chart, incident_heatmap, session_state]
)
- oss_action_btn.click(
- fn=oss_analysis,
- inputs=[scenario_selector],
- outputs=[result_display]
- )
-
- enterprise_action_btn.click(
- fn=enterprise_execution,
- inputs=[scenario_selector, license_input, execution_mode],
- outputs=[result_display, rag_graph, revenue_protected, auto_heal_rate, engineer_hours]
+ viz_type.change(
+ fn=lambda scenario, viz_type, state: update_scenario_enhanced(scenario, viz_type, state),
+ inputs=[scenario_selector, viz_type, session_state],
+ outputs=[performance_chart, session_state]
)
# ================================================================
- # LEARNING ENGINE TAB
+ # ENHANCED LEARNING ENGINE TAB
# ================================================================
- with gr.TabItem("🧠 Learning Engine"):
+ with gr.TabItem("🧠 Learning Engine", elem_id="learning-tab"):
gr.Markdown("""
## 🧠 RAG Graph Learning Engine
**Watch ARF learn from every incident and outcome**
@@ -783,38 +1273,96 @@ def create_ultimate_demo():
with gr.Row():
with gr.Column(scale=1):
- # Learning stats
+ # Enhanced learning stats
learning_stats = gr.JSON(
label="📊 Learning Statistics",
value=rag_visualizer.get_stats(),
)
- # Simulate learning button
- simulate_learning_btn = gr.Button("🎓 Simulate Learning Cycle", variant="primary")
+ # Learning controls
+ with gr.Accordion("🎓 Learning Controls", open=True):
+ simulate_learning_btn = gr.Button(
+ "🎓 Simulate Learning Cycle",
+ variant="primary",
+ elem_id="simulate-learning"
+ )
+
+ learning_rate = gr.Slider(
+ minimum=1,
+ maximum=10,
+ value=3,
+ step=1,
+ label="Learning Cycles",
+ info="Number of incidents to simulate"
+ )
+
+ success_probability = gr.Slider(
+ minimum=0.1,
+ maximum=1.0,
+ value=0.8,
+ step=0.1,
+ label="Success Probability",
+ info="Probability of successful resolution"
+ )
- # Export knowledge button
- export_btn = gr.Button("📤 Export Learned Patterns", variant="secondary")
+ # Export section
+ with gr.Accordion("📤 Export Knowledge", open=False):
+ export_format = gr.Radio(
+ choices=["JSON", "CSV", "Graph Image"],
+ value="JSON",
+ label="Export Format"
+ )
+
+ export_btn = gr.Button(
+ "📤 Export Learned Patterns",
+ variant="secondary"
+ )
+
+ export_status = gr.HTML(
+                                "<div>"
+                                "✅ Ready to export"
+                                "</div>",
+ visible=True
+ )
with gr.Column(scale=2):
- # RAG Graph visualization
- learning_graph = gr.Plot(
- label="🔗 Knowledge Graph Visualization",
- )
+ # Enhanced RAG Graph visualization
+ with gr.Tabs():
+ with gr.TabItem("🔗 2D Knowledge Graph"):
+ learning_graph_2d = gr.Plot(
+ label="",
+ )
+
+ with gr.TabItem("🌐 3D Knowledge Graph"):
+ learning_graph_3d = gr.Plot(
+ label="",
+ )
+
+ with gr.TabItem("📊 Learning Progress"):
+ learning_progress = gr.Plot(
+ label="",
+ )
- # Update learning graph
- def update_learning_graph():
+ # Update learning graphs
+ def update_learning_graphs():
return {
- learning_graph: rag_visualizer.get_graph_figure(),
+ learning_graph_2d: rag_visualizer.get_graph_figure(),
+ learning_graph_3d: viz_engine.create_3d_rag_graph(
+ rag_visualizer.incidents,
+ rag_visualizer.outcomes,
+ rag_visualizer.edges
+ ),
learning_stats: rag_visualizer.get_stats(),
+ learning_progress: viz_engine.create_real_time_metrics_stream(),
}
- # Simulate learning
- def simulate_learning():
- # Add random incidents and outcomes
- components = ["payment-service", "database", "api-service", "cache", "auth-service"]
- actions = ["scale_out", "restart_container", "rollback", "circuit_breaker"]
+ # Simulate enhanced learning
+ def simulate_enhanced_learning(cycles, success_prob, session_state):
+ components = ["payment-service", "database", "api-service", "cache", "auth-service",
+ "cdn-service", "analytics-service", "queue-service"]
+ actions = ["scale_out", "restart_container", "rollback", "circuit_breaker",
+ "failover", "load_balance", "cache_clear", "connection_pool"]
- for _ in range(3):
+ for _ in range(cycles):
component = random.choice(components)
incident_id = rag_visualizer.add_incident(
component=component,
@@ -823,27 +1371,35 @@ def create_ultimate_demo():
rag_visualizer.add_outcome(
incident_id=incident_id,
- success=random.random() > 0.2, # 80% success rate
+ success=random.random() < success_prob,
action=random.choice(actions)
)
- return update_learning_graph()
+ # Record in session
+ session_manager.record_action(
+ session_state["session_id"],
+ "simulate_learning",
+ {"cycles": cycles, "success_probability": success_prob}
+ )
+
+ return update_learning_graphs()
# Connect events
simulate_learning_btn.click(
- fn=simulate_learning,
- outputs=[learning_graph, learning_stats]
+ fn=simulate_enhanced_learning,
+ inputs=[learning_rate, success_probability, session_state],
+ outputs=[learning_graph_2d, learning_graph_3d, learning_stats, learning_progress]
)
- export_btn.click(
- fn=lambda: {"message": "✅ Knowledge patterns exported to Neo4j for persistent learning"},
- outputs=[gr.JSON(value={"message": "✅ Knowledge patterns exported"})]
+ refresh_graph.click(
+ fn=update_learning_graphs,
+ outputs=[learning_graph_2d, learning_graph_3d, learning_stats, learning_progress]
)
# ================================================================
- # COMPLIANCE AUDITOR TAB
+ # ENHANCED COMPLIANCE AUDITOR TAB
# ================================================================
- with gr.TabItem("📝 Compliance Auditor"):
+ with gr.TabItem("📝 Compliance Auditor", elem_id="compliance-tab"):
gr.Markdown("""
## 📝 Automated Compliance & Audit Trails
**Enterprise-only: Generate SOC2/GDPR/HIPAA compliance reports in seconds**
@@ -851,81 +1407,90 @@ def create_ultimate_demo():
with gr.Row():
with gr.Column(scale=1):
- # Compliance standard selector
+ # Compliance configuration
compliance_standard = gr.Dropdown(
choices=["SOC2", "GDPR", "HIPAA", "ISO27001", "PCI-DSS"],
value="SOC2",
label="📋 Compliance Standard",
+ info="Select compliance standard"
)
- # License input
compliance_license = gr.Textbox(
label="🔑 Enterprise License Required",
value="ARF-ENT-COMPLIANCE",
interactive=True,
+ placeholder="Enter compliance license key..."
)
- # Generate report button
- generate_report_btn = gr.Button("⚡ Generate Compliance Report", variant="primary")
+ # Export options
+ with gr.Accordion("📤 Export Options", open=False):
+ report_format = gr.Radio(
+ choices=["HTML Report", "JSON", "PDF Summary"],
+ value="HTML Report",
+ label="Report Format"
+ )
+
+ include_audit_trail = gr.Checkbox(
+ label="Include Audit Trail",
+ value=True
+ )
+
+ generate_report_btn = gr.Button(
+ "⚡ Generate & Export Report",
+ variant="primary",
+ elem_id="generate-report"
+ )
# Audit trail viewer
+ gr.Markdown("### 📜 Live Audit Trail")
audit_trail = gr.Dataframe(
- label="📜 Live Audit Trail",
- headers=["Time", "Action", "Component", "User", "Status"],
+ label="",
+ headers=["Time", "Action", "Component", "User", "Status", "Details"],
value=[],
)
with gr.Column(scale=2):
- # Report display
- compliance_report = gr.JSON(
- label="📄 Compliance Report",
- value={},
- )
-
- # Generate compliance report
- def generate_compliance_report(standard, license_key):
- if "ENT" not in license_key:
- return {
- compliance_report: {
- "error": "Enterprise license required",
- "message": "Compliance features require Enterprise license",
- "upgrade_url": "https://arf.dev/enterprise",
- }
- }
-
- # Create mock enterprise server
- if license_key not in enterprise_servers:
- enterprise_servers[license_key] = MockEnterpriseServer(license_key)
-
- server = enterprise_servers[license_key]
- report = server.generate_compliance_report(standard)
-
- # Update audit trail
- audit_data = []
- for entry in server.audit_trail[-10:]: # Last 10 entries
- audit_data.append([
- entry["timestamp"][11:19], # Just time
- entry["action"],
- entry["component"],
- "ARF System",
- "✅" if entry["success"] else "⚠️",
- ])
-
- return {
- compliance_report: report,
- audit_trail: audit_data,
- }
-
- generate_report_btn.click(
- fn=generate_compliance_report,
- inputs=[compliance_standard, compliance_license],
- outputs=[compliance_report, audit_trail]
- )
+ # Report display with tabs
+ with gr.Tabs():
+ with gr.TabItem("📄 Compliance Report"):
+ compliance_report = gr.JSON(
+ label="",
+ value={},
+ )
+
+ with gr.TabItem("📊 Compliance Dashboard"):
+ compliance_dashboard = gr.Plot(
+ label="Compliance Metrics Dashboard",
+ )
+
+ with gr.TabItem("🔍 Detailed Findings"):
+ findings_display = gr.HTML(
+ label="",
+                                value="<p>Select a standard and "
+                                "generate report</p>"
+ )
+
+ # Report actions
+ with gr.Row():
+ preview_report = gr.Button(
+ "👁️ Preview Report",
+ variant="secondary",
+ size="sm"
+ )
+ download_report = gr.Button(
+ "📥 Download Report",
+ variant="secondary",
+ size="sm"
+ )
+ share_report = gr.Button(
+ "🔗 Share Report",
+ variant="secondary",
+ size="sm"
+ )
# ================================================================
- # ROI CALCULATOR TAB
+ # ENHANCED ROI CALCULATOR TAB
# ================================================================
- with gr.TabItem("💰 ROI Calculator"):
+ with gr.TabItem("💰 ROI Calculator", elem_id="roi-tab"):
gr.Markdown("""
## 💰 Enterprise ROI Calculator
**Calculate your potential savings with ARF Enterprise**
@@ -933,11 +1498,16 @@ def create_ultimate_demo():
with gr.Row():
with gr.Column(scale=1):
- # Inputs
+ # Inputs with tooltips
+ gr.Markdown("### 📝 Input Your Business Metrics")
+
monthly_revenue = gr.Number(
value=1000000,
label="Monthly Revenue ($)",
- info="Your company's monthly revenue"
+ info="Your company's monthly revenue",
+ minimum=10000,
+ maximum=1000000000,
+ step=10000
)
monthly_incidents = gr.Slider(
@@ -945,7 +1515,8 @@ def create_ultimate_demo():
maximum=100,
value=20,
label="Monthly Incidents",
- info="Reliability incidents per month"
+ info="Reliability incidents per month",
+ step=1
)
team_size = gr.Slider(
@@ -953,114 +1524,241 @@ def create_ultimate_demo():
maximum=20,
value=3,
label="SRE/DevOps Team Size",
- info="Engineers handling incidents"
+ info="Engineers handling incidents",
+ step=1
)
- avg_incident_cost = gr.Number(
+ avg_incident_cost = gr.Slider(
+ minimum=100,
+ maximum=10000,
value=1500,
label="Average Incident Cost ($)",
- info="Revenue loss + engineer time per incident"
+ info="Revenue loss + engineer time per incident",
+ step=100
)
- calculate_roi_btn = gr.Button("📈 Calculate ROI", variant="primary")
+ with gr.Accordion("⚙️ Advanced Settings", open=False):
+ engineer_hourly_rate = gr.Number(
+ value=100,
+ label="Engineer Hourly Rate ($)",
+ info="Average hourly rate of engineers"
+ )
+
+ implementation_timeline = gr.Slider(
+ minimum=1,
+ maximum=12,
+ value=3,
+ label="Implementation Timeline (months)",
+ info="Time to fully implement ARF"
+ )
+
+ calculate_roi_btn = gr.Button(
+ "📈 Calculate ROI",
+ variant="primary",
+ size="lg"
+ )
with gr.Column(scale=2):
- # Results
- roi_results = gr.JSON(
- label="📊 ROI Analysis Results",
- value={},
- )
+ # Enhanced results display
+ with gr.Tabs():
+ with gr.TabItem("📊 ROI Results"):
+ roi_results = gr.JSON(
+ label="",
+ value={},
+ )
+
+ with gr.TabItem("📈 Visualization"):
+ roi_chart = gr.Plot(
+ label="",
+ )
+
+ with gr.TabItem("📋 Detailed Breakdown"):
+ roi_breakdown = gr.Dataframe(
+ label="Cost-Benefit Analysis",
+ headers=["Category", "Without ARF", "With ARF", "Savings", "ROI Impact"],
+ value=[],
+ )
+
+ # Export section
+ gr.Markdown("### 📤 Export ROI Analysis")
+ with gr.Row():
+ export_roi_html = gr.Button(
+ "🌐 Export as HTML",
+ variant="secondary"
+ )
+ export_roi_csv = gr.Button(
+ "📊 Export as CSV",
+ variant="secondary"
+ )
+ export_roi_pdf = gr.Button(
+ "📄 Export as PDF",
+ variant="secondary"
+ )
- # Visualization
- roi_chart = gr.Plot(
- label="📈 ROI Visualization",
+ export_status = gr.HTML(
+                            "<div style='text-align: center;'>"
+                            "📝 Ready for export</div>",
+ visible=True
)
+
+ # ================================================================
+ # ENHANCED ANALYTICS & EXPORT TAB
+ # ================================================================
+ with gr.TabItem("📈 Analytics & Export", elem_id="analytics-section"):
+ gr.Markdown("""
+ ## 📈 Advanced Analytics & Export Hub
+ **Deep dive into performance metrics and export professional reports**
+ """)
- # Calculate ROI
- def calculate_roi(revenue, incidents, team_size, incident_cost):
- # ARF metrics (based on real deployments)
- auto_heal_rate = 0.817 # 81.7%
- mttr_reduction = 0.94 # 94% faster
- engineer_time_savings = 0.85 # 85% less engineer time
-
- # Calculations
- manual_incidents = incidents * (1 - auto_heal_rate)
- auto_healed = incidents * auto_heal_rate
-
- # Costs without ARF
- traditional_cost = incidents * incident_cost
- engineer_cost = incidents * 2.5 * 100 * team_size # 2.5 hours at $100/hour
- total_traditional_cost = traditional_cost + engineer_cost
-
- # Costs with ARF
- arf_incident_cost = manual_incidents * incident_cost * (1 - mttr_reduction)
- arf_engineer_cost = manual_incidents * 2.5 * 100 * team_size * engineer_time_savings
- total_arf_cost = arf_incident_cost + arf_engineer_cost
-
- # Savings
- monthly_savings = total_traditional_cost - total_arf_cost
- annual_savings = monthly_savings * 12
- implementation_cost = 47500 # $47.5K implementation
-
- # ROI
- payback_months = implementation_cost / monthly_savings if monthly_savings > 0 else 999
- first_year_roi = ((annual_savings - implementation_cost) / implementation_cost) * 100
-
- # Create chart
- fig = go.Figure(data=[
- go.Bar(name='Without ARF', x=['Monthly Cost'], y=[total_traditional_cost], marker_color='#ff4444'),
- go.Bar(name='With ARF', x=['Monthly Cost'], y=[total_arf_cost], marker_color='#44ff44'),
- ])
- fig.update_layout(
- title="Monthly Cost Comparison",
- yaxis_title="Cost ($)",
- barmode='group',
- height=300,
- )
+ with gr.Row():
+ with gr.Column(scale=1):
+ # Analytics controls
+ gr.Markdown("### 📊 Analytics Controls")
+
+ analytics_timeframe = gr.Dropdown(
+ choices=["Last Hour", "Today", "Last 7 Days", "Last 30 Days", "All Time"],
+ value="Today",
+ label="Timeframe"
+ )
+
+ analytics_metric = gr.Dropdown(
+ choices=["Revenue Protected", "Incidents Handled", "Auto-Heal Rate",
+ "MTTR Improvement", "ROI", "Compliance Score"],
+ value="Revenue Protected",
+ label="Primary Metric"
+ )
+
+ refresh_analytics = gr.Button(
+ "🔄 Refresh Analytics",
+ variant="primary"
+ )
+
+ # Export all data
+ gr.Markdown("### 📤 Bulk Export")
+ with gr.Accordion("Export All Session Data", open=False):
+ export_all_format = gr.Radio(
+ choices=["JSON", "CSV", "HTML Report"],
+ value="JSON",
+ label="Export Format"
+ )
+
+ export_all_btn = gr.Button(
+ "💾 Export All Data",
+ variant="secondary"
+ )
- return {
- roi_results: {
- "monthly_revenue": f"${revenue:,.0f}",
- "monthly_incidents": incidents,
- "auto_heal_rate": f"{auto_heal_rate*100:.1f}%",
- "mttr_improvement": f"{mttr_reduction*100:.0f}%",
- "monthly_savings": f"${monthly_savings:,.0f}",
- "annual_savings": f"${annual_savings:,.0f}",
- "implementation_cost": f"${implementation_cost:,.0f}",
- "payback_period": f"{payback_months:.1f} months",
- "first_year_roi": f"{first_year_roi:.1f}%",
- "key_metrics": {
- "incidents_auto_healed": f"{auto_healed:.0f}/month",
- "engineer_hours_saved": f"{(incidents * 2.5 * engineer_time_savings):.0f} hours/month",
- "revenue_protected": f"${(incidents * incident_cost * auto_heal_rate):,.0f}/month",
- }
- },
- roi_chart: fig,
- }
+ with gr.Column(scale=2):
+ # Historical trends
+ gr.Markdown("### 📈 Historical Performance Trends")
+ historical_trends = gr.Plot(
+ label="",
+ )
+
+ # Session analytics
+ gr.Markdown("### 👤 Session Analytics")
+ session_analytics = gr.JSON(
+ label="",
+ value={},
+ )
- calculate_roi_btn.click(
- fn=calculate_roi,
- inputs=[monthly_revenue, monthly_incidents, team_size, avg_incident_cost],
- outputs=[roi_results, roi_chart]
- )
+ # Export hub
+ gr.Markdown("### 🚀 Export Hub", elem_id="export-section")
+ with gr.Row():
+ with gr.Column(scale=1):
+ export_type = gr.Dropdown(
+ choices=["ROI Report", "Compliance Report", "Incident Analysis",
+ "Performance Dashboard", "Executive Summary"],
+ value="ROI Report",
+ label="Report Type"
+ )
+
+ export_customize = gr.CheckboxGroup(
+ choices=["Include Charts", "Include Raw Data", "Add Watermark",
+ "Password Protect", "Brand Customization"],
+ value=["Include Charts"],
+ label="Customization Options"
+ )
+
+ with gr.Column(scale=2):
+ export_preview = gr.HTML(
+                            "<div style='text-align: center; padding: 20px;'>"
+                            "<h3>🚀 Export Preview</h3>"
+                            "<p>Select report type and customization options</p>"
+                            "</div>"
+ )
+
+ with gr.Row():
+ generate_export = gr.Button(
+ "⚡ Generate Export",
+ variant="primary"
+ )
+ preview_export = gr.Button(
+ "👁️ Preview",
+ variant="secondary"
+ )
+ clear_exports = gr.Button(
+ "🗑️ Clear",
+ variant="secondary"
+ )
- # Footer
+ # ================================================================
+ # MOBILE RESPONSIVE ELEMENTS
+ # ================================================================
gr.Markdown("""
- ---
+            <div class="mobile-tips">
+            <h4>📱 Mobile Tips</h4>
+            <p>• Use landscape mode for better visualization</p>
+            <p>• Tap charts to interact</p>
+            <p>• Swipe left/right between tabs</p>
+            </div>
-            **Ready to transform your reliability operations?**
+            """)
- | Capability | OSS Edition | Enterprise Edition |
- |------------|-------------|-------------------|
- | **Execution** | ❌ Advisory only | ✅ Autonomous + Approval |
- | **Learning** | ❌ No learning | ✅ Continuous learning engine |
- | **Compliance** | ❌ No audit trails | ✅ SOC2/GDPR/HIPAA compliant |
- | **Storage** | ⚠️ In-memory only | ✅ Persistent (Neo4j + PostgreSQL) |
- | **Support** | ❌ Community | ✅ 24/7 Enterprise support |
- | **ROI** | ❌ None | ✅ **5.2× average first year ROI** |
+ # ================================================================
+ # ENHANCED FOOTER WITH EXPORT LINKS
+ # ================================================================
+        gr.Markdown("""
+            ---
+
+            ### 🚀 Ready to transform your reliability operations?
+
+            **Capability Comparison:**
+
+            | Capability | OSS Edition | Enterprise Edition |
+            |------------|-------------|--------------------|
+            | Execution | ❌ Advisory only | ✅ Autonomous + Approval |
+            | Learning | ❌ No learning | ✅ Continuous learning engine |
+            | Compliance | ❌ No audit trails | ✅ SOC2/GDPR/HIPAA compliant |
+            | Storage | ⚠️ In-memory only | ✅ Persistent (Neo4j + PostgreSQL) |
+            | Support | ❌ Community | ✅ 24/7 Enterprise support |
+            | ROI | ❌ None | ✅ **5.2× average first year ROI** |
-            **Contact:** enterprise@petterjuan.com | **Website:** https://arf.dev
-            **Documentation:** https://docs.arf.dev | **GitHub:** https://github.com/petterjuan/agentic-reliability-framework
+
+            🚀 ARF Ultimate Investor Demo v3.3.7 | Enhanced with Professional Analytics & Export Features
+
+            Built with ❤️ using Gradio & Plotly | Session started at """ +
+                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + """
+            """)
return demo
@@ -1075,16 +1773,17 @@ def main():
logger = logging.getLogger(__name__)
logger.info("=" * 80)
- logger.info("🚀 Starting ARF Ultimate Investor Demo")
+ logger.info("🚀 Starting ARF Ultimate Investor Demo v3.3.7")
logger.info("=" * 80)
- demo = create_ultimate_demo()
+ demo = create_enhanced_demo()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
show_error=True,
-        theme="soft"
+        favicon_path=None,
)
if __name__ == "__main__":