import os
import json
import numpy as np
import gradio as gr
import requests
import pandas as pd
import datetime
from typing import List, Dict, Any, Optional
import hashlib
# Import our new modules
from models import ReliabilityEvent, EventSeverity, AnomalyResult, HealingAction
from healing_policies import PolicyEngine
# === Configuration ===
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
HF_API_URL = "https://router.huggingface.co/hf-inference/v1/completions"
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
# === FAISS & Embeddings Setup ===
try:
    from sentence_transformers import SentenceTransformer
    import faiss

    VECTOR_DIM = 384
    INDEX_FILE = "incident_vectors.index"
    TEXTS_FILE = "incident_texts.json"

    # Try to load the embedding model with error handling
    try:
        model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    except Exception as e:
        print(f"Model loading warning: {e}")
        # Fallback: retry the same model once more
        from sentence_transformers import SentenceTransformer as ST
        model = ST("sentence-transformers/all-MiniLM-L6-v2")

    # Load the persisted index only if both the index and its texts exist;
    # otherwise start fresh so the two stay in sync
    if os.path.exists(INDEX_FILE) and os.path.exists(TEXTS_FILE):
        index = faiss.read_index(INDEX_FILE)
        with open(TEXTS_FILE, "r") as f:
            incident_texts = json.load(f)
    else:
        index = faiss.IndexFlatL2(VECTOR_DIM)
        incident_texts = []
except ImportError as e:
    print(f"Warning: FAISS or SentenceTransformers not available: {e}")
    index = None
    incident_texts = []
    model = None
def save_index():
    """Save the FAISS index and incident texts to disk"""
    if index is not None:
        faiss.write_index(index, INDEX_FILE)
        with open(TEXTS_FILE, "w") as f:
            json.dump(incident_texts, f)
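
# The stored vectors are currently write-only. The hypothetical helper below
# (a minimal sketch, not wired into the UI) shows how past incidents could be
# retrieved with the same embedding model and FAISS index.
def find_similar_incidents(query_text: str, k: int = 3) -> List[str]:
    """Illustrative sketch: return up to k past incident texts similar to query_text."""
    if index is None or model is None or index.ntotal == 0:
        return []
    query_vec = model.encode([query_text])
    # IndexFlatL2.search returns (distances, ids); ids are -1 when fewer than k exist
    _, neighbor_ids = index.search(np.array(query_vec, dtype=np.float32), k)
    return [incident_texts[i] for i in neighbor_ids[0] if 0 <= i < len(incident_texts)]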
# === Core Engine Components ===
policy_engine = PolicyEngine()
events_history: List[ReliabilityEvent] = []
class BusinessImpactCalculator:
    """Calculate the estimated business impact of anomalies"""

    def __init__(self, revenue_per_request: float = 0.01):
        # Currently unused; impact is modeled per minute of degradation below
        self.revenue_per_request = revenue_per_request

    def calculate_impact(self, event: ReliabilityEvent, duration_minutes: int = 5) -> Dict[str, Any]:
        """Estimate revenue loss, affected users, and severity for an event"""
        base_revenue_per_minute = 100  # Baseline revenue per minute for the service

        # Scale the impact by the severity of each anomalous signal
        impact_multiplier = 1.0
        if event.latency_p99 > 300:
            impact_multiplier += 0.5  # High latency impact
        if event.error_rate > 0.1:
            impact_multiplier += 0.8  # High error rate impact
        if event.cpu_util and event.cpu_util > 0.9:
            impact_multiplier += 0.3  # Resource exhaustion impact

        # Base rate is per minute, so multiply by the duration in minutes directly
        revenue_loss = base_revenue_per_minute * impact_multiplier * duration_minutes

        # Estimate affected users from error rate and excess latency
        base_users_affected = 1000  # Baseline user count
        user_impact_multiplier = (event.error_rate * 10) + (max(0, event.latency_p99 - 100) / 500)
        affected_users = int(base_users_affected * user_impact_multiplier)

        # Severity classification
        if revenue_loss > 500 or affected_users > 5000:
            severity = "CRITICAL"
        elif revenue_loss > 100 or affected_users > 1000:
            severity = "HIGH"
        elif revenue_loss > 50 or affected_users > 500:
            severity = "MEDIUM"
        else:
            severity = "LOW"

        return {
            'revenue_loss_estimate': round(revenue_loss, 2),
            'affected_users_estimate': affected_users,
            'severity_level': severity,
            'throughput_reduction_pct': round(min(100, user_impact_multiplier * 100), 1)
        }
business_calculator = BusinessImpactCalculator()
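
# Worked example (illustrative values): latency_p99=400ms, error_rate=0.15,
# cpu_util=0.95 gives impact_multiplier = 1.0 + 0.5 + 0.8 + 0.3 = 2.6, so over
# 5 minutes revenue_loss = 100 * 2.6 * 5 = 1300 and
# affected_users = int(1000 * (1.5 + 0.6)) = 2100, which classifies as
# CRITICAL (revenue_loss > 500).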
class AdvancedAnomalyDetector:
    """Enhanced anomaly detection with adaptive thresholds"""

    def __init__(self):
        self.historical_data = []
        self.adaptive_thresholds = {
            'latency_p99': 150,  # Will adapt based on history
            'error_rate': 0.05
        }

    def detect_anomaly(self, event: ReliabilityEvent) -> bool:
        """Enhanced anomaly detection with adaptive thresholds"""
        # Basic threshold checks
        latency_anomaly = event.latency_p99 > self.adaptive_thresholds['latency_p99']
        error_anomaly = event.error_rate > self.adaptive_thresholds['error_rate']

        # Resource-based anomalies
        resource_anomaly = False
        if event.cpu_util and event.cpu_util > 0.9:
            resource_anomaly = True
        if event.memory_util and event.memory_util > 0.9:
            resource_anomaly = True

        # Update adaptive thresholds (simplified)
        self._update_thresholds(event)

        return latency_anomaly or error_anomaly or resource_anomaly

    def _update_thresholds(self, event: ReliabilityEvent):
        """Update adaptive thresholds based on historical data"""
        self.historical_data.append(event)

        # Keep only recent history
        if len(self.historical_data) > 100:
            self.historical_data.pop(0)

        # Update the latency threshold to the 90th percentile of recent data
        if len(self.historical_data) > 10:
            recent_latencies = [e.latency_p99 for e in self.historical_data[-20:]]
            self.adaptive_thresholds['latency_p99'] = np.percentile(recent_latencies, 90)
anomaly_detector = AdvancedAnomalyDetector()
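
# Illustrative behavior: if the last 20 events carried latency_p99 values of
# roughly 90-130ms, np.percentile(recent_latencies, 90) lands near 125ms, so the
# default 150ms threshold tightens toward observed traffic. Note that anomalous
# samples are included in the window, so a sustained incident will raise the bar.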
def call_huggingface_analysis(prompt: str) -> str:
    """Use the HF Inference API, or fall back to simulated analysis"""
    if not HF_TOKEN:
        # Enhanced fallback analysis
        fallback_insights = [
            "High latency detected - possible resource contention or network issues",
            "Error rate increase suggests recent deployment instability",
            "Latency spike correlates with increased user traffic patterns",
            "Intermittent failures indicate potential dependency service degradation",
            "Performance degradation detected - consider scaling compute resources"
        ]
        import random
        return random.choice(fallback_insights)

    try:
        enhanced_prompt = f"""
As a senior reliability engineer, analyze this telemetry event and provide a concise root cause analysis:
{prompt}
Focus on:
- Potential infrastructure or application issues
- Correlation between metrics
- Business impact assessment
- Recommended investigation areas
Provide 1-2 sentences maximum with actionable insights.
"""
        payload = {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "prompt": enhanced_prompt,
            "max_tokens": 150,
            "temperature": 0.4,
        }
        response = requests.post(HF_API_URL, headers=HEADERS, json=payload, timeout=15)
        if response.status_code == 200:
            result = response.json()
            analysis_text = result.get("choices", [{}])[0].get("text", "").strip()
            if analysis_text and len(analysis_text) > 10:
                return analysis_text.split('\n')[0]
            return analysis_text
        else:
            return f"API Error {response.status_code}: Service temporarily unavailable"
    except Exception as e:
        return f"Analysis service error: {str(e)}"
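
# Illustrative usage (assumed inputs): with no HF_TOKEN set,
#   call_huggingface_analysis("Component: api-service\nLatency: 420ms\nError Rate: 0.12")
# returns one of the canned fallback insights above; with a token it returns the
# first line of an OpenAI-style completions response ({"choices": [{"text": ...}]}).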
def analyze_event(component: str, latency: float, error_rate: float,
                  throughput: float = 1000, cpu_util: Optional[float] = None,
                  memory_util: Optional[float] = None) -> Dict[str, Any]:
    """Main event analysis function"""
    # Create enhanced event
    event = ReliabilityEvent(
        component=component,
        latency_p99=latency,
        error_rate=error_rate,
        throughput=throughput,
        cpu_util=cpu_util,
        memory_util=memory_util,
        upstream_deps=["auth-service", "database"] if component == "api-service" else []
    )

    # Detect anomaly
    is_anomaly = anomaly_detector.detect_anomaly(event)
    event.severity = EventSeverity.HIGH if is_anomaly else EventSeverity.LOW

    # Build analysis prompt
    prompt = (
        f"Component: {component}\nLatency: {latency:.2f}ms\nError Rate: {error_rate:.3f}\n"
        f"Throughput: {throughput:.0f}\nCPU: {cpu_util or 'N/A'}\nMemory: {memory_util or 'N/A'}\n"
        f"Status: {'ANOMALY' if is_anomaly else 'NORMAL'}\n\n"
        "Provide a one-line reliability insight or root cause analysis."
    )

    # Get AI analysis and attach it to the event so the history table can show it
    # (assumes ReliabilityEvent accepts this attribute; the UI reads it via getattr)
    analysis = call_huggingface_analysis(prompt)
    event.analysis = analysis

    # Evaluate healing policies
    healing_actions = policy_engine.evaluate_policies(event)

    # Calculate business impact
    business_impact = business_calculator.calculate_impact(event) if is_anomaly else None

    # Vector memory learning
    if index is not None and is_anomaly:
        vector_text = f"{component} {latency} {error_rate} {analysis}"
        vec = model.encode([vector_text])
        index.add(np.array(vec, dtype=np.float32))
        incident_texts.append(vector_text)
        save_index()

    # Prepare result
    result = {
        "timestamp": event.timestamp,
        "component": component,
        "latency_p99": latency,
        "error_rate": error_rate,
        "throughput": throughput,
        "status": "ANOMALY" if is_anomaly else "NORMAL",
        "analysis": analysis,
        "healing_actions": [action.value for action in healing_actions],
        "business_impact": business_impact,
        "severity": event.severity.value,
        "similar_incidents_count": len(incident_texts) if is_anomaly else 0
    }
    events_history.append(event)
    return result
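
# Illustrative call (hypothetical values):
#   analyze_event("api-service", latency=320, error_rate=0.12, cpu_util=0.95)
# trips the latency, error-rate, and CPU checks, so the result carries
# status="ANOMALY", a business_impact dict, and any policy-driven healing actions.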
# === Gradio UI ===
def submit_event(component, latency, error_rate, throughput, cpu_util, memory_util):
    """Handle event submission from the UI"""
    try:
        # Convert inputs (treat 0.0 as a valid utilization reading)
        latency = float(latency)
        error_rate = float(error_rate)
        throughput = float(throughput) if throughput else 1000
        cpu_util = float(cpu_util) if cpu_util is not None else None
        memory_util = float(memory_util) if memory_util is not None else None

        result = analyze_event(component, latency, error_rate, throughput, cpu_util, memory_util)

        # Prepare table data
        table_data = []
        for event in events_history[-15:]:
            analysis_text = getattr(event, 'analysis', '') or 'N/A'
            table_data.append([
                event.timestamp[:19],  # Trim microseconds
                event.component,
                event.latency_p99,
                f"{event.error_rate:.3f}",
                event.throughput,
                event.severity.value.upper(),
                analysis_text[:50] + "..." if analysis_text != 'N/A' else 'N/A'
            ])

        # Format output message
        status_emoji = "🚨" if result["status"] == "ANOMALY" else "✅"
        output_msg = f"{status_emoji} {result['status']} - {result['analysis']}"

        if result["business_impact"]:
            impact = result["business_impact"]
            output_msg += (
                f"\n💰 Business Impact: ${impact['revenue_loss_estimate']} | "
                f"👥 {impact['affected_users_estimate']} users | "
                f"🚨 {impact['severity_level']}"
            )

        if result["healing_actions"]:
            actions = ", ".join(result["healing_actions"])
            output_msg += f"\n🔧 Auto-Actions: {actions}"

        return (
            output_msg,
            gr.Dataframe(
                headers=["Timestamp", "Component", "Latency", "Error Rate", "Throughput", "Severity", "Analysis"],
                value=table_data,
                wrap=True
            )
        )
    except Exception as e:
        return f"❌ Error processing event: {str(e)}", gr.Dataframe(value=[])
def create_ui():
    """Create the Gradio interface"""
    with gr.Blocks(title="🧠 Agentic Reliability Framework v2", theme="soft") as demo:
        gr.Markdown("""
        # 🧠 Agentic Reliability Framework v2
        **Production-Grade Self-Healing AI Systems**

        *Advanced anomaly detection + AI-driven root cause analysis + business impact quantification*
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("### 📊 Telemetry Input")
                component = gr.Dropdown(
                    choices=["api-service", "auth-service", "payment-service", "database", "cache-service"],
                    value="api-service",
                    label="Component",
                    info="Select the service being monitored"
                )
                latency = gr.Slider(
                    minimum=10, maximum=1000, value=100, step=1,
                    label="Latency P99 (ms)",
                    info="Alert threshold: >150ms (adaptive)"
                )
                error_rate = gr.Slider(
                    minimum=0, maximum=0.5, value=0.02, step=0.001,
                    label="Error Rate",
                    info="Alert threshold: >0.05"
                )
                throughput = gr.Number(
                    value=1000,
                    label="Throughput (req/sec)",
                    info="Current request rate"
                )
                cpu_util = gr.Slider(
                    minimum=0, maximum=1, value=0.4, step=0.01,
                    label="CPU Utilization",
                    info="0.0 - 1.0 scale"
                )
                memory_util = gr.Slider(
                    minimum=0, maximum=1, value=0.3, step=0.01,
                    label="Memory Utilization",
                    info="0.0 - 1.0 scale"
                )
                submit_btn = gr.Button("🚀 Submit Telemetry Event", variant="primary", size="lg")

            with gr.Column(scale=2):
                gr.Markdown("### 🔍 Live Analysis & Healing")
                output_text = gr.Textbox(
                    label="Analysis Results",
                    placeholder="Submit an event to see AI-powered analysis...",
                    lines=4
                )
                gr.Markdown("### 📈 Recent Events (Last 15)")
                events_table = gr.Dataframe(
                    headers=["Timestamp", "Component", "Latency", "Error Rate", "Throughput", "Severity", "Analysis"],
                    label="Event History",
                    wrap=True,
                    max_height=400
                )

        # Information sections
        with gr.Accordion("ℹ️ Framework Capabilities", open=False):
            gr.Markdown("""
            - **🤖 AI-Powered Analysis**: Mixtral-8x7B-Instruct for intelligent root cause analysis
            - **🔧 Policy-Based Healing**: Automated recovery actions based on severity and context
            - **💰 Business Impact**: Revenue and user impact quantification
            - **🎯 Adaptive Detection**: ML-powered thresholds that learn from your environment
            - **📚 Vector Memory**: FAISS-based incident memory for similarity detection
            - **⚡ Production Ready**: Circuit breakers, cooldowns, and enterprise features
            """)

        with gr.Accordion("🔧 Healing Policies", open=False):
            policy_info = []
            for policy in policy_engine.policies:
                if policy.enabled:
                    actions = ", ".join([action.value for action in policy.actions])
                    policy_info.append(f"**{policy.name}**: {actions} (Priority: {policy.priority})")
            gr.Markdown("\n\n".join(policy_info))

        # Event handling
        submit_btn.click(
            fn=submit_event,
            inputs=[component, latency, error_rate, throughput, cpu_util, memory_util],
            outputs=[output_text, events_table]
        )

    return demo
if __name__ == "__main__":
    demo = create_ui()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )