import os
import json
import numpy as np
import gradio as gr
import requests
import pandas as pd
import datetime
from typing import List, Dict, Any, Optional
import hashlib
# Import our new modules
from models import ReliabilityEvent, EventSeverity, AnomalyResult, HealingAction
from healing_policies import PolicyEngine
# === Configuration ===
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
HF_API_URL = "https://router.huggingface.co/hf-inference/v1/completions"
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
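# Usage note (deployment assumption): HF_TOKEN is read once at import time,
# e.g. set as a Space secret or via `export HF_TOKEN=...` before launch;
# without it, call_huggingface_analysis() below falls back to canned insights.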
# === FAISS & Embeddings Setup ===
try:
from sentence_transformers import SentenceTransformer
import faiss
VECTOR_DIM = 384
INDEX_FILE = "incident_vectors.index"
TEXTS_FILE = "incident_texts.json"
# Try to load the embedding model; disable vector-memory features if it fails
try:
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
except Exception as e:
print(f"Model loading warning: {e}")
model = None
if os.path.exists(INDEX_FILE):
index = faiss.read_index(INDEX_FILE)
with open(TEXTS_FILE, "r") as f:
incident_texts = json.load(f)
else:
index = faiss.IndexFlatL2(VECTOR_DIM)
incident_texts = []
except ImportError as e:
print(f"Warning: FAISS or SentenceTransformers not available: {e}")
index = None
incident_texts = []
model = None
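# With index and model left as None the app still runs; the vector-memory
# step in analyze_event() is simply skipped.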
def save_index():
"""Save FAISS index and incident texts"""
if index is not None:
faiss.write_index(index, INDEX_FILE)
with open(TEXTS_FILE, "w") as f:
json.dump(incident_texts, f)
# === Core Engine Components ===
policy_engine = PolicyEngine()
events_history: List[ReliabilityEvent] = []
class BusinessImpactCalculator:
"""Calculate business impact of anomalies"""
def __init__(self, revenue_per_request: float = 0.01):
self.revenue_per_request = revenue_per_request
def calculate_impact(self, event: ReliabilityEvent, duration_minutes: int = 5) -> Dict[str, Any]:
"""Enhanced business impact calculation"""
# More realistic impact calculation
base_revenue_per_hour = 100 # Base revenue per hour for the service
# Calculate impact based on severity of anomalies
impact_multiplier = 1.0
if event.latency_p99 > 300:
impact_multiplier += 0.5 # High latency impact
if event.error_rate > 0.1:
impact_multiplier += 0.8 # High error rate impact
if event.cpu_util and event.cpu_util > 0.9:
impact_multiplier += 0.3 # Resource exhaustion impact
revenue_loss = base_revenue_per_hour * impact_multiplier * (duration_minutes / 60)
# More realistic user impact
base_users_affected = 1000 # Base user count
user_impact_multiplier = (event.error_rate * 10) + (max(0, event.latency_p99 - 100) / 500)
affected_users = int(base_users_affected * user_impact_multiplier)
# Severity classification
if revenue_loss > 500 or affected_users > 5000:
severity = "CRITICAL"
elif revenue_loss > 100 or affected_users > 1000:
severity = "HIGH"
elif revenue_loss > 50 or affected_users > 500:
severity = "MEDIUM"
else:
severity = "LOW"
return {
'revenue_loss_estimate': round(revenue_loss, 2),
'affected_users_estimate': affected_users,
'severity_level': severity,
'throughput_reduction_pct': round(min(100, user_impact_multiplier * 100), 1)
}
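# Worked example of the math above (illustrative inputs, not real data):
# latency_p99=400ms (+0.5), error_rate=0.15 (+0.8), cpu_util=0.95 (+0.3)
# -> impact_multiplier = 2.6
# revenue_loss = 100 * 2.6 * (5 / 60) ≈ 21.67
# user_impact_multiplier = 0.15 * 10 + (400 - 100) / 500 = 2.1
# affected_users = int(1000 * 2.1) = 2100 -> severity "HIGH" (> 1000 users)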
business_calculator = BusinessImpactCalculator()
class AdvancedAnomalyDetector:
"""Enhanced anomaly detection with adaptive thresholds"""
def __init__(self):
self.historical_data = []
self.adaptive_thresholds = {
'latency_p99': 150, # Will adapt based on history
'error_rate': 0.05
}
def detect_anomaly(self, event: ReliabilityEvent) -> bool:
"""Enhanced anomaly detection with adaptive thresholds"""
# Basic threshold checks
latency_anomaly = event.latency_p99 > self.adaptive_thresholds['latency_p99']
error_anomaly = event.error_rate > self.adaptive_thresholds['error_rate']
# Resource-based anomalies
resource_anomaly = False
if event.cpu_util and event.cpu_util > 0.9:
resource_anomaly = True
if event.memory_util and event.memory_util > 0.9:
resource_anomaly = True
# Update adaptive thresholds (simplified)
self._update_thresholds(event)
return latency_anomaly or error_anomaly or resource_anomaly
def _update_thresholds(self, event: ReliabilityEvent):
"""Update adaptive thresholds based on historical data"""
self.historical_data.append(event)
# Keep only recent history
if len(self.historical_data) > 100:
self.historical_data.pop(0)
# Update latency threshold to 90th percentile of recent data
if len(self.historical_data) > 10:
recent_latencies = [e.latency_p99 for e in self.historical_data[-20:]]
self.adaptive_thresholds['latency_p99'] = np.percentile(recent_latencies, 90)
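# Illustrative example: with 11 recorded latencies of 100, 110, ..., 200 ms,
# np.percentile(..., 90) returns 190.0, so the latency threshold drifts from
# the static default of 150 ms up to 190 ms.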
anomaly_detector = AdvancedAnomalyDetector()
def call_huggingface_analysis(prompt: str) -> str:
"""Use HF Inference API or fallback simulation"""
if not HF_TOKEN:
# Enhanced fallback analysis
fallback_insights = [
"High latency detected - possible resource contention or network issues",
"Error rate increase suggests recent deployment instability",
"Latency spike correlates with increased user traffic patterns",
"Intermittent failures indicate potential dependency service degradation",
"Performance degradation detected - consider scaling compute resources"
]
import random
return random.choice(fallback_insights)
try:
enhanced_prompt = f"""
As a senior reliability engineer, analyze this telemetry event and provide a concise root cause analysis:
{prompt}
Focus on:
- Potential infrastructure or application issues
- Correlation between metrics
- Business impact assessment
- Recommended investigation areas
Provide 1-2 sentences maximum with actionable insights.
"""
payload = {
"model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
"prompt": enhanced_prompt,
"max_tokens": 150,
"temperature": 0.4,
}
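# Assumption: the endpoint returns an OpenAI-style completions body,
# i.e. {"choices": [{"text": "..."}], ...}, which the parsing below expects.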
response = requests.post(HF_API_URL, headers=HEADERS, json=payload, timeout=15)
if response.status_code == 200:
result = response.json()
analysis_text = result.get("choices", [{}])[0].get("text", "").strip()
if analysis_text and len(analysis_text) > 10:
return analysis_text.split('\n')[0]
return analysis_text
else:
return f"API Error {response.status_code}: Service temporarily unavailable"
except Exception as e:
return f"Analysis service error: {str(e)}"
def analyze_event(component: str, latency: float, error_rate: float,
throughput: float = 1000, cpu_util: Optional[float] = None,
memory_util: Optional[float] = None) -> Dict[str, Any]:
"""Main event analysis function"""
# Create enhanced event
event = ReliabilityEvent(
component=component,
latency_p99=latency,
error_rate=error_rate,
throughput=throughput,
cpu_util=cpu_util,
memory_util=memory_util,
upstream_deps=["auth-service", "database"] if component == "api-service" else []
)
# Detect anomaly
is_anomaly = anomaly_detector.detect_anomaly(event)
event.severity = EventSeverity.HIGH if is_anomaly else EventSeverity.LOW
# Build analysis prompt
prompt = (
f"Component: {component}\nLatency: {latency:.2f}ms\nError Rate: {error_rate:.3f}\n"
f"Throughput: {throughput:.0f}\nCPU: {cpu_util or 'N/A'}\nMemory: {memory_util or 'N/A'}\n"
f"Status: {'ANOMALY' if is_anomaly else 'NORMAL'}\n\n"
"Provide a one-line reliability insight or root cause analysis."
)
# Get AI analysis
analysis = call_huggingface_analysis(prompt)
# Evaluate healing policies
healing_actions = policy_engine.evaluate_policies(event)
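# evaluate_policies() is expected to return a list of HealingAction enum
# members; their .value strings are surfaced in the result dict below.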
# Calculate business impact
business_impact = business_calculator.calculate_impact(event) if is_anomaly else None
# Vector memory learning
if index is not None and model is not None and is_anomaly:
vector_text = f"{component} {latency} {error_rate} {analysis}"
vec = model.encode([vector_text])
index.add(np.array(vec, dtype=np.float32))
incident_texts.append(vector_text)
save_index()
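# Note: the stored vectors could later be queried with index.search() to
# surface similar past incidents; that retrieval path is not wired into the
# UI in this version.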
# Prepare result
result = {
"timestamp": event.timestamp,
"component": component,
"latency_p99": latency,
"error_rate": error_rate,
"throughput": throughput,
"status": "ANOMALY" if is_anomaly else "NORMAL",
"analysis": analysis,
"healing_actions": [action.value for action in healing_actions],
"business_impact": business_impact,
"severity": event.severity.value,
"similar_incidents_count": len(incident_texts) if is_anomaly else 0
}
events_history.append(event)
return result
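# Minimal usage sketch (hypothetical values, not executed by the app):
#   result = analyze_event("api-service", latency=420.0, error_rate=0.12,
#                          throughput=800, cpu_util=0.95, memory_util=0.7)
#   result["status"]          -> likely "ANOMALY" under the default thresholds
#   result["healing_actions"] -> names of any policy-driven recovery actions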
# === Gradio UI ===
def submit_event(component, latency, error_rate, throughput, cpu_util, memory_util):
"""Handle event submission from UI"""
try:
# Convert inputs
latency = float(latency)
error_rate = float(error_rate)
throughput = float(throughput) if throughput is not None else 1000
cpu_util = float(cpu_util) if cpu_util is not None else None
memory_util = float(memory_util) if memory_util is not None else None
result = analyze_event(component, latency, error_rate, throughput, cpu_util, memory_util)
# Prepare table data
table_data = []
for event in events_history[-15:]:
table_data.append([
event.timestamp[:19], # Trim microseconds
event.component,
event.latency_p99,
f"{event.error_rate:.3f}",
event.throughput,
event.severity.value.upper(),
(getattr(event, 'analysis', '') or 'N/A')[:50]
])
# Format output message
status_emoji = "🚨" if result["status"] == "ANOMALY" else "✅"
output_msg = f"{status_emoji} {result['status']} - {result['analysis']}"
if result["business_impact"]:
impact = result["business_impact"]
output_msg += f"\n💰 Business Impact: ${impact['revenue_loss_estimate']} | 👥 {impact['affected_users_estimate']} users | 🚨 {impact['severity_level']}"
if result["healing_actions"]:
actions = ", ".join(result["healing_actions"])
output_msg += f"\n🔧 Auto-Actions: {actions}"
return (
output_msg,
gr.Dataframe(
headers=["Timestamp", "Component", "Latency", "Error Rate", "Throughput", "Severity", "Analysis"],
value=table_data,
wrap=True
)
)
except Exception as e:
return f"❌ Error processing event: {str(e)}", gr.Dataframe(value=[])
def create_ui():
"""Create the Gradio interface"""
with gr.Blocks(title="🧠 Agentic Reliability Framework v2", theme="soft") as demo:
gr.Markdown("""
# 🧠 Agentic Reliability Framework v2
**Production-Grade Self-Healing AI Systems**
*Advanced anomaly detection + AI-driven root cause analysis + Business impact quantification*
""")
with gr.Row():
with gr.Column(scale=1):
gr.Markdown("### π Telemetry Input")
component = gr.Dropdown(
choices=["api-service", "auth-service", "payment-service", "database", "cache-service"],
value="api-service",
label="Component",
info="Select the service being monitored"
)
latency = gr.Slider(
minimum=10, maximum=1000, value=100, step=1,
label="Latency P99 (ms)",
info="Alert threshold: >150ms (adaptive)"
)
error_rate = gr.Slider(
minimum=0, maximum=0.5, value=0.02, step=0.001,
label="Error Rate",
info="Alert threshold: >0.05"
)
throughput = gr.Number(
value=1000,
label="Throughput (req/sec)",
info="Current request rate"
)
cpu_util = gr.Slider(
minimum=0, maximum=1, value=0.4, step=0.01,
label="CPU Utilization",
info="0.0 - 1.0 scale"
)
memory_util = gr.Slider(
minimum=0, maximum=1, value=0.3, step=0.01,
label="Memory Utilization",
info="0.0 - 1.0 scale"
)
submit_btn = gr.Button("🚀 Submit Telemetry Event", variant="primary", size="lg")
with gr.Column(scale=2):
gr.Markdown("### π Live Analysis & Healing")
output_text = gr.Textbox(
label="Analysis Results",
placeholder="Submit an event to see AI-powered analysis...",
lines=4
)
gr.Markdown("### π Recent Events (Last 15)")
events_table = gr.Dataframe(
headers=["Timestamp", "Component", "Latency", "Error Rate", "Throughput", "Severity", "Analysis"],
label="Event History",
wrap=True,
max_height="400px"
)
# Information sections
with gr.Accordion("ℹ️ Framework Capabilities", open=False):
gr.Markdown("""
- **🤖 AI-Powered Analysis**: Mixtral-8x7B for intelligent root cause analysis
- **🔧 Policy-Based Healing**: Automated recovery actions based on severity and context
- **💰 Business Impact**: Revenue and user impact quantification
- **🎯 Adaptive Detection**: Thresholds that adapt to recent telemetry (90th-percentile latency)
- **🔍 Vector Memory**: FAISS-based incident memory for similarity detection
- **⚡ Production Ready**: Circuit breakers, cooldowns, and enterprise features
""")
with gr.Accordion("🔧 Healing Policies", open=False):
policy_info = []
for policy in policy_engine.policies:
if policy.enabled:
actions = ", ".join([action.value for action in policy.actions])
policy_info.append(f"**{policy.name}**: {actions} (Priority: {policy.priority})")
gr.Markdown("\n\n".join(policy_info))
# Event handling
submit_btn.click(
fn=submit_event,
inputs=[component, latency, error_rate, throughput, cpu_util, memory_util],
outputs=[output_text, events_table]
)
return demo
if __name__ == "__main__":
demo = create_ui()
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False
) |