Create mock_arf.py
Browse files- demo/mock_arf.py +122 -0
demo/mock_arf.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Mock ARF components for demo purposes
|
| 3 |
+
In production, these would use the real agentic-reliability-framework package
|
| 4 |
+
"""
|
import hashlib
import json
import random
import time
from typing import Any, Dict, List
| 9 |
+
|
def simulate_arf_analysis(scenario: Dict[str, Any]) -> Dict[str, Any]:
    """Simulate the ARF analysis pipeline for demo purposes.

    Args:
        scenario: Incident description; only the optional 'root_cause'
            key is consulted here (defaults to 'unknown').

    Returns:
        A mock analysis result: fixed "critical" verdict plus randomized
        pattern confidence and processing latency to look realistic.
    """
    root_cause = scenario.get('root_cause', 'unknown')
    result = {
        "analysis_complete": True,
        "anomaly_detected": True,
        "severity": "critical",
        "root_cause": root_cause,
        "pattern_detected": True,
        # Randomized so repeated demo runs don't show identical numbers.
        "pattern_confidence": random.uniform(0.8, 0.95),
        "analysis_timestamp": time.time(),
        "processing_time_ms": random.randint(200, 500),
    }
    return result
def run_rag_similarity_search(scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Simulate a RAG similarity search over historical incidents.

    Args:
        scenario: Incident description; only the optional 'component'
            key is consulted (defaults to 'redis_cache').

    Returns:
        3-5 mock incidents for the same component, each with a random
        similarity score; incidents scoring above 0.8 are marked
        successful and get a 'cost_savings' figure. Sorted by
        descending similarity.
    """
    component = scenario.get('component', 'redis_cache')
    is_cache = component == "redis_cache"

    incidents: List[Dict[str, Any]] = []
    for idx in range(random.randint(3, 5)):
        score = random.uniform(0.7, 0.95)
        resolved_ok = score > 0.8

        record: Dict[str, Any] = {
            "incident_id": f"inc_{int(time.time())}_{idx}",
            "component": component,
            "similarity_score": score,
            "success": resolved_ok,
            "resolution": "scale_out" if is_cache else "restart",
            "actions_taken": (
                ["scale_out", "adjust_cache_ttl"] if is_cache else ["restart_container"]
            ),
            "resolution_time_minutes": random.uniform(5, 15),
            # Spread the mock history over the last 1-30 days.
            "timestamp": time.time() - random.randint(86400, 2592000),
        }

        if resolved_ok:
            record["cost_savings"] = random.randint(1000, 10000)

        incidents.append(record)

    # Most-similar first.
    return sorted(incidents, key=lambda inc: inc['similarity_score'], reverse=True)
def calculate_pattern_confidence(scenario: Dict[str, Any],
                                 similar_incidents: List[Dict[str, Any]]) -> float:
    """Score pattern-detection confidence from retrieved incidents.

    Starts at a 0.75 baseline and adds three boosts: corpus size
    (0.03 per incident, capped at +0.15), mean similarity score
    (times 0.1) and success rate (times 0.1). The total is capped at
    0.98; with no incidents at all a flat 0.7 is returned.

    Args:
        scenario: Incident description (currently unused in scoring).
        similar_incidents: Incidents with 'similarity_score' and
            'success' keys, e.g. from run_rag_similarity_search().

    Returns:
        Confidence in [0.7, 0.98].
    """
    count = len(similar_incidents)
    if count == 0:
        return 0.7

    scores = [inc['similarity_score'] for inc in similar_incidents]
    successful = [inc for inc in similar_incidents if inc['success']]

    confidence = 0.75                              # baseline
    confidence += min(0.15, count * 0.03)          # corpus-size boost
    confidence += (sum(scores) / count) * 0.1      # mean-similarity boost
    confidence += (len(successful) / count) * 0.1  # success-rate boost

    return min(0.98, confidence)
def create_mock_healing_intent(scenario: Dict[str, Any],
                               similar_incidents: List[Dict[str, Any]],
                               confidence: float = 0.85) -> Dict[str, Any]:
    """Create a mock HealingIntent record for the demo.

    Args:
        scenario: Incident description; 'component' selects the remediation
            (redis_cache / database / anything else) and 'incident_id' is
            propagated when present.
        similar_incidents: Incidents with 'similarity_score' keys, e.g. from
            run_rag_similarity_search(); the top three scores are averaged
            into rag_similarity_score (None when the list is empty).
        confidence: Confidence value to stamp on the intent.

    Returns:
        A dict mimicking a HealingIntent: recommended action, parameters,
        justification, RAG evidence, and OSS-edition gating flags
        (execution is never allowed in the community edition).
    """
    # Determine the remediation from the affected component.
    component = scenario.get('component', 'redis_cache')
    if component == 'redis_cache':
        action = 'scale_out'
        parameters = {'scale_factor': 2, 'cache_ttl': 300}
        justification = "Scale Redis cluster and adjust cache TTL based on historical pattern"
    elif component == 'database':
        action = 'optimize_connections'
        parameters = {'max_connections': 200, 'connection_timeout': 30}
        justification = "Optimize database connection pool settings"
    else:
        action = 'restart_container'
        parameters = {}
        justification = "Restart container to resolve memory issues"

    # Average the top-3 similarity scores (fewer if fewer are available).
    rag_score = None
    if similar_incidents:
        top = similar_incidents[:3]
        rag_score = sum(i['similarity_score'] for i in top) / len(top)

    # BUG FIX: the original derived "deterministic_id" from hash() of a
    # string, but Python salts string hashes per process (PYTHONHASHSEED),
    # so the id changed on every run. An md5 digest of the canonical JSON
    # gives a value that is actually stable for identical parameters.
    payload = json.dumps(parameters, sort_keys=True).encode('utf-8')
    stable_id = int(hashlib.md5(payload).hexdigest(), 16) % 10000

    return {
        "action": action,
        "component": component,
        "parameters": parameters,
        "justification": justification,
        "confidence": confidence,
        "incident_id": scenario.get('incident_id', f"inc_{int(time.time())}"),
        "detected_at": time.time(),
        "similar_incidents": similar_incidents,
        "rag_similarity_score": rag_score,
        "source": "oss_analysis",
        "intent_id": f"intent_{int(time.time())}",
        "created_at": time.time(),
        "status": "created",
        "oss_edition": "community",
        "requires_enterprise": True,
        "execution_allowed": False,
        "deterministic_id": f"intent_{stable_id:04d}",
    }