petter2025 commited on
Commit
23c10b8
·
verified ·
1 Parent(s): 115eb63

Update demo/mock_arf.py

Browse files
Files changed (1) hide show
  1. demo/mock_arf.py +419 -97
demo/mock_arf.py CHANGED
@@ -1,122 +1,444 @@
1
  """
2
- Mock ARF components for demo purposes
3
  In production, these would use the real agentic-reliability-framework package
4
  """
5
  import time
6
  import json
7
- from typing import Dict, Any, List
 
8
  import random
 
 
9
 
10
def simulate_arf_analysis(scenario: Dict[str, Any]) -> Dict[str, Any]:
    """Simulate a single pass of the ARF anomaly-analysis pipeline.

    Builds a mock report flagging a critical anomaly: it echoes the
    scenario's root cause (defaulting to 'unknown') and attaches
    randomised confidence/timing figures.
    """
    report: Dict[str, Any] = {
        "analysis_complete": True,
        "anomaly_detected": True,
        "severity": "critical",
        # Fall back to 'unknown' when the scenario omits a root cause.
        "root_cause": scenario.get('root_cause', 'unknown'),
        "pattern_detected": True,
        "pattern_confidence": random.uniform(0.8, 0.95),
        "analysis_timestamp": time.time(),
        "processing_time_ms": random.randint(200, 500),
    }
    return report
22
 
23
def run_rag_similarity_search(scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Simulate a RAG similarity search over historical incidents.

    Generates 3-5 synthetic incident records for the scenario's
    component (default 'redis_cache') and returns them ordered by
    descending similarity score. Incidents with similarity above 0.8
    are marked successful and carry a mock cost-savings figure.
    """
    component = scenario.get('component', 'redis_cache')
    is_cache = component == "redis_cache"

    matches: List[Dict[str, Any]] = []
    how_many = random.randint(3, 5)

    for idx in range(how_many):
        score = random.uniform(0.7, 0.95)
        resolved_ok = score > 0.8

        record: Dict[str, Any] = {
            "incident_id": f"inc_{int(time.time())}_{idx}",
            "component": component,
            "similarity_score": score,
            "success": resolved_ok,
            "resolution": "scale_out" if is_cache else "restart",
            "actions_taken": ["scale_out", "adjust_cache_ttl"] if is_cache else ["restart_container"],
            "resolution_time_minutes": random.uniform(5, 15),
            # Spread mock incidents over the previous 1-30 days.
            "timestamp": time.time() - random.randint(86400, 2592000),
        }

        if resolved_ok:
            record["cost_savings"] = random.randint(1000, 10000)

        matches.append(record)

    # Highest-similarity incidents first (stable sort, same as list.sort).
    return sorted(matches, key=lambda rec: rec['similarity_score'], reverse=True)
55
-
56
def calculate_pattern_confidence(scenario: Dict[str, Any], similar_incidents: List[Dict[str, Any]]) -> float:
    """Score pattern-detection confidence from retrieved incidents.

    Starts at a 0.75 base and adds boosts for the number of matches,
    their average similarity, and their historical success rate; the
    result is capped at 0.98. Returns a flat 0.7 when there is no
    supporting history at all.
    """
    if not similar_incidents:
        return 0.7  # No history to corroborate the pattern.

    n_matches = len(similar_incidents)
    mean_similarity = sum(item['similarity_score'] for item in similar_incidents) / n_matches
    wins = sum(1 for item in similar_incidents if item['success'])

    confidence = 0.75                           # base confidence
    confidence += min(0.15, n_matches * 0.03)   # more matches -> more trust
    confidence += mean_similarity * 0.1         # closer matches -> more trust
    confidence += (wins / n_matches) * 0.1      # proven fixes -> more trust

    return min(0.98, confidence)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
 
79
def create_mock_healing_intent(scenario: Dict[str, Any],
                               similar_incidents: List[Dict[str, Any]],
                               confidence: float = 0.85) -> Dict[str, Any]:
    """Create a mock HealingIntent object.

    Args:
        scenario: Incident scenario; its 'component' key selects the
            proposed healing action (redis_cache / database / other).
        similar_incidents: RAG search results; the top three feed the
            rag_similarity_score (None when the list is empty).
        confidence: Pattern confidence to embed in the intent.

    Returns:
        Dict describing the proposed (non-executable, community-edition)
        healing action, including a stable deterministic_id.
    """
    import hashlib  # local import: only needed for the deterministic id

    # Determine action based on scenario
    component = scenario.get('component', 'redis_cache')
    if component == 'redis_cache':
        action = 'scale_out'
        parameters = {'scale_factor': 2, 'cache_ttl': 300}
        justification = "Scale Redis cluster and adjust cache TTL based on historical pattern"
    elif component == 'database':
        action = 'optimize_connections'
        parameters = {'max_connections': 200, 'connection_timeout': 30}
        justification = "Optimize database connection pool settings"
    else:
        action = 'restart_container'
        parameters = {}
        justification = "Restart container to resolve memory issues"

    # Average similarity over the top-3 retrieved incidents (None when empty).
    rag_score = None
    if similar_incidents:
        rag_score = sum(i['similarity_score'] for i in similar_incidents[:3]) / min(3, len(similar_incidents))

    # BUG FIX: the previous implementation derived this id from the builtin
    # hash() of a JSON string, which is salted per interpreter run
    # (PYTHONHASHSEED), so the "deterministic" id changed between processes.
    # An md5 digest of the canonical JSON is stable everywhere.
    digest = hashlib.md5(json.dumps(parameters, sort_keys=True).encode()).hexdigest()
    deterministic_id = f"intent_{int(digest, 16) % 10000:04d}"

    return {
        "action": action,
        "component": component,
        "parameters": parameters,
        "justification": justification,
        "confidence": confidence,
        "incident_id": scenario.get('incident_id', f"inc_{int(time.time())}"),
        "detected_at": time.time(),
        "similar_incidents": similar_incidents,
        "rag_similarity_score": rag_score,
        "source": "oss_analysis",
        "intent_id": f"intent_{int(time.time())}",
        "created_at": time.time(),
        "status": "created",
        "oss_edition": "community",
        "requires_enterprise": True,
        "execution_allowed": False,
        "deterministic_id": deterministic_id
    }
 
1
  """
2
+ Enhanced Mock ARF components for demo purposes
3
  In production, these would use the real agentic-reliability-framework package
4
  """
5
  import time
6
  import json
7
+ import hashlib
8
+ from typing import Dict, Any, List, Optional
9
  import random
10
+ import logging
11
+ from datetime import datetime, timedelta
12
 
13
+ logger = logging.getLogger(__name__)
 
 
 
 
 
 
 
 
 
 
 
14
 
15
+
16
class MockARFSimulator:
    """Enhanced mock ARF simulator with realistic patterns.

    Produces synthetic anomaly analyses, RAG-style similar-incident
    retrievals, pattern-confidence scores and healing intents for demo
    purposes. All randomness goes through the module-level ``random``
    generator, seeded in ``__init__`` so a given seed reproduces a run.
    """

    def __init__(self, seed: Optional[int] = None):
        """Initialise pattern/action tables and seed the RNG.

        Args:
            seed: RNG seed; defaults to the current UNIX time.
                NOTE: this seeds the *global* ``random`` generator, which
                affects other users of ``random`` in this process.
        """
        # `is None` check so an explicit seed of 0 is honoured
        # (`seed or ...` would silently replace 0 with the clock).
        self.seed = seed if seed is not None else int(time.time())
        random.seed(self.seed)
        self._incident_patterns = self._initialize_patterns()
        self._healing_actions = self._initialize_healing_actions()

    def _initialize_patterns(self) -> Dict[str, Dict[str, Any]]:
        """Initialize realistic incident patterns."""
        return {
            "cache_miss_storm": {
                "pattern": "exponential_miss_increase",
                "indicators": ["cache_hit_rate < 30%", "database_load > 80%", "response_time > 1500ms"],
                "typical_causes": ["key_eviction", "cold_cache", "traffic_spike"],
                "resolution_patterns": ["scale_out", "cache_warming", "ttl_optimization"]
            },
            "db_connection_exhaustion": {
                "pattern": "connection_pool_saturation",
                "indicators": ["active_connections > 95%", "connection_wait > 30s", "query_timeout_rate > 10%"],
                "typical_causes": ["connection_leak", "slow_queries", "connection_pool_misconfig"],
                "resolution_patterns": ["pool_tuning", "query_optimization", "circuit_breaker"]
            },
            "memory_leak": {
                "pattern": "gradual_memory_increase",
                "indicators": ["memory_usage > 90%", "gc_frequency_high", "restart_count_increasing"],
                "typical_causes": ["object_retention", "resource_leak", "cache_growth"],
                "resolution_patterns": ["heap_analysis", "restart", "memory_limit"]
            },
            "api_rate_limit": {
                "pattern": "rate_limit_cascade",
                "indicators": ["429_rate > 40%", "retry_storm", "cascade_failures"],
                "typical_causes": ["burst_traffic", "misconfigured_limits", "retry_logic"],
                "resolution_patterns": ["backoff_strategy", "circuit_breaker", "cache_responses"]
            }
        }

    def _initialize_healing_actions(self) -> Dict[str, Dict[str, Any]]:
        """Initialize healing actions with success rates."""
        return {
            "scale_out": {
                "action": "increase_capacity",
                "success_rate": 0.87,
                "typical_recovery_time": "5-15 minutes",
                "risk_level": "low",
                "prerequisites": ["capacity_available", "auto_scaling_enabled"]
            },
            "cache_warming": {
                "action": "preload_cache",
                "success_rate": 0.72,
                "typical_recovery_time": "2-10 minutes",
                "risk_level": "very_low",
                "prerequisites": ["predictive_model", "cache_pattern_known"]
            },
            "restart_container": {
                "action": "graceful_restart",
                "success_rate": 0.95,
                "typical_recovery_time": "1-3 minutes",
                "risk_level": "medium",
                "prerequisites": ["health_checks", "load_balancer", "redundancy"]
            },
            "circuit_breaker": {
                "action": "fail_fast_protection",
                "success_rate": 0.89,
                "typical_recovery_time": "instant",
                "risk_level": "low",
                "prerequisites": ["dependency_awareness", "fallback_strategy"]
            }
        }

    def simulate_arf_analysis(self, scenario: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate ARF analysis pipeline with enhanced realism.

        Returns a report dict containing severity, the matched pattern
        name/confidence, and mock model/timing metadata.
        """
        component = scenario.get('component', 'unknown')
        pattern_name = self._detect_pattern(component, scenario)

        return {
            "analysis_complete": True,
            "anomaly_detected": True,
            "severity": self._determine_severity(scenario),
            "root_cause": scenario.get('root_cause', 'resource_constraint'),
            "pattern_detected": True,
            "pattern_name": pattern_name,
            "pattern_confidence": self._calculate_pattern_confidence(pattern_name),
            "detection_method": "ensemble_ml_model",
            "detection_time_ms": random.randint(150, 350),
            "analysis_timestamp": time.time(),
            "processing_time_ms": random.randint(200, 500),
            "model_version": "arf-ml-v3.3.6",
            "features_analyzed": self._extract_features(scenario)
        }

    def run_rag_similarity_search(self, scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Simulate RAG similarity search with realistic data.

        Generates 3-5 synthetic incidents for the scenario's component,
        sorted by descending similarity, each tagged with shared RAG
        retrieval metadata.
        """
        component = scenario.get('component', 'redis_cache')
        pattern_name = self._detect_pattern(component, scenario)

        # Generate realistic similar incidents
        similar_incidents = []
        base_time = time.time()

        for i in range(random.randint(3, 5)):
            days_ago = random.randint(1, 90)
            incident_time = base_time - (days_ago * 86400)

            similarity = random.uniform(0.75, 0.95)
            success = similarity > 0.82

            incident = {
                "incident_id": f"inc_{int(incident_time)}_{i}",
                "component": component,
                "pattern": pattern_name,
                "similarity_score": similarity,
                "cosine_similarity": similarity,
                "success": success,
                "resolution": self._get_recommended_action(component),
                "actions_taken": self._get_action_sequence(component, success),
                "resolution_time_minutes": random.uniform(3.5, 18.5),
                "timestamp": incident_time,
                "occurred_at": datetime.fromtimestamp(incident_time).isoformat(),
                "engineers_involved": random.randint(1, 3),
                "blast_radius": f"{random.randint(1, 5)} services",
                "root_cause_analysis": self._generate_root_cause(component)
            }

            if success:
                cost_saved = random.randint(1500, 12500)
                incident["cost_savings"] = cost_saved
                incident["mttr_reduction"] = f"{random.randint(60, 85)}%"
                incident["user_impact"] = f"{random.randint(85, 99)}% reduction"

            similar_incidents.append(incident)

        # Sort by similarity
        similar_incidents.sort(key=lambda x: x['similarity_score'], reverse=True)

        # Add RAG metadata (shared dict attached to every incident)
        rag_metadata = {
            "vector_db": "chroma_v0.4.0",
            "embedding_model": "all-MiniLM-L6-v2",
            "index_size": f"{random.randint(500, 5000)} incidents",
            "retrieval_time_ms": random.randint(45, 120),
            "top_k": len(similar_incidents)
        }

        for incident in similar_incidents:
            incident["rag_metadata"] = rag_metadata

        return similar_incidents

    def calculate_pattern_confidence(self, scenario: Dict[str, Any],
                                     similar_incidents: List[Dict[str, Any]]) -> float:
        """Calculate pattern detection confidence with enhanced logic.

        Combines the pattern's base confidence with boosts for match
        count, average similarity, success rate and recency; result is
        clamped to [0.70, 0.98].
        """
        if not similar_incidents:
            return 0.70  # Base confidence without similar incidents

        # Base confidence from pattern matching
        component = scenario.get('component', 'unknown')
        pattern_name = self._detect_pattern(component, scenario)
        base_confidence = self._calculate_pattern_confidence(pattern_name)

        # Boost based on number of similar incidents
        incident_count = len(similar_incidents)
        incident_boost = min(0.15, incident_count * 0.025)

        # Boost based on average similarity
        avg_similarity = sum(i['similarity_score'] for i in similar_incidents) / incident_count
        similarity_boost = avg_similarity * 0.12

        # Boost based on success rate
        success_count = sum(1 for i in similar_incidents if i['success'])
        success_rate = success_count / incident_count
        success_boost = success_rate * 0.10

        # Boost based on recency (weight recent incidents more)
        recency_boost = self._calculate_recency_boost(similar_incidents)

        total_confidence = (
            base_confidence +
            incident_boost +
            similarity_boost +
            success_boost +
            recency_boost
        )

        # Cap at 0.98 and ensure minimum
        return max(0.70, min(0.98, total_confidence))

    def create_mock_healing_intent(self, scenario: Dict[str, Any],
                                   similar_incidents: List[Dict[str, Any]],
                                   confidence: float = 0.85) -> Dict[str, Any]:
        """Create a realistic mock HealingIntent object.

        The intent is community-edition only: execution_allowed is
        always False and requires_enterprise is always True.
        """
        component = scenario.get('component', 'redis_cache')
        pattern_name = self._detect_pattern(component, scenario)

        # Determine action based on component and pattern
        action_info = self._determine_healing_action(component, pattern_name)

        # Generate deterministic ID from the canonical parameter JSON
        params_hash = hashlib.md5(
            json.dumps(action_info['parameters'], sort_keys=True).encode()
        ).hexdigest()[:8]

        # Calculate RAG similarity metrics
        rag_metrics = self._calculate_rag_metrics(similar_incidents)

        # Create healing intent
        healing_intent = {
            "action": action_info['action'],
            "component": component,
            "pattern": pattern_name,
            "parameters": action_info['parameters'],
            "justification": action_info['justification'],
            "confidence": confidence,
            "incident_id": f"inc_{int(time.time())}",
            "detected_at": time.time(),
            "similar_incidents_count": len(similar_incidents),
            "rag_similarity_score": rag_metrics['avg_similarity'],
            "rag_metrics": rag_metrics,
            "source": "oss_analysis",
            "intent_id": f"intent_{int(time.time())}_{params_hash}",
            "created_at": time.time(),
            "status": "created",
            "edition": "community",
            "requires_enterprise": True,
            "execution_allowed": False,
            "safety_checks": {
                "blast_radius": f"{random.randint(1, 3)} services",
                "business_hours": "compliant",
                "rollback_plan": "available",
                "approval_required": True,
                "risk_assessment": "low",
                "compliance_check": "passed"
            },
            "expected_outcome": {
                "recovery_time_minutes": action_info['recovery_time'],
                "success_probability": action_info['success_rate'],
                "cost_savings_estimate": self._estimate_savings(scenario),
                "user_impact_reduction": f"{random.randint(85, 99)}%"
            },
            "deterministic_id": f"intent_{params_hash}"
        }

        return healing_intent

    # Helper methods
    def _detect_pattern(self, component: str, scenario: Dict[str, Any]) -> str:
        """Detect incident pattern based on component name substrings."""
        if 'cache' in component.lower():
            return "cache_miss_storm"
        elif 'database' in component.lower() or 'postgres' in component.lower():
            return "db_connection_exhaustion"
        elif 'memory' in component.lower() or 'java' in component.lower():
            return "memory_leak"
        elif 'api' in component.lower() or 'rate' in component.lower():
            return "api_rate_limit"
        else:
            return "unknown_pattern"

    def _determine_severity(self, scenario: Dict[str, Any]) -> str:
        """Determine incident severity from scenario metrics."""
        metrics = scenario.get('metrics', {})

        if 'error_rate' in metrics and metrics['error_rate'] > 30:
            return "critical"
        elif 'response_time_ms' in metrics and metrics['response_time_ms'] > 2000:
            return "critical"
        elif 'memory_usage' in metrics and metrics['memory_usage'] > 90:
            return "high"
        else:
            return random.choice(["high", "medium"])

    def _calculate_pattern_confidence(self, pattern_name: str) -> float:
        """Calculate base confidence for a specific pattern."""
        confidence_map = {
            "cache_miss_storm": 0.92,
            "db_connection_exhaustion": 0.88,
            "memory_leak": 0.85,
            "api_rate_limit": 0.90,
            "unknown_pattern": 0.70
        }
        return confidence_map.get(pattern_name, 0.75)

    def _extract_features(self, scenario: Dict[str, Any]) -> List[str]:
        """Extract up to 10 'key:value' features for the mock ML analysis."""
        features = []
        metrics = scenario.get('metrics', {})

        for key, value in metrics.items():
            if isinstance(value, (int, float)):
                features.append(f"{key}:{value}")

        # Add derived features
        if 'cache_hit_rate' in metrics and metrics['cache_hit_rate'] < 30:
            features.append("cache_miss_critical")
        if 'error_rate' in metrics and metrics['error_rate'] > 10:
            features.append("error_rate_high")

        return features[:10]  # Limit to 10 features

    def _get_recommended_action(self, component: str) -> str:
        """Get recommended healing action for a component."""
        if 'cache' in component.lower():
            return 'scale_out'
        elif 'database' in component.lower():
            return 'optimize_connections'
        elif 'memory' in component.lower():
            return 'restart_container'
        else:
            return 'circuit_breaker'

    def _get_action_sequence(self, component: str, success: bool) -> List[str]:
        """Get the sequence of actions taken in a mock past incident."""
        base_actions = []

        if 'cache' in component.lower():
            base_actions = ["scale_out", "adjust_cache_ttl", "implement_warming"]
        elif 'database' in component.lower():
            base_actions = ["increase_pool_size", "add_timeout", "optimize_queries"]

        if success and random.random() > 0.5:
            base_actions.append("add_monitoring")

        return base_actions

    def _generate_root_cause(self, component: str) -> str:
        """Generate a realistic root cause for the component type."""
        causes = {
            'cache': ["key_eviction_policy", "cold_cache_after_deploy", "traffic_spike_2x"],
            'database': ["connection_leak_in_pool", "slow_query_cascade", "max_connections_limit"],
            'memory': ["object_retention_in_cache", "thread_local_leak", "off_heap_memory_growth"]
        }

        for key, cause_list in causes.items():
            if key in component.lower():
                return random.choice(cause_list)

        return "resource_constraint_under_load"

    def _calculate_recency_boost(self, incidents: List[Dict[str, Any]]) -> float:
        """Calculate confidence boost based on incident recency."""
        if not incidents:
            return 0.0

        now = time.time()
        recent_count = 0

        for incident in incidents:
            incident_time = incident.get('timestamp', now)
            days_ago = (now - incident_time) / 86400

            if days_ago < 7:  # Within last week
                recent_count += 1

        return min(0.08, recent_count * 0.02)

    def _determine_healing_action(self, component: str, pattern: str) -> Dict[str, Any]:
        """Determine healing action with parameters and expected outcome."""
        if 'cache' in component.lower():
            return {
                "action": 'scale_out',
                "parameters": {'scale_factor': random.choice([2, 3]), 'cache_ttl': 300},
                "justification": "Scale Redis cluster and adjust cache TTL based on historical pattern",
                "success_rate": 0.87,
                "recovery_time": "5-15 minutes"
            }
        elif 'database' in component.lower():
            return {
                "action": 'optimize_connections',
                "parameters": {'max_connections': 200, 'connection_timeout': 30},
                "justification": "Optimize database connection pool settings based on load patterns",
                "success_rate": 0.82,
                "recovery_time": "2-8 minutes"
            }
        else:
            return {
                "action": 'restart_container',
                "parameters": {'grace_period': 30, 'drain_connections': True},
                "justification": "Restart container to resolve memory issues with graceful shutdown",
                "success_rate": 0.95,
                "recovery_time": "1-3 minutes"
            }

    def _calculate_rag_metrics(self, incidents: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Calculate RAG retrieval metrics over the incident list."""
        if not incidents:
            return {
                "avg_similarity": 0.0,
                "similarity_std": 0.0,
                "coverage_score": 0.0
            }

        similarities = [i.get('similarity_score', 0) for i in incidents]
        avg_similarity = sum(similarities) / len(similarities)

        # BUG FIX: the previous code called np.std() but numpy is never
        # imported in this module, so any multi-incident call raised
        # NameError. Compute the population standard deviation (np.std's
        # ddof=0 default) with the stdlib instead.
        if len(similarities) > 1:
            variance = sum((s - avg_similarity) ** 2 for s in similarities) / len(similarities)
            similarity_std = variance ** 0.5
        else:
            similarity_std = 0.0

        return {
            "avg_similarity": avg_similarity,
            "similarity_std": similarity_std,
            "coverage_score": min(1.0, len(incidents) / 5),
            "diversity_score": random.uniform(0.6, 0.9)
        }

    def _estimate_savings(self, scenario: Dict[str, Any]) -> int:
        """Estimate cost savings from the scenario's business impact."""
        impact = scenario.get('business_impact', {})
        revenue_loss = impact.get('revenue_loss_per_hour', 5000)

        # 70-90% savings estimate
        savings_percentage = random.uniform(0.7, 0.9)
        return int(revenue_loss * savings_percentage)
426
+
427
+
428
+ # Global simulator instance
429
+ _simulator = MockARFSimulator()
430
+
431
+ # Public API functions (backward compatibility)
432
+ def simulate_arf_analysis(scenario: Dict[str, Any]) -> Dict[str, Any]:
433
+ return _simulator.simulate_arf_analysis(scenario)
434
+
435
+ def run_rag_similarity_search(scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
436
+ return _simulator.run_rag_similarity_search(scenario)
437
+
438
+ def calculate_pattern_confidence(scenario: Dict[str, Any], similar_incidents: List[Dict[str, Any]]) -> float:
439
+ return _simulator.calculate_pattern_confidence(scenario, similar_incidents)
440
 
441
  def create_mock_healing_intent(scenario: Dict[str, Any],
442
+ similar_incidents: List[Dict[str, Any]],
443
+ confidence: float = 0.85) -> Dict[str, Any]:
444
+ return _simulator.create_mock_healing_intent(scenario, similar_incidents, confidence)