petter2025 commited on
Commit
2b6faa9
·
verified ·
1 Parent(s): 0111257

Update demo/mock_arf.py

Browse files
Files changed (1) hide show
  1. demo/mock_arf.py +354 -207
demo/mock_arf.py CHANGED
@@ -1,223 +1,370 @@
 
1
  """
2
- Configuration management for ARF Demo
3
- Updated for better Pydantic compatibility and fallback handling
4
  """
5
- from typing import Optional, Dict, Any, List
6
- from enum import Enum
7
- import os
8
- import logging
9
 
10
- logger = logging.getLogger(__name__)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
- # Try to import from pydantic-settings, fallback to pydantic
13
- try:
14
- from pydantic_settings import BaseSettings
15
- from pydantic import Field, field_validator, ConfigDict
16
- PYDANTIC_V2 = True
17
- logger.info("Using pydantic-settings for BaseSettings")
18
- except ImportError:
19
- try:
20
- from pydantic import BaseSettings, Field, validator
21
- PYDANTIC_V2 = False
22
- logger.info("Using pydantic.BaseSettings (older version)")
23
- except ImportError as e:
24
- logger.warning(f"Failed to import pydantic: {e}. Using fallback settings.")
25
- # Create minimal fallback
26
- class BaseSettings:
27
- model_config = {}
28
- def __init__(self, **kwargs):
29
- for k, v in kwargs.items():
30
- setattr(self, k, v)
31
 
32
- class Field:
33
- @staticmethod
34
- def default(value):
35
- return value
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- def validator(*args, **kwargs):
38
- def decorator(func):
39
- return func
40
- return decorator
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
- def field_validator(*args, **kwargs):
43
- def decorator(func):
44
- return func
45
- return decorator
 
 
 
 
 
 
 
 
 
 
 
46
 
47
- PYDANTIC_V2 = False
48
-
49
-
50
- class ARFMode(str, Enum):
51
- """ARF operation modes"""
52
- DEMO = "demo"
53
- OSS = "oss"
54
- ENTERPRISE = "enterprise"
55
-
56
-
57
- class SafetyMode(str, Enum):
58
- """Safety modes for execution"""
59
- ADVISORY = "advisory"
60
- APPROVAL = "approval"
61
- AUTONOMOUS = "autonomous"
62
-
63
 
64
- class Settings(BaseSettings):
65
  """
66
- Application settings with environment variable support
 
 
 
 
 
 
 
67
  """
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
- # ===== System Mode =====
70
- arf_mode: ARFMode = Field(
71
- default=ARFMode.DEMO,
72
- description="ARF operation mode"
73
- )
74
-
75
- use_mock_arf: bool = Field(
76
- default=True,
77
- description="Use mock ARF implementation (for demo mode)"
78
- )
79
-
80
- # ===== ARF Configuration =====
81
- arf_api_key: Optional[str] = Field(
82
- default=None,
83
- description="ARF API key for real integration"
84
- )
85
-
86
- arf_base_url: str = Field(
87
- default="https://api.arf.dev",
88
- description="ARF API base URL"
89
- )
90
-
91
- # ===== Business Configuration =====
92
- engineer_hourly_rate: float = Field(
93
- default=150.0,
94
- description="Engineer hourly rate in USD"
95
- )
96
-
97
- engineer_annual_cost: float = Field(
98
- default=125000.0,
99
- description="Engineer annual cost in USD"
100
- )
101
-
102
- default_savings_rate: float = Field(
103
- default=0.82,
104
- description="Default savings rate with ARF"
105
- )
106
-
107
- # ===== UI Configuration =====
108
- auto_refresh_seconds: int = Field(
109
- default=30,
110
- description="Auto-refresh interval in seconds"
111
- )
112
-
113
- max_history_items: int = Field(
114
- default=100,
115
- description="Maximum history items to display"
116
- )
117
-
118
- # ===== Demo Configuration =====
119
- default_scenario: str = Field(
120
- default="Cache Miss Storm",
121
- description="Default incident scenario"
122
- )
123
-
124
- scenario_config_path: str = Field(
125
- default="config/scenarios",
126
- description="Path to scenario configuration files"
127
- )
128
-
129
- # ===== Safety Configuration =====
130
- default_safety_mode: SafetyMode = Field(
131
- default=SafetyMode.ADVISORY,
132
- description="Default safety mode"
133
- )
134
-
135
- require_approval: bool = Field(
136
- default=True,
137
- description="Require human approval for execution"
138
- )
139
-
140
- # ===== Validation =====
141
- if PYDANTIC_V2:
142
- @field_validator("arf_api_key")
143
- @classmethod
144
- def validate_api_key(cls, v: Optional[str], info) -> Optional[str]:
145
- if info.data.get("arf_mode") == ARFMode.ENTERPRISE and not v:
146
- raise ValueError("ARF API key required for Enterprise mode")
147
- return v
148
 
149
- @field_validator("use_mock_arf")
150
- @classmethod
151
- def validate_mock_mode(cls, v: bool, info) -> bool:
152
- if info.data.get("arf_mode") == ARFMode.DEMO:
153
- return True
154
- return v
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  else:
156
- @validator("arf_api_key")
157
- def validate_api_key(cls, v: Optional[str], values: Dict[str, Any]) -> Optional[str]:
158
- if values.get("arf_mode") == ARFMode.ENTERPRISE and not v:
159
- raise ValueError("ARF API key required for Enterprise mode")
160
- return v
161
-
162
- @validator("use_mock_arf")
163
- def validate_mock_mode(cls, v: bool, values: Dict[str, Any]) -> bool:
164
- if values.get("arf_mode") == ARFMode.DEMO:
165
- return True
166
- return v
167
-
168
- # Pydantic v2 config
169
- if PYDANTIC_V2:
170
- model_config = ConfigDict(
171
- env_file=".env",
172
- env_file_encoding="utf-8",
173
- case_sensitive=False,
174
- use_enum_values=True,
175
- extra="ignore"
176
- )
177
  else:
178
- class Config:
179
- env_file = ".env"
180
- env_file_encoding = "utf-8"
181
- case_sensitive = False
182
- use_enum_values = True
183
-
184
-
185
- # Global settings instance with robust fallback
186
- try:
187
- settings = Settings()
188
- logger.info("Settings loaded successfully")
189
- except Exception as e:
190
- logger.warning(f"Failed to load settings from .env: {e}, using defaults")
191
- # Provide comprehensive defaults
192
- settings = Settings(
193
- arf_mode=ARFMode.DEMO,
194
- use_mock_arf=True,
195
- arf_api_key=None,
196
- arf_base_url="https://api.arf.dev",
197
- engineer_hourly_rate=150.0,
198
- engineer_annual_cost=125000.0,
199
- default_savings_rate=0.82,
200
- auto_refresh_seconds=30,
201
- max_history_items=100,
202
- default_scenario="Cache Miss Storm",
203
- scenario_config_path="config/scenarios",
204
- default_safety_mode=SafetyMode.ADVISORY,
205
- require_approval=True
206
- )
207
-
208
-
209
- def get_settings() -> Settings:
210
- """Get settings instance (singleton pattern)"""
211
- return settings
212
-
213
-
214
- def print_settings_summary() -> None:
215
- """Print a summary of current settings (for debugging)"""
216
- summary = {
217
- "mode": settings.arf_mode.value,
218
- "mock_mode": settings.use_mock_arf,
219
- "default_scenario": settings.default_scenario,
220
- "safety_mode": settings.default_safety_mode.value,
221
- "requires_approval": settings.require_approval
222
  }
223
- logger.info(f"Settings summary: {summary}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # demo/mock_arf.py
2
  """
3
+ Enhanced Mock ARF with scenario-aware metrics
4
+ Generates different values based on scenario characteristics
5
  """
6
+ import random
7
+ import time
8
+ from typing import Dict, Any, List
9
+ import json
10
 
11
# Per-scenario tuning knobs for the mock ARF engine.  Every "*_range" value is
# an inclusive (low, high) pair that the helpers below sample from, so each
# scenario produces plausibly different detection and impact numbers.

def _scenario_profile(detection_confidence_range, detection_time_range,
                      accuracy_range, similar_incidents_range,
                      similarity_score_range, pattern_confidence_range,
                      success_rate_range, cost_savings_range,
                      resolution_time_range, affected_users_range, tags):
    """Assemble one scenario configuration dict from its tuning ranges."""
    return {
        "detection_confidence_range": detection_confidence_range,
        "detection_time_range": detection_time_range,
        "accuracy_range": accuracy_range,
        "similar_incidents_range": similar_incidents_range,
        "similarity_score_range": similarity_score_range,
        "pattern_confidence_range": pattern_confidence_range,
        "success_rate_range": success_rate_range,
        "cost_savings_range": cost_savings_range,
        "resolution_time_range": resolution_time_range,
        "affected_users_range": affected_users_range,
        "tags": tags,
    }


SCENARIO_CONFIGS = {
    "Cache Miss Storm": _scenario_profile(
        detection_confidence_range=(0.97, 0.995),   # 97-99.5%
        detection_time_range=(35, 55),              # 35-55 seconds
        accuracy_range=(0.97, 0.995),               # 97-99.5%
        similar_incidents_range=(2, 5),             # 2-5 similar incidents
        similarity_score_range=(0.88, 0.96),        # 88-96% similarity
        pattern_confidence_range=(0.91, 0.97),      # 91-97% confidence
        success_rate_range=(0.82, 0.93),            # 82-93% success rate
        cost_savings_range=(5000, 9000),            # $5K-$9K savings
        resolution_time_range=(10, 18),             # 10-18 minutes
        affected_users_range=(30000, 60000),        # 30K-60K users
        tags=["cache", "redis", "latency", "memory"],
    ),
    "Database Connection Pool Exhaustion": _scenario_profile(
        detection_confidence_range=(0.92, 0.98),
        detection_time_range=(40, 65),
        accuracy_range=(0.95, 0.985),
        similar_incidents_range=(1, 4),
        similarity_score_range=(0.85, 0.94),
        pattern_confidence_range=(0.88, 0.95),
        success_rate_range=(0.78, 0.88),
        cost_savings_range=(3500, 5500),
        resolution_time_range=(15, 25),
        affected_users_range=(15000, 30000),
        tags=["database", "postgres", "connections", "pool"],
    ),
    "Kubernetes Memory Leak": _scenario_profile(
        detection_confidence_range=(0.94, 0.99),
        detection_time_range=(30, 50),
        accuracy_range=(0.96, 0.99),
        similar_incidents_range=(3, 6),
        similarity_score_range=(0.89, 0.95),
        pattern_confidence_range=(0.90, 0.96),
        success_rate_range=(0.85, 0.92),
        cost_savings_range=(4500, 7500),
        resolution_time_range=(12, 22),
        affected_users_range=(20000, 40000),
        tags=["kubernetes", "memory", "container", "leak"],
    ),
    "API Rate Limit Storm": _scenario_profile(
        detection_confidence_range=(0.96, 0.99),
        detection_time_range=(25, 45),
        accuracy_range=(0.97, 0.99),
        similar_incidents_range=(2, 4),
        similarity_score_range=(0.87, 0.93),
        pattern_confidence_range=(0.89, 0.94),
        success_rate_range=(0.80, 0.90),
        cost_savings_range=(3000, 5000),
        resolution_time_range=(8, 15),
        affected_users_range=(10000, 25000),
        tags=["api", "rate_limit", "throttling", "ddos"],
    ),
    "Network Partition": _scenario_profile(
        detection_confidence_range=(0.98, 0.999),
        detection_time_range=(20, 40),
        accuracy_range=(0.98, 0.995),
        similar_incidents_range=(1, 3),
        similarity_score_range=(0.90, 0.97),
        pattern_confidence_range=(0.93, 0.98),
        success_rate_range=(0.75, 0.85),
        cost_savings_range=(8000, 15000),
        resolution_time_range=(20, 35),
        affected_users_range=(50000, 100000),
        tags=["network", "partition", "connectivity", "failure"],
    ),
    "Storage I/O Saturation": _scenario_profile(
        detection_confidence_range=(0.93, 0.98),
        detection_time_range=(45, 70),
        accuracy_range=(0.94, 0.98),
        similar_incidents_range=(2, 5),
        similarity_score_range=(0.86, 0.92),
        pattern_confidence_range=(0.87, 0.93),
        success_rate_range=(0.79, 0.87),
        cost_savings_range=(5500, 8500),
        resolution_time_range=(18, 28),
        affected_users_range=(25000, 45000),
        tags=["storage", "disk", "io", "saturation"],
    ),
}


def get_scenario_config(scenario_name: str) -> Dict[str, Any]:
    """Look up the tuning profile for *scenario_name*.

    Unknown names get a freshly built, middle-of-the-road default profile,
    so callers never have to handle a missing scenario.
    """
    profile = SCENARIO_CONFIGS.get(scenario_name)
    if profile is not None:
        return profile
    # Fallback: generic ranges for scenarios we have no tuning for.
    return _scenario_profile(
        detection_confidence_range=(0.90, 0.98),
        detection_time_range=(30, 60),
        accuracy_range=(0.92, 0.98),
        similar_incidents_range=(1, 3),
        similarity_score_range=(0.85, 0.95),
        pattern_confidence_range=(0.85, 0.95),
        success_rate_range=(0.75, 0.90),
        cost_savings_range=(4000, 8000),
        resolution_time_range=(15, 30),
        affected_users_range=(20000, 50000),
        tags=["unknown", "incident"],
    )
108
 
109
def simulate_arf_analysis(scenario_data: Dict[str, Any]) -> Dict[str, Any]:
    """Produce a mock ARF detection result tailored to one scenario.

    Args:
        scenario_data: Scenario description; only the optional keys
            ``name``, ``severity`` and ``component`` are read.

    Returns:
        Dict describing the simulated detection: confidence, timing
        (seconds and milliseconds), accuracy, and the scenario's tags.
    """
    name = scenario_data.get("name", "Unknown Scenario")
    profile = get_scenario_config(name)

    # Draw the three metrics in a fixed order so seeded runs reproduce:
    # confidence, then detection time, then accuracy.
    confidence = random.uniform(*profile["detection_confidence_range"])
    seconds = random.randint(*profile["detection_time_range"])
    accuracy = random.uniform(*profile["accuracy_range"])

    result = {
        "analysis_complete": True,
        "anomaly_detected": True,
        "severity": scenario_data.get("severity", "HIGH"),
        "confidence": round(confidence, 3),        # 3-decimal precision
        "detection_time_ms": seconds * 1000,       # milliseconds for display
        "detection_time_seconds": seconds,
        "accuracy": round(accuracy, 3),
        "component": scenario_data.get("component", "unknown"),
        "scenario_specific": True,
        "scenario_name": name,
        "tags": profile["tags"],
    }
    return result
140
+
141
def run_rag_similarity_search(scenario_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Fabricate a RAG lookup of historical incidents similar to this one.

    Args:
        scenario_data: Scenario description; reads the optional keys
            ``name`` and ``component``.

    Returns:
        List of mock incident records; the count and metric ranges come
        from the scenario's tuning profile.
    """
    name = scenario_data.get("name", "Unknown Scenario")
    profile = get_scenario_config(name)
    lowered = name.lower()

    # Ordered keyword table mapping scenario type to plausible fixes;
    # first match wins, unknown scenarios get a generic menu.
    resolution_menu = [
        (("cache", "redis"), ["scale_out", "warm_cache", "memory_increase", "add_replicas"]),
        (("database",), ["restart", "connection_pool_resize", "index_optimization", "vacuum"]),
        (("kubernetes",), ["restart_pod", "memory_limit_increase", "node_drain", "resource_quota"]),
        (("api",), ["circuit_breaker", "rate_limit_increase", "caching", "load_balancer"]),
        (("network",), ["route_update", "failover", "bandwidth_increase", "redundancy"]),
        (("storage",), ["io_optimization", "disk_upgrade", "cache_addition", "load_distribution"]),
    ]

    count = random.randint(*profile["similar_incidents_range"])
    now = int(time.time())
    results: List[Dict[str, Any]] = []

    for idx in range(count):
        # Per-incident metrics are sampled in a fixed order (seed-stable).
        score = random.uniform(*profile["similarity_score_range"])
        savings = random.randint(*profile["cost_savings_range"])
        minutes = random.randint(*profile["resolution_time_range"])
        users = random.randint(*profile["affected_users_range"])

        options = ["investigate", "scale", "restart", "optimize"]
        for keywords, candidates in resolution_menu:
            if any(word in lowered for word in keywords):
                options = candidates
                break
        resolution = random.choice(options)

        results.append({
            "incident_id": f"inc_{now - random.randint(1, 90)}_00{idx}",
            "similarity_score": round(score, 3),
            "success": random.random() > 0.15,  # roughly 85% of past fixes worked
            "resolution": resolution,
            "cost_savings": savings,
            "detection_time": f"{random.randint(30, 60)}s",
            "resolution_time": f"{minutes}m",
            "pattern": f"{lowered.replace(' ', '_')}_v{random.randint(1, 3)}",
            "affected_users": users,
            "component_match": scenario_data.get("component", "unknown"),
            "rag_source": "production_memory_v3",
            "timestamp": f"2024-{random.randint(1, 12):02d}-{random.randint(1, 28):02d}",
        })

    return results
198
 
199
def calculate_pattern_confidence(scenario_data: Dict[str, Any], similar_incidents: List[Dict[str, Any]]) -> float:
    """Score how confidently the mock engine recognizes this pattern.

    Blends the mean similarity (60%) with the historical success rate
    (40%) of the retrieved incidents, clamped to the scenario's configured
    confidence range.  With no incidents, a random in-range value is
    returned instead.

    Args:
        scenario_data: Scenario description; reads the optional ``name``.
        similar_incidents: Records from the RAG search; each is expected
            to carry ``similarity_score`` and ``success``.

    Returns:
        Pattern confidence score in [0, 1], rounded to 3 decimals.
    """
    profile = get_scenario_config(scenario_data.get("name", "Unknown Scenario"))
    lo, hi = profile["pattern_confidence_range"]

    if not similar_incidents:
        # Nothing retrieved: fall back to a plausible in-range value.
        return random.uniform(lo, hi)

    n = len(similar_incidents)
    mean_similarity = sum(inc["similarity_score"] for inc in similar_incidents) / n
    mean_success = sum(1.0 for inc in similar_incidents if inc["success"]) / n

    # Weighted blend: 60% similarity, 40% success rate, kept inside the
    # scenario's configured band.
    blended = mean_similarity * 0.6 + mean_success * 0.4
    return round(min(hi, max(lo, blended)), 3)
231
+
232
def create_mock_healing_intent(scenario_data: Dict[str, Any], similar_incidents: List[Dict[str, Any]], confidence: float) -> Dict[str, Any]:
    """
    Create mock healing intent based on scenario and similar incidents

    Picks a remediation action by keyword-matching the incident's component
    name, then derives success-rate and impact estimates from the retrieved
    similar incidents (falling back to the scenario profile's ranges when
    none were found).

    Args:
        scenario_data: Dictionary containing scenario information; reads
            the optional keys "name" and "component"
        similar_incidents: List of similar incidents from RAG search; each
            record is expected to carry "success", "cost_savings" and a
            "resolution_time" string like "15m"
        confidence: Pattern confidence score (0-1)

    Returns:
        Healing intent dictionary (advisory-only; includes the chosen
        action/parameters, estimated impact and safety-check metadata)
    """
    scenario_name = scenario_data.get("name", "Unknown Scenario")
    config = get_scenario_config(scenario_name)

    component = scenario_data.get("component", "unknown")

    # Determine action based on component and scenario.
    # First keyword hit wins; unknown components fall through to a generic
    # "investigate" action.  Parameter strings use a "before→after" arrow
    # (U+2192) purely for UI display.
    if "cache" in component.lower() or "redis" in component.lower():
        action = "scale_out"
        parameters = {
            "nodes": f"{random.randint(2, 4)}→{random.randint(5, 8)}",
            "memory": f"{random.randint(8, 16)}GB→{random.randint(24, 64)}GB",
            "strategy": "gradual_scale",
            "region": "auto-select"
        }
    elif "database" in component.lower():
        action = "restart"
        parameters = {
            "connections": f"{random.randint(50, 100)}→{random.randint(150, 300)}",
            "timeout": f"{random.randint(30, 60)}s",
            "strategy": "rolling_restart",
            "maintenance_window": "immediate"
        }
    elif "kubernetes" in component.lower():
        action = "memory_limit_increase"
        parameters = {
            "memory": f"{random.randint(512, 1024)}Mi→{random.randint(2048, 4096)}Mi",
            "strategy": "pod_restart",
            "drain_timeout": f"{random.randint(5, 15)}m"
        }
    elif "api" in component.lower():
        action = "circuit_breaker"
        parameters = {
            "threshold": f"{random.randint(70, 85)}%",
            "window": f"{random.randint(3, 10)}m",
            "fallback": "cached_response",
            "retry_after": f"{random.randint(30, 120)}s"
        }
    elif "network" in component.lower():
        action = "failover"
        parameters = {
            "primary": "us-east-1",
            "secondary": "us-west-2",
            "timeout": f"{random.randint(10, 30)}s",
            "health_check": "enhanced"
        }
    elif "storage" in component.lower():
        action = "io_optimization"
        parameters = {
            "iops": f"{random.randint(1000, 3000)}→{random.randint(5000, 10000)}",
            "throughput": f"{random.randint(100, 250)}MB/s→{random.randint(500, 1000)}MB/s",
            "cache_size": f"{random.randint(8, 16)}GB→{random.randint(32, 64)}GB"
        }
    else:
        action = "investigate"
        parameters = {
            "priority": "high",
            "escalation": "tier2",
            "timeout": "30m"
        }

    # Calculate success rate from similar incidents: the observed fraction
    # of past successes; otherwise sample the profile's configured range.
    if similar_incidents:
        success_count = sum(1 for inc in similar_incidents if inc["success"])
        success_rate = success_count / len(similar_incidents)
    else:
        success_rate = random.uniform(*config["success_rate_range"])

    # Calculate estimated impact: averages over retrieved incidents;
    # "resolution_time" strings like "15m" are parsed by stripping the
    # trailing 'm'.  With no incidents, use the midpoint of each range.
    if similar_incidents:
        avg_cost_savings = sum(inc["cost_savings"] for inc in similar_incidents) / len(similar_incidents)
        avg_resolution_time = sum(int(inc["resolution_time"].replace('m', '')) for inc in similar_incidents) / len(similar_incidents)
    else:
        avg_cost_savings = sum(config["cost_savings_range"]) / 2
        avg_resolution_time = sum(config["resolution_time_range"]) / 2

    return {
        "action": action,
        "component": component,
        "confidence": round(confidence, 3),
        "parameters": parameters,
        "source": "mock_analysis",
        "requires_enterprise": True,
        "advisory_only": True,
        "success_rate": round(success_rate, 3),
        "estimated_impact": {
            "cost_savings": int(avg_cost_savings),
            "resolution_time_minutes": int(avg_resolution_time),
            "users_protected": random.randint(*config["affected_users_range"]),
            "mttr_reduction": f"{random.randint(60, 80)}%"
        },
        "safety_checks": {
            "blast_radius": f"{random.randint(1, 3)} services",
            "business_hours": "compliant",
            "rollback_plan": "available",
            "approval_required": True,
            # Below 0.9 pattern confidence the change is flagged "medium".
            "risk_level": "medium" if confidence < 0.9 else "low"
        },
        "scenario_specific": True,
        "scenario_name": scenario_name
    }
+
345
def get_scenario_metrics(scenario_name: str) -> Dict[str, Any]:
    """Sample a one-off set of headline metrics for *scenario_name*.

    Args:
        scenario_name: Name of the scenario (unknown names use the
            default profile).

    Returns:
        Dict of freshly sampled metrics, plus the raw ranges and tags
        from the scenario's profile.
    """
    profile = get_scenario_config(scenario_name)

    # Draw each metric in a fixed order so seeded runs reproduce.
    detection_confidence = round(random.uniform(*profile["detection_confidence_range"]), 3)
    detection_time = random.randint(*profile["detection_time_range"])
    accuracy = round(random.uniform(*profile["accuracy_range"]), 3)
    expected_similar = random.randint(*profile["similar_incidents_range"])
    avg_similarity = round(random.uniform(*profile["similarity_score_range"]), 3)
    pattern_confidence = round(random.uniform(*profile["pattern_confidence_range"]), 3)
    success_rate = round(random.uniform(*profile["success_rate_range"]), 3)

    return {
        "detection_confidence": detection_confidence,
        "detection_time_seconds": detection_time,
        "accuracy": accuracy,
        "expected_similar_incidents": expected_similar,
        "avg_similarity_score": avg_similarity,
        "pattern_confidence": pattern_confidence,
        "success_rate": success_rate,
        "cost_savings_range": profile["cost_savings_range"],
        "resolution_time_range": profile["resolution_time_range"],
        "affected_users_range": profile["affected_users_range"],
        "tags": profile["tags"],
    }