petter2025 committed on
Commit
0b88148
·
verified ·
1 Parent(s): bd96870

Update demo/mock_arf.py

Browse files
Files changed (1) hide show
  1. demo/mock_arf.py +209 -430
demo/mock_arf.py CHANGED
@@ -1,444 +1,223 @@
1
  """
2
- Enhanced Mock ARF components for demo purposes
3
- In production, these would use the real agentic-reliability-framework package
4
  """
5
- import time
6
- import json
7
- import hashlib
8
- from typing import Dict, Any, List, Optional
9
- import random
10
  import logging
11
- from datetime import datetime, timedelta
12
 
13
  logger = logging.getLogger(__name__)
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
class MockARFSimulator:
    """Enhanced mock ARF simulator with realistic incident patterns.

    Produces plausible analysis results, RAG similarity hits and healing
    intents for demo purposes; in production these would come from the real
    agentic-reliability-framework package.
    """

    def __init__(self, seed: Optional[int] = None):
        # BUGFIX: the original used `seed or int(time.time())`, which
        # silently discarded an explicit seed of 0; compare against None
        # so any integer seed is honoured (deterministic demos).
        self.seed = seed if seed is not None else int(time.time())
        random.seed(self.seed)
        self._incident_patterns = self._initialize_patterns()
        self._healing_actions = self._initialize_healing_actions()

    def _initialize_patterns(self) -> Dict[str, Dict[str, Any]]:
        """Initialize realistic incident patterns."""
        return {
            "cache_miss_storm": {
                "pattern": "exponential_miss_increase",
                "indicators": ["cache_hit_rate < 30%", "database_load > 80%", "response_time > 1500ms"],
                "typical_causes": ["key_eviction", "cold_cache", "traffic_spike"],
                "resolution_patterns": ["scale_out", "cache_warming", "ttl_optimization"]
            },
            "db_connection_exhaustion": {
                "pattern": "connection_pool_saturation",
                "indicators": ["active_connections > 95%", "connection_wait > 30s", "query_timeout_rate > 10%"],
                "typical_causes": ["connection_leak", "slow_queries", "connection_pool_misconfig"],
                "resolution_patterns": ["pool_tuning", "query_optimization", "circuit_breaker"]
            },
            "memory_leak": {
                "pattern": "gradual_memory_increase",
                "indicators": ["memory_usage > 90%", "gc_frequency_high", "restart_count_increasing"],
                "typical_causes": ["object_retention", "resource_leak", "cache_growth"],
                "resolution_patterns": ["heap_analysis", "restart", "memory_limit"]
            },
            "api_rate_limit": {
                "pattern": "rate_limit_cascade",
                "indicators": ["429_rate > 40%", "retry_storm", "cascade_failures"],
                "typical_causes": ["burst_traffic", "misconfigured_limits", "retry_logic"],
                "resolution_patterns": ["backoff_strategy", "circuit_breaker", "cache_responses"]
            }
        }

    def _initialize_healing_actions(self) -> Dict[str, Dict[str, Any]]:
        """Initialize healing actions with success rates."""
        return {
            "scale_out": {
                "action": "increase_capacity",
                "success_rate": 0.87,
                "typical_recovery_time": "5-15 minutes",
                "risk_level": "low",
                "prerequisites": ["capacity_available", "auto_scaling_enabled"]
            },
            "cache_warming": {
                "action": "preload_cache",
                "success_rate": 0.72,
                "typical_recovery_time": "2-10 minutes",
                "risk_level": "very_low",
                "prerequisites": ["predictive_model", "cache_pattern_known"]
            },
            "restart_container": {
                "action": "graceful_restart",
                "success_rate": 0.95,
                "typical_recovery_time": "1-3 minutes",
                "risk_level": "medium",
                "prerequisites": ["health_checks", "load_balancer", "redundancy"]
            },
            "circuit_breaker": {
                "action": "fail_fast_protection",
                "success_rate": 0.89,
                "typical_recovery_time": "instant",
                "risk_level": "low",
                "prerequisites": ["dependency_awareness", "fallback_strategy"]
            }
        }

    def simulate_arf_analysis(self, scenario: Dict[str, Any]) -> Dict[str, Any]:
        """Simulate the ARF analysis pipeline for one scenario dict.

        Returns a result dict mixing deterministic fields (pattern name,
        severity) with randomized timing metadata for realism.
        """
        component = scenario.get('component', 'unknown')
        pattern_name = self._detect_pattern(component, scenario)

        return {
            "analysis_complete": True,
            "anomaly_detected": True,
            "severity": self._determine_severity(scenario),
            "root_cause": scenario.get('root_cause', 'resource_constraint'),
            "pattern_detected": True,
            "pattern_name": pattern_name,
            "pattern_confidence": self._calculate_pattern_confidence(pattern_name),
            "detection_method": "ensemble_ml_model",
            "detection_time_ms": random.randint(150, 350),
            "analysis_timestamp": time.time(),
            "processing_time_ms": random.randint(200, 500),
            "model_version": "arf-ml-v3.3.6",
            "features_analyzed": self._extract_features(scenario)
        }

    def run_rag_similarity_search(self, scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Simulate a RAG similarity search, returning 3-5 fake incidents.

        Incidents are sorted by similarity (descending) and share one
        rag_metadata dict describing the simulated retrieval.
        """
        component = scenario.get('component', 'redis_cache')
        pattern_name = self._detect_pattern(component, scenario)

        # Generate realistic similar incidents
        similar_incidents = []
        base_time = time.time()

        for i in range(random.randint(3, 5)):
            days_ago = random.randint(1, 90)
            incident_time = base_time - (days_ago * 86400)

            similarity = random.uniform(0.75, 0.95)
            # Incidents above this similarity threshold count as resolved.
            success = similarity > 0.82

            incident = {
                "incident_id": f"inc_{int(incident_time)}_{i}",
                "component": component,
                "pattern": pattern_name,
                "similarity_score": similarity,
                "cosine_similarity": similarity,
                "success": success,
                "resolution": self._get_recommended_action(component),
                "actions_taken": self._get_action_sequence(component, success),
                "resolution_time_minutes": random.uniform(3.5, 18.5),
                "timestamp": incident_time,
                "occurred_at": datetime.fromtimestamp(incident_time).isoformat(),
                "engineers_involved": random.randint(1, 3),
                "blast_radius": f"{random.randint(1, 5)} services",
                "root_cause_analysis": self._generate_root_cause(component)
            }

            if success:
                cost_saved = random.randint(1500, 12500)
                incident["cost_savings"] = cost_saved
                incident["mttr_reduction"] = f"{random.randint(60, 85)}%"
                incident["user_impact"] = f"{random.randint(85, 99)}% reduction"

            similar_incidents.append(incident)

        # Sort by similarity
        similar_incidents.sort(key=lambda x: x['similarity_score'], reverse=True)

        # Add RAG metadata (shared reference across all incidents)
        rag_metadata = {
            "vector_db": "chroma_v0.4.0",
            "embedding_model": "all-MiniLM-L6-v2",
            "index_size": f"{random.randint(500, 5000)} incidents",
            "retrieval_time_ms": random.randint(45, 120),
            "top_k": len(similar_incidents)
        }

        for incident in similar_incidents:
            incident["rag_metadata"] = rag_metadata

        return similar_incidents

    def calculate_pattern_confidence(self, scenario: Dict[str, Any],
                                     similar_incidents: List[Dict[str, Any]]) -> float:
        """Calculate overall pattern-detection confidence in [0.70, 0.98].

        Combines the per-pattern base confidence with boosts for incident
        count, average similarity, historical success rate and recency.
        """
        if not similar_incidents:
            return 0.70  # Base confidence without similar incidents

        # Base confidence from pattern matching
        component = scenario.get('component', 'unknown')
        pattern_name = self._detect_pattern(component, scenario)
        base_confidence = self._calculate_pattern_confidence(pattern_name)

        # Boost based on number of similar incidents
        incident_count = len(similar_incidents)
        incident_boost = min(0.15, incident_count * 0.025)

        # Boost based on average similarity
        avg_similarity = sum(i['similarity_score'] for i in similar_incidents) / incident_count
        similarity_boost = avg_similarity * 0.12

        # Boost based on success rate
        success_count = sum(1 for i in similar_incidents if i['success'])
        success_rate = success_count / incident_count
        success_boost = success_rate * 0.10

        # Boost based on recency (weight recent incidents more)
        recency_boost = self._calculate_recency_boost(similar_incidents)

        total_confidence = (
            base_confidence +
            incident_boost +
            similarity_boost +
            success_boost +
            recency_boost
        )

        # Cap at 0.98 and ensure minimum
        return max(0.70, min(0.98, total_confidence))

    def create_mock_healing_intent(self, scenario: Dict[str, Any],
                                   similar_incidents: List[Dict[str, Any]],
                                   confidence: float = 0.85) -> Dict[str, Any]:
        """Create a realistic mock HealingIntent dict for the scenario."""

        component = scenario.get('component', 'redis_cache')
        pattern_name = self._detect_pattern(component, scenario)

        # Determine action based on component and pattern
        action_info = self._determine_healing_action(component, pattern_name)

        # Deterministic ID derived from the action parameters.
        # NOTE: md5 is used purely for short non-security ID generation.
        params_hash = hashlib.md5(
            json.dumps(action_info['parameters'], sort_keys=True).encode()
        ).hexdigest()[:8]

        # Calculate RAG similarity metrics
        rag_metrics = self._calculate_rag_metrics(similar_incidents)

        # Create healing intent
        healing_intent = {
            "action": action_info['action'],
            "component": component,
            "pattern": pattern_name,
            "parameters": action_info['parameters'],
            "justification": action_info['justification'],
            "confidence": confidence,
            "incident_id": f"inc_{int(time.time())}",
            "detected_at": time.time(),
            "similar_incidents_count": len(similar_incidents),
            "rag_similarity_score": rag_metrics['avg_similarity'],
            "rag_metrics": rag_metrics,
            "source": "oss_analysis",
            "intent_id": f"intent_{int(time.time())}_{params_hash}",
            "created_at": time.time(),
            "status": "created",
            "edition": "community",
            "requires_enterprise": True,
            "execution_allowed": False,
            "safety_checks": {
                "blast_radius": f"{random.randint(1, 3)} services",
                "business_hours": "compliant",
                "rollback_plan": "available",
                "approval_required": True,
                "risk_assessment": "low",
                "compliance_check": "passed"
            },
            "expected_outcome": {
                "recovery_time_minutes": action_info['recovery_time'],
                "success_probability": action_info['success_rate'],
                "cost_savings_estimate": self._estimate_savings(scenario),
                "user_impact_reduction": f"{random.randint(85, 99)}%"
            },
            "deterministic_id": f"intent_{params_hash}"
        }

        return healing_intent

    # Helper methods
    def _detect_pattern(self, component: str, scenario: Dict[str, Any]) -> str:
        """Map a component name to a known incident pattern (substring match)."""
        if 'cache' in component.lower():
            return "cache_miss_storm"
        elif 'database' in component.lower() or 'postgres' in component.lower():
            return "db_connection_exhaustion"
        elif 'memory' in component.lower() or 'java' in component.lower():
            return "memory_leak"
        elif 'api' in component.lower() or 'rate' in component.lower():
            return "api_rate_limit"
        else:
            return "unknown_pattern"

    def _determine_severity(self, scenario: Dict[str, Any]) -> str:
        """Determine incident severity from scenario metrics (random fallback)."""
        metrics = scenario.get('metrics', {})

        if 'error_rate' in metrics and metrics['error_rate'] > 30:
            return "critical"
        elif 'response_time_ms' in metrics and metrics['response_time_ms'] > 2000:
            return "critical"
        elif 'memory_usage' in metrics and metrics['memory_usage'] > 90:
            return "high"
        else:
            return random.choice(["high", "medium"])

    def _calculate_pattern_confidence(self, pattern_name: str) -> float:
        """Return the fixed base confidence for a known pattern name."""
        confidence_map = {
            "cache_miss_storm": 0.92,
            "db_connection_exhaustion": 0.88,
            "memory_leak": 0.85,
            "api_rate_limit": 0.90,
            "unknown_pattern": 0.70
        }
        return confidence_map.get(pattern_name, 0.75)

    def _extract_features(self, scenario: Dict[str, Any]) -> List[str]:
        """Extract up to 10 "key:value" feature strings plus derived flags."""
        features = []
        metrics = scenario.get('metrics', {})

        for key, value in metrics.items():
            if isinstance(value, (int, float)):
                features.append(f"{key}:{value}")

        # Add derived features
        if 'cache_hit_rate' in metrics and metrics['cache_hit_rate'] < 30:
            features.append("cache_miss_critical")
        if 'error_rate' in metrics and metrics['error_rate'] > 10:
            features.append("error_rate_high")

        return features[:10]  # Limit to 10 features

    def _get_recommended_action(self, component: str) -> str:
        """Get the recommended healing action name for a component."""
        if 'cache' in component.lower():
            return 'scale_out'
        elif 'database' in component.lower():
            return 'optimize_connections'
        elif 'memory' in component.lower():
            return 'restart_container'
        else:
            return 'circuit_breaker'

    def _get_action_sequence(self, component: str, success: bool) -> List[str]:
        """Get the simulated sequence of remediation actions taken."""
        base_actions = []

        if 'cache' in component.lower():
            base_actions = ["scale_out", "adjust_cache_ttl", "implement_warming"]
        elif 'database' in component.lower():
            base_actions = ["increase_pool_size", "add_timeout", "optimize_queries"]

        # Successful incidents sometimes end with follow-up monitoring.
        if success and random.random() > 0.5:
            base_actions.append("add_monitoring")

        return base_actions

    def _generate_root_cause(self, component: str) -> str:
        """Pick a realistic random root cause string for the component."""
        causes = {
            'cache': ["key_eviction_policy", "cold_cache_after_deploy", "traffic_spike_2x"],
            'database': ["connection_leak_in_pool", "slow_query_cascade", "max_connections_limit"],
            'memory': ["object_retention_in_cache", "thread_local_leak", "off_heap_memory_growth"]
        }

        for key, cause_list in causes.items():
            if key in component.lower():
                return random.choice(cause_list)

        return "resource_constraint_under_load"

    def _calculate_recency_boost(self, incidents: List[Dict[str, Any]]) -> float:
        """Boost (max 0.08) based on how many incidents are under a week old."""
        if not incidents:
            return 0.0

        now = time.time()
        recent_count = 0

        for incident in incidents:
            incident_time = incident.get('timestamp', now)
            days_ago = (now - incident_time) / 86400

            if days_ago < 7:  # Within last week
                recent_count += 1

        return min(0.08, recent_count * 0.02)

    def _determine_healing_action(self, component: str, pattern: str) -> Dict[str, Any]:
        """Determine a healing action (with parameters) for the component."""
        if 'cache' in component.lower():
            return {
                "action": 'scale_out',
                "parameters": {'scale_factor': random.choice([2, 3]), 'cache_ttl': 300},
                "justification": "Scale Redis cluster and adjust cache TTL based on historical pattern",
                "success_rate": 0.87,
                "recovery_time": "5-15 minutes"
            }
        elif 'database' in component.lower():
            return {
                "action": 'optimize_connections',
                "parameters": {'max_connections': 200, 'connection_timeout': 30},
                "justification": "Optimize database connection pool settings based on load patterns",
                "success_rate": 0.82,
                "recovery_time": "2-8 minutes"
            }
        else:
            return {
                "action": 'restart_container',
                "parameters": {'grace_period': 30, 'drain_connections': True},
                "justification": "Restart container to resolve memory issues with graceful shutdown",
                "success_rate": 0.95,
                "recovery_time": "1-3 minutes"
            }

    def _calculate_rag_metrics(self, incidents: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Aggregate similarity statistics across retrieved incidents."""
        if not incidents:
            return {
                "avg_similarity": 0.0,
                "similarity_std": 0.0,
                "coverage_score": 0.0
            }

        similarities = [i.get('similarity_score', 0) for i in incidents]
        mean = sum(similarities) / len(similarities)

        # BUGFIX: the original called np.std() but numpy was never imported,
        # raising NameError at runtime; compute the population standard
        # deviation (same as np.std's default ddof=0) inline instead.
        if len(similarities) > 1:
            variance = sum((s - mean) ** 2 for s in similarities) / len(similarities)
            similarity_std = variance ** 0.5
        else:
            similarity_std = 0.0

        return {
            "avg_similarity": mean,
            "similarity_std": similarity_std,
            "coverage_score": min(1.0, len(incidents) / 5),
            "diversity_score": random.uniform(0.6, 0.9)
        }

    def _estimate_savings(self, scenario: Dict[str, Any]) -> int:
        """Estimate cost savings as 70-90% of the scenario's hourly loss."""
        impact = scenario.get('business_impact', {})
        revenue_loss = impact.get('revenue_loss_per_hour', 5000)

        # 70-90% savings estimate
        savings_percentage = random.uniform(0.7, 0.9)
        return int(revenue_loss * savings_percentage)
426
 
427
 
428
# Module-level simulator shared by the backward-compatible wrappers below.
_simulator = MockARFSimulator()


def simulate_arf_analysis(scenario: Dict[str, Any]) -> Dict[str, Any]:
    """Run the mock ARF analysis pipeline on *scenario* (module-level API)."""
    return _simulator.simulate_arf_analysis(scenario)


def run_rag_similarity_search(scenario: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Return simulated similar incidents for *scenario* (module-level API)."""
    return _simulator.run_rag_similarity_search(scenario)


def calculate_pattern_confidence(scenario: Dict[str, Any], similar_incidents: List[Dict[str, Any]]) -> float:
    """Return the combined pattern confidence (module-level API)."""
    return _simulator.calculate_pattern_confidence(scenario, similar_incidents)


def create_mock_healing_intent(scenario: Dict[str, Any],
                               similar_incidents: List[Dict[str, Any]],
                               confidence: float = 0.85) -> Dict[str, Any]:
    """Build a mock HealingIntent dict (module-level API)."""
    return _simulator.create_mock_healing_intent(scenario, similar_incidents, confidence)
 
 
 
 
 
 
 
1
"""
Configuration management for ARF Demo
Updated for better Pydantic compatibility and fallback handling
"""
from typing import Optional, Dict, Any, List
from enum import Enum
import os

import logging

logger = logging.getLogger(__name__)

# Try to import from pydantic-settings, fallback to pydantic
try:
    from pydantic_settings import BaseSettings
    from pydantic import Field, field_validator, ConfigDict
    PYDANTIC_V2 = True
    logger.info("Using pydantic-settings for BaseSettings")
except ImportError:
    try:
        from pydantic import BaseSettings, Field, validator
        PYDANTIC_V2 = False
        logger.info("Using pydantic.BaseSettings (older version)")
    except ImportError as e:
        logger.warning(f"Failed to import pydantic: {e}. Using fallback settings.")

        # Minimal stand-ins so the Settings class definition still evaluates
        # when pydantic is absent entirely.
        class BaseSettings:
            model_config = {}

            def __init__(self, **kwargs):
                # Only explicitly passed values are set; class-level defaults
                # come from the Field() defaults captured at definition time.
                for k, v in kwargs.items():
                    setattr(self, k, v)

        # BUGFIX: the original fallback defined Field as a class with only a
        # staticmethod, so Field(default=..., description=...) — the way
        # every Settings field invokes it — raised TypeError at class
        # definition time. A function that returns the default value matches
        # the actual call sites; extra keywords (description, etc.) are
        # accepted and ignored.
        def Field(default=None, **kwargs):
            return default

        def validator(*args, **kwargs):
            # No-op decorator: validation is skipped without pydantic.
            def decorator(func):
                return func
            return decorator

        def field_validator(*args, **kwargs):
            # No-op decorator mirroring pydantic v2's field_validator.
            def decorator(func):
                return func
            return decorator

        PYDANTIC_V2 = False
48
 
49
+
50
class ARFMode(str, Enum):
    """Operation modes the demo can run in."""

    DEMO = "demo"              # fully mocked, no external services
    OSS = "oss"                # open-source analysis pipeline
    ENTERPRISE = "enterprise"  # real ARF API integration
55
+
56
+
57
class SafetyMode(str, Enum):
    """How much human oversight execution requires."""

    ADVISORY = "advisory"      # recommendations only
    APPROVAL = "approval"      # human sign-off before acting
    AUTONOMOUS = "autonomous"  # act without confirmation
62
+
63
+
64
class Settings(BaseSettings):
    """Application settings with environment variable support.

    Field declarations work under pydantic v2 (pydantic-settings),
    pydantic v1, and the import fallback alike; validators and model
    configuration are selected at class-definition time via PYDANTIC_V2.
    """

    # ----- System mode -----
    arf_mode: ARFMode = Field(default=ARFMode.DEMO, description="ARF operation mode")
    use_mock_arf: bool = Field(default=True, description="Use mock ARF implementation (for demo mode)")

    # ----- ARF integration -----
    arf_api_key: Optional[str] = Field(default=None, description="ARF API key for real integration")
    arf_base_url: str = Field(default="https://api.arf.dev", description="ARF API base URL")

    # ----- Business figures used for ROI math -----
    engineer_hourly_rate: float = Field(default=150.0, description="Engineer hourly rate in USD")
    engineer_annual_cost: float = Field(default=125000.0, description="Engineer annual cost in USD")
    default_savings_rate: float = Field(default=0.82, description="Default savings rate with ARF")

    # ----- UI behaviour -----
    auto_refresh_seconds: int = Field(default=30, description="Auto-refresh interval in seconds")
    max_history_items: int = Field(default=100, description="Maximum history items to display")

    # ----- Demo content -----
    default_scenario: str = Field(default="Cache Miss Storm", description="Default incident scenario")
    scenario_config_path: str = Field(default="config/scenarios", description="Path to scenario configuration files")

    # ----- Safety -----
    default_safety_mode: SafetyMode = Field(default=SafetyMode.ADVISORY, description="Default safety mode")
    require_approval: bool = Field(default=True, description="Require human approval for execution")

    # ----- Validators (version-appropriate flavour) -----
    if PYDANTIC_V2:
        @field_validator("arf_api_key")
        @classmethod
        def validate_api_key(cls, v: Optional[str], info) -> Optional[str]:
            # Enterprise mode is the only one that talks to the real API.
            if info.data.get("arf_mode") == ARFMode.ENTERPRISE and not v:
                raise ValueError("ARF API key required for Enterprise mode")
            return v

        @field_validator("use_mock_arf")
        @classmethod
        def validate_mock_mode(cls, v: bool, info) -> bool:
            # Demo mode always forces the mock implementation on.
            if info.data.get("arf_mode") == ARFMode.DEMO:
                return True
            return v
    else:
        @validator("arf_api_key")
        def validate_api_key(cls, v: Optional[str], values: Dict[str, Any]) -> Optional[str]:
            if values.get("arf_mode") == ARFMode.ENTERPRISE and not v:
                raise ValueError("ARF API key required for Enterprise mode")
            return v

        @validator("use_mock_arf")
        def validate_mock_mode(cls, v: bool, values: Dict[str, Any]) -> bool:
            if values.get("arf_mode") == ARFMode.DEMO:
                return True
            return v

    # ----- Model configuration (v2 ConfigDict vs v1 inner Config) -----
    if PYDANTIC_V2:
        model_config = ConfigDict(
            env_file=".env",
            env_file_encoding="utf-8",
            case_sensitive=False,
            use_enum_values=True,
            extra="ignore"
        )
    else:
        class Config:
            env_file = ".env"
            env_file_encoding = "utf-8"
            case_sensitive = False
            use_enum_values = True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
183
 
184
 
185
# Global settings instance with robust fallback
try:
    settings = Settings()
    logger.info("Settings loaded successfully")
except Exception as e:
    logger.warning(f"Failed to load settings from .env: {e}, using defaults")
    # Rebuild from explicit defaults so the app always has a usable config.
    _default_values = {
        "arf_mode": ARFMode.DEMO,
        "use_mock_arf": True,
        "arf_api_key": None,
        "arf_base_url": "https://api.arf.dev",
        "engineer_hourly_rate": 150.0,
        "engineer_annual_cost": 125000.0,
        "default_savings_rate": 0.82,
        "auto_refresh_seconds": 30,
        "max_history_items": 100,
        "default_scenario": "Cache Miss Storm",
        "scenario_config_path": "config/scenarios",
        "default_safety_mode": SafetyMode.ADVISORY,
        "require_approval": True,
    }
    settings = Settings(**_default_values)
207
 
 
 
 
208
 
209
def get_settings() -> Settings:
    """Return the module-level Settings instance (singleton pattern)."""
    return settings
212
 
 
 
213
 
214
def print_settings_summary() -> None:
    """Log a one-line summary of the active settings (debugging aid)."""
    # BUGFIX: with use_enum_values=True pydantic stores enum fields as plain
    # strings, so `.value` raised AttributeError; getattr(x, "value", x)
    # yields the string in either representation.
    mode = settings.arf_mode
    safety = settings.default_safety_mode
    summary = {
        "mode": getattr(mode, "value", mode),
        "mock_mode": settings.use_mock_arf,
        "default_scenario": settings.default_scenario,
        "safety_mode": getattr(safety, "value", safety),
        "requires_approval": settings.require_approval
    }
    logger.info(f"Settings summary: {summary}")