petter2025 committed on
Commit
5f12010
·
verified ·
1 Parent(s): ebbee77

Create conftest.py

Browse files
Files changed (1) hide show
  1. tests/conftest.py +53 -112
tests/conftest.py CHANGED
@@ -1,141 +1,82 @@
1
  """
2
- Pytest configuration and shared fixtures
3
  """
4
 
5
  import pytest
6
- import tempfile
7
- import os
8
- from datetime import datetime, timezone
9
- from models import ReliabilityEvent, EventSeverity, HealingPolicy, HealingAction, PolicyCondition
10
- from healing_policies import PolicyEngine
11
- from app import (
12
- ThreadSafeEventStore,
13
- AdvancedAnomalyDetector,
14
- BusinessImpactCalculator,
15
- SimplePredictiveEngine,
16
- EnhancedReliabilityEngine,
17
- OrchestrationManager
18
- )
19
 
20
-
21
- @pytest.fixture
22
- def sample_event():
23
- """Create a sample reliability event for testing"""
24
- return ReliabilityEvent(
25
- component="test-service",
26
- latency_p99=250.0,
27
- error_rate=0.08,
28
- throughput=1000.0,
29
- cpu_util=0.75,
30
- memory_util=0.65,
31
- severity=EventSeverity.MEDIUM
32
- )
33
-
34
-
35
- @pytest.fixture
36
- def critical_event():
37
- """Create a critical reliability event"""
38
- return ReliabilityEvent(
39
- component="critical-service",
40
- latency_p99=600.0,
41
- error_rate=0.35,
42
- throughput=500.0,
43
- cpu_util=0.95,
44
- memory_util=0.92,
45
- severity=EventSeverity.CRITICAL
46
- )
47
-
48
-
49
- @pytest.fixture
50
- def normal_event():
51
- """Create a normal (healthy) reliability event"""
52
- return ReliabilityEvent(
53
- component="healthy-service",
54
- latency_p99=80.0,
55
- error_rate=0.01,
56
- throughput=2000.0,
57
- cpu_util=0.40,
58
- memory_util=0.35,
59
- severity=EventSeverity.LOW
60
- )
61
 
62
 
63
  @pytest.fixture
64
- def sample_policy():
65
- """Create a sample healing policy"""
66
- return HealingPolicy(
67
- name="test_policy",
68
- conditions=[
69
- PolicyCondition(metric="latency_p99", operator="gt", threshold=300.0)
70
- ],
71
- actions=[HealingAction.RESTART_CONTAINER],
72
- priority=2,
73
- cool_down_seconds=60,
74
- max_executions_per_hour=5
75
- )
76
 
77
 
78
  @pytest.fixture
79
- def policy_engine():
80
- """Create a fresh policy engine for testing"""
81
- return PolicyEngine(max_cooldown_history=100, max_execution_history=100)
 
82
 
83
 
84
  @pytest.fixture
85
- def event_store():
86
- """Create a fresh event store"""
87
- return ThreadSafeEventStore(max_size=100)
 
88
 
89
 
90
  @pytest.fixture
91
- def anomaly_detector():
92
- """Create a fresh anomaly detector"""
93
- return AdvancedAnomalyDetector()
 
94
 
95
 
96
  @pytest.fixture
97
- def business_calculator():
98
- """Create a business impact calculator"""
99
- return BusinessImpactCalculator()
 
100
 
101
 
102
  @pytest.fixture
103
- def predictive_engine():
104
- """Create a predictive engine"""
105
- return SimplePredictiveEngine(history_window=20)
 
 
 
 
 
 
 
 
106
 
107
 
108
  @pytest.fixture
109
- def temp_dir():
110
- """Create a temporary directory for test files"""
111
- with tempfile.TemporaryDirectory() as tmpdir:
112
- yield tmpdir
113
 
114
 
115
- @pytest.fixture
116
- def mock_faiss_index(temp_dir):
117
- """Create a mock FAISS index for testing"""
118
- # This would require FAISS to be installed
119
- # For now, return None to allow tests to skip FAISS operations
120
- return None
121
-
122
-
123
- @pytest.fixture
124
- async def reliability_engine(
125
- policy_engine,
126
- event_store,
127
- anomaly_detector,
128
- business_calculator
129
- ):
130
- """Create a fully initialized reliability engine"""
131
- orchestrator = OrchestrationManager()
132
-
133
- engine = EnhancedReliabilityEngine(
134
- orchestrator=orchestrator,
135
- policy_engine=policy_engine,
136
- event_store=event_store,
137
- anomaly_detector=anomaly_detector,
138
- business_calculator=business_calculator
139
  )
140
-
141
- return engine
 
 
1
  """
2
+ Pytest configuration and shared fixtures for timeline tests
3
  """
4
 
5
  import pytest
6
+ from unittest.mock import Mock
7
+ from datetime import datetime
 
 
 
 
 
 
 
 
 
 
 
8
 
9
+ # Add your shared fixtures here
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
 
12
@pytest.fixture
def sample_timeline_metrics():
    """Provide a sample TimelineMetrics instance for timeline tests.

    NOTE(review): still a placeholder — it deliberately resolves to None
    until a standard TimelineMetrics value is defined, so any test
    requesting this fixture currently receives None.
    """
    # TODO: build and return a representative TimelineMetrics instance
    return None
 
 
 
 
 
 
 
 
17
 
18
 
19
@pytest.fixture
def timeline_calculator():
    """Provide a TimelineCalculator configured with test defaults.

    NOTE(review): placeholder — resolves to None until the calculator
    construction is filled in; dependent tests will see None.
    """
    # TODO: construct and return the calculator under test
    return None
24
 
25
 
26
@pytest.fixture
def timeline_formatter():
    """Provide a TimelineFormatter instance for rendering tests.

    NOTE(review): placeholder — resolves to None until the formatter
    construction is filled in; dependent tests will see None.
    """
    # TODO: construct and return the formatter under test
    return None
31
 
32
 
33
@pytest.fixture
def mock_business_metrics():
    """Mock BusinessMetricsTracker.

    Was an empty TODO stub that resolved to None, so any test requesting it
    hit confusing AttributeErrors on the first attribute access. Now returns
    a Mock (already imported at the top of this file), letting tests call and
    assert on arbitrary tracker methods.

    NOTE(review): the real tracker's API is not visible from this file —
    individual tests should configure specific return values they rely on.
    """
    return Mock(name="BusinessMetricsTracker")
38
 
39
 
40
@pytest.fixture
def mock_enhanced_engine():
    """Mock EnhancedReliabilityEngine.

    Was an empty TODO stub that resolved to None, breaking any test that
    requested it. Now returns a Mock (already imported at the top of this
    file) so engine methods can be called and asserted on.

    NOTE(review): the real engine's API is not visible from this file —
    individual tests should configure specific return values they rely on.
    """
    return Mock(name="EnhancedReliabilityEngine")
45
 
46
 
47
@pytest.fixture
def sample_incident_data():
    """Provide a representative critical-incident payload for tests.

    The values model an overloaded service: high latency and error rate,
    near-saturated CPU/memory.
    """
    incident = dict(
        component="api-service",
        latency=450.0,
        error_rate=0.22,
        throughput=8500,
        cpu_util=0.95,
        memory_util=0.88,
        severity="CRITICAL",
    )
    return incident
59
 
60
 
61
@pytest.fixture
def sample_timeline_display():
    """Create sample timeline markdown display.

    Was a TODO stub resolving to None; tests that render or parse a timeline
    now receive a small, deterministic markdown table.

    NOTE(review): the exact layout produced by the project's timeline
    formatter is not visible from this file — adjust this sample if the
    formatter's real output differs.
    """
    return (
        "## Incident Timeline\n"
        "\n"
        "| Time  | Event                           |\n"
        "|-------|---------------------------------|\n"
        "| 00:00 | Anomaly detected on api-service |\n"
        "| 00:02 | Healing policy triggered        |\n"
        "| 00:05 | Service recovered               |\n"
    )
66
 
67
 
68
# Markers for different test categories
def pytest_configure(config):
    """Register this suite's custom pytest markers on the given config.

    Declaring markers up front keeps ``pytest --strict-markers`` happy and
    documents them in ``pytest --markers`` output.
    """
    marker_specs = (
        "integration: mark test as integration test",
        "unit: mark test as unit test",
        "benchmark: mark test as performance benchmark",
        "slow: mark test as slow running",
    )
    for spec in marker_specs:
        config.addinivalue_line("markers", spec)