#!/usr/bin/env python3
"""
Integration tests for error handling in UI components.
Tests the integration of error handling across UI components.
Requirements: 9.1, 9.2, 9.3, 9.4
"""
import pytest
from unittest.mock import Mock, patch
from src.interface.enhanced_results_display_manager import EnhancedResultsDisplayManager
from src.core.provider_summary_generator import ProviderSummary, ProviderSummaryGenerator
from src.core.improved_classification_prompt_manager import ImprovedClassificationPromptManager
from src.config.enhanced_display_config import EnhancedDisplayConfig
class TestErrorHandlingIntegration:
    """Integration tests for error handling across UI components."""

    def setup_method(self):
        """Build fresh component instances before every test."""
        self.display_manager = EnhancedResultsDisplayManager()
        self.summary_generator = ProviderSummaryGenerator()
        self.classification_manager = ImprovedClassificationPromptManager()

    def test_display_manager_handles_invalid_summary(self):
        """Test that display manager handles invalid provider summary gracefully."""
        # Every field below is deliberately broken: placeholder identifiers,
        # out-of-range confidence, empty reasoning/indicators, bogus levels.
        bad_summary = ProviderSummary(
            patient_name="[Patient Name]",
            patient_phone="[Phone Number]",
            classification="RED",
            confidence=1.5,
            reasoning="",
            indicators=[],
            severity_level="INVALID",
            urgency_level="INVALID",
        )

        rendered = self.display_manager.format_provider_summary_section(bad_summary)

        # The formatter must degrade gracefully rather than raise.
        assert isinstance(rendered, str)
        assert len(rendered) > 0
        # Either normal output or an explicit error banner is acceptable.
        assert "Provider Summary" in rendered or "Display Error" in rendered

    def test_display_manager_handles_formatting_error(self):
        """Test that display manager handles formatting errors gracefully."""
        # Nulling out the config forces the formatter down its failure path.
        with patch.object(self.display_manager, 'config', None):
            rendered = self.display_manager.format_ai_analysis_section(
                classification="RED",
                indicators=["Test indicator"],
                reasoning="Test reasoning",
            )

            assert isinstance(rendered, str)
            assert len(rendered) > 0
            # Basic fallback formatting or an error notice — never a crash.
            assert "AI Analysis" in rendered or "Display Error" in rendered

    def test_summary_generator_handles_missing_data(self):
        """Test that summary generator handles missing data gracefully."""
        # Feed the generator the most impoverished input possible.
        summary = self.summary_generator.generate_summary(
            indicators=[],
            reasoning="",
            confidence=0.0,
            patient_name=None,
            patient_phone=None,
        )

        assert isinstance(summary, ProviderSummary)
        # Fallback content must replace the missing fields rather than
        # echoing placeholders or leaving them blank.
        assert summary.patient_name != "[Patient Name]" or "Patient" in summary.patient_name
        assert len(summary.reasoning) > 10
        assert len(summary.recommended_actions) > 0

    def test_summary_generator_handles_generation_error(self):
        """Test that summary generator handles generation errors gracefully."""
        # Force an internal helper to blow up mid-generation.
        with patch.object(
            self.summary_generator,
            '_generate_conversation_summary',
            side_effect=Exception("Test error"),
        ):
            summary = self.summary_generator.generate_summary(
                indicators=["Test indicator"],
                reasoning="Test reasoning",
                confidence=0.8,
                patient_name="John Doe",
                patient_phone="555-123-4567",
            )

            assert isinstance(summary, ProviderSummary)
            # A fallback summary keeps the known name or a safe stand-in.
            assert summary.patient_name in ["John Doe", "Patient (Name Not Available)"]
            assert len(summary.reasoning) > 0

    def test_classification_manager_handles_invalid_result(self):
        """Test that classification manager handles invalid results gracefully."""
        outcome = self.classification_manager.create_classification_result(
            classification="INVALID",
            confidence=2.0,
            indicators=[],
            reasoning="",
        )

        assert outcome is not None
        # Invalid inputs must be coerced into the valid domain.
        assert outcome.classification in ["red", "yellow", "green"]
        assert 0.0 <= outcome.confidence <= 1.0
        assert len(outcome.indicators) > 0
        assert len(outcome.reasoning) > 0

    def test_end_to_end_error_recovery(self):
        """Test end-to-end error recovery across components."""
        # Worst-case payload: invalid label, negative confidence, all blanks.
        broken = {
            'classification': 'INVALID',
            'confidence': -0.5,
            'indicators': [],
            'reasoning': '',
            'patient_name': '',
            'patient_phone': '',
        }

        # Stage 1: generation should apply fallbacks.
        summary = self.summary_generator.generate_summary(
            indicators=broken['indicators'],
            reasoning=broken['reasoning'],
            confidence=broken['confidence'],
            patient_name=broken['patient_name'],
            patient_phone=broken['patient_phone'],
        )

        # Stage 2: display should tolerate any remaining validation issues.
        shown = self.display_manager.format_provider_summary_section(summary)

        # The whole pipeline must still yield usable output.
        assert isinstance(shown, str)
        assert len(shown) > 0
        assert "Provider Summary" in shown or "Display Error" in shown

        # And the summary itself must have been repaired along the way.
        assert summary.confidence >= 0.0
        assert len(summary.reasoning) > 10
        assert len(summary.recommended_actions) > 0

    def test_degraded_mode_functionality(self):
        """Test that system continues to function in degraded mode."""
        # Turn the enhanced display features off entirely.
        plain_config = EnhancedDisplayConfig(enabled=False)
        plain_manager = EnhancedResultsDisplayManager(config=plain_config)

        valid_summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="Test reasoning for degraded mode",
            indicators=["Test indicator"],
            severity_level="HIGH",
            urgency_level="URGENT",
            situation_description="Test situation",
            recommended_actions=["Test action"],
        )

        rendered = plain_manager.format_provider_summary_section(valid_summary)

        assert isinstance(rendered, str)
        assert len(rendered) > 0
        # Even degraded output must surface the essential facts.
        assert "John Doe" in rendered
        assert "555-123-4567" in rendered
        assert "URGENT" in rendered

    def test_error_statistics_collection(self):
        """Test that error statistics are properly collected."""
        flawed = ProviderSummary(
            patient_name="[Patient Name]",
            patient_phone="[Phone Number]",
            classification="RED",
            confidence=1.5,
            reasoning="",
            indicators=[],
        )

        # Run validation, then aggregate the resulting errors.
        validation_result = self.display_manager.error_handler.validate_provider_summary_structure(flawed)
        stats = self.display_manager.error_handler.get_error_statistics(validation_result.errors)

        assert stats["total"] > 0
        assert "validation" in stats["by_category"] or "data_missing" in stats["by_category"]
        assert len(stats["by_severity"]) > 0
        assert len(stats["by_component"]) > 0
# Allow running this test module directly (outside a pytest invocation);
# trailing scraper residue (" |") removed — it was a syntax error.
if __name__ == "__main__":
    pytest.main([__file__])