#!/usr/bin/env python3
"""
Integration tests for error handling in UI components.

Tests the integration of error handling across UI components.
Requirements: 9.1, 9.2, 9.3, 9.4
"""

import pytest
from unittest.mock import patch

from src.interface.enhanced_results_display_manager import EnhancedResultsDisplayManager
from src.core.provider_summary_generator import ProviderSummary, ProviderSummaryGenerator
from src.core.improved_classification_prompt_manager import ImprovedClassificationPromptManager
from src.config.enhanced_display_config import EnhancedDisplayConfig


class TestErrorHandlingIntegration:
    """Integration tests for error handling across UI components."""

    def setup_method(self):
        """Set up test fixtures."""
        self.display_manager = EnhancedResultsDisplayManager()
        self.summary_generator = ProviderSummaryGenerator()
        self.classification_manager = ImprovedClassificationPromptManager()

    def test_display_manager_handles_invalid_summary(self):
        """Test that display manager handles invalid provider summary gracefully."""
        # Create invalid summary with missing required fields
        invalid_summary = ProviderSummary(
            patient_name="[Patient Name]",  # Placeholder
            patient_phone="[Phone Number]",  # Placeholder
            classification="RED",
            confidence=1.5,  # Invalid confidence
            reasoning="",  # Empty reasoning
            indicators=[],  # No indicators
            severity_level="INVALID",  # Invalid level
            urgency_level="INVALID"  # Invalid level
        )

        # Format should not crash and should include validation warnings
        result = self.display_manager.format_provider_summary_section(invalid_summary)

        assert isinstance(result, str)
        assert len(result) > 0
        # Should contain some form of content even with invalid data
        assert "Provider Summary" in result or "Display Error" in result

    def test_display_manager_handles_formatting_error(self):
        """Test that display manager handles formatting errors gracefully."""
        # Mock the config to cause an error
        with patch.object(self.display_manager, 'config', None):
            result = self.display_manager.format_ai_analysis_section(
                classification="RED",
                indicators=["Test indicator"],
                reasoning="Test reasoning"
            )

            assert isinstance(result, str)
            assert len(result) > 0
            # Should fall back to basic formatting or show error
            assert "AI Analysis" in result or "Display Error" in result

    def test_summary_generator_handles_missing_data(self):
        """Test that summary generator handles missing data gracefully."""
        # Generate summary with minimal data
        summary = self.summary_generator.generate_summary(
            indicators=[],  # Empty indicators
            reasoning="",  # Empty reasoning
            confidence=0.0,
            patient_name=None,  # Missing name
            patient_phone=None  # Missing phone
        )

        assert isinstance(summary, ProviderSummary)
        # Should have fallback values
        assert summary.patient_name != "[Patient Name]" or "Patient" in summary.patient_name
        assert len(summary.reasoning) > 10  # Should have fallback reasoning
        assert len(summary.recommended_actions) > 0  # Should have default actions

    def test_summary_generator_handles_generation_error(self):
        """Test that summary generator handles generation errors gracefully."""
        # Mock an internal method to raise an error
        with patch.object(self.summary_generator, '_generate_conversation_summary',
                          side_effect=Exception("Test error")):
            summary = self.summary_generator.generate_summary(
                indicators=["Test indicator"],
                reasoning="Test reasoning",
                confidence=0.8,
                patient_name="John Doe",
                patient_phone="555-123-4567"
            )

            assert isinstance(summary, ProviderSummary)
            # Should be a fallback summary
            assert summary.patient_name in ["John Doe", "Patient (Name Not Available)"]
            assert len(summary.reasoning) > 0

    def test_classification_manager_handles_invalid_result(self):
        """Test that classification manager handles invalid results gracefully."""
        # Create invalid classification result
        result = self.classification_manager.create_classification_result(
            classification="INVALID",  # Invalid classification
            confidence=2.0,  # Invalid confidence
            indicators=[],  # Empty indicators
            reasoning=""  # Empty reasoning
        )

        assert result is not None
        assert result.classification in ["red", "yellow", "green"]
        assert 0.0 <= result.confidence <= 1.0
        assert len(result.indicators) > 0
        assert len(result.reasoning) > 0

    def test_end_to_end_error_recovery(self):
        """Test end-to-end error recovery across components."""
        # Start with problematic data
        problematic_data = {
            'classification': 'INVALID',
            'confidence': -0.5,
            'indicators': [],
            'reasoning': '',
            'patient_name': '',
            'patient_phone': ''
        }

        # Generate summary (should apply fallbacks)
        summary = self.summary_generator.generate_summary(
            indicators=problematic_data['indicators'],
            reasoning=problematic_data['reasoning'],
            confidence=problematic_data['confidence'],
            patient_name=problematic_data['patient_name'],
            patient_phone=problematic_data['patient_phone']
        )

        # Display summary (should handle validation issues)
        display_result = self.display_manager.format_provider_summary_section(summary)

        # Verify the entire pipeline produces usable output
        assert isinstance(display_result, str)
        assert len(display_result) > 0
        assert "Provider Summary" in display_result or "Display Error" in display_result

        # Verify summary has been fixed
        assert summary.confidence >= 0.0
        assert len(summary.reasoning) > 10
        assert len(summary.recommended_actions) > 0

    def test_degraded_mode_functionality(self):
        """Test that system continues to function in degraded mode."""
        # Disable enhancements to test degraded mode
        degraded_config = EnhancedDisplayConfig(enabled=False)
        degraded_display_manager = EnhancedResultsDisplayManager(config=degraded_config)

        # Create a valid summary
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="Test reasoning for degraded mode",
            indicators=["Test indicator"],
            severity_level="HIGH",
            urgency_level="URGENT",
            situation_description="Test situation",
            recommended_actions=["Test action"]
        )

        # Format in degraded mode
        result = degraded_display_manager.format_provider_summary_section(summary)

        assert isinstance(result, str)
        assert len(result) > 0
        # Should contain basic information even in degraded mode
        assert "John Doe" in result
        assert "555-123-4567" in result
        assert "URGENT" in result

    def test_error_statistics_collection(self):
        """Test that error statistics are properly collected."""
        # Create summary with validation issues
        problematic_summary = ProviderSummary(
            patient_name="[Patient Name]",
            patient_phone="[Phone Number]",
            classification="RED",
            confidence=1.5,  # Invalid
            reasoning="",  # Empty
            indicators=[]  # Empty
        )

        # Validate and collect errors
        validation_result = self.display_manager.error_handler.validate_provider_summary_structure(
            problematic_summary
        )

        # Get error statistics
        stats = self.display_manager.error_handler.get_error_statistics(validation_result.errors)

        assert stats["total"] > 0
        assert "validation" in stats["by_category"] or "data_missing" in stats["by_category"]
        assert len(stats["by_severity"]) > 0
        assert len(stats["by_component"]) > 0


if __name__ == "__main__":
    pytest.main([__file__])