Spaces:
Sleeping
Sleeping
#!/usr/bin/env python3
"""
Unit tests for UI Error Handler.
Tests error handling and recovery mechanisms for UI components.
Requirements: 9.1, 9.2, 9.3, 9.4
"""
import pytest
from unittest.mock import Mock, patch
from datetime import datetime
from src.core.ui_error_handler import (
    UIErrorHandler, UIError, ValidationResult, ErrorCategory, ErrorSeverity
)
from src.core.provider_summary_generator import ProviderSummary
from src.core.improved_classification_prompt_manager import ClassificationResult
class TestUIErrorHandler:
    """Test cases for UIErrorHandler.

    Covers structural validation of provider summaries, fallback-template
    recovery, degraded display rendering, classification-error handling,
    error statistics aggregation, and the ValidationResult / UIError
    helper types.
    Requirements: 9.1, 9.2, 9.3, 9.4
    """

    def setup_method(self):
        """Set up test fixtures: a fresh handler per test."""
        self.error_handler = UIErrorHandler()

    def test_initialization(self):
        """Test error handler initialization."""
        assert self.error_handler is not None
        assert hasattr(self.error_handler, 'fallback_templates')
        assert hasattr(self.error_handler, 'validation_rules')

    def test_validate_provider_summary_structure_valid(self):
        """Test validation of valid provider summary."""
        # Create valid summary with all fields populated.
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="Patient expressing significant spiritual distress",
            indicators=["Loss of meaning", "Spiritual questioning"],
            severity_level="HIGH",
            urgency_level="URGENT",
            situation_description="Patient experiencing spiritual crisis requiring immediate attention",
            recommended_actions=["Contact within 24 hours", "Assess support needs"]
        )
        result = self.error_handler.validate_provider_summary_structure(summary)
        assert isinstance(result, ValidationResult)
        assert result.is_valid
        assert len(result.errors) == 0

    def test_validate_provider_summary_structure_missing_contact(self):
        """Test validation with missing contact information."""
        # Create summary with placeholder (unfilled) contact info.
        summary = ProviderSummary(
            patient_name="[Patient Name]",
            patient_phone="[Phone Number]",
            classification="RED",
            confidence=0.8,
            reasoning="Patient expressing significant spiritual distress",
            indicators=["Loss of meaning"],
            severity_level="HIGH",
            urgency_level="URGENT"
        )
        result = self.error_handler.validate_provider_summary_structure(summary)
        assert not result.is_valid
        assert len(result.errors) >= 2  # Name and phone errors
        # Check for specific errors
        error_messages = [error.message for error in result.errors]
        assert any("Patient name is missing" in msg for msg in error_messages)
        assert any("Patient phone is missing" in msg for msg in error_messages)

    def test_validate_provider_summary_structure_invalid_confidence(self):
        """Test validation with invalid confidence value."""
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=1.5,  # Invalid - out of range
            reasoning="Test reasoning",
            indicators=["Test indicator"]
        )
        result = self.error_handler.validate_provider_summary_structure(summary)
        assert not result.is_valid
        confidence_errors = [e for e in result.errors if e.field == "confidence"]
        assert len(confidence_errors) > 0
        assert "out of valid range" in confidence_errors[0].message

    def test_validate_provider_summary_structure_missing_reasoning(self):
        """Test validation with missing reasoning."""
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="",  # Empty reasoning
            indicators=["Test indicator"]
        )
        result = self.error_handler.validate_provider_summary_structure(summary)
        assert not result.is_valid
        reasoning_errors = [e for e in result.errors if e.field == "reasoning"]
        assert len(reasoning_errors) > 0
        assert "missing or insufficient" in reasoning_errors[0].message

    def test_apply_fallback_template_missing_contact(self):
        """Test applying fallback template for missing contact information."""
        summary = ProviderSummary(
            patient_name="[Patient Name]",
            patient_phone="[Phone Number]",
            classification="RED",
            confidence=0.8,
            reasoning="Test reasoning",
            indicators=["Test indicator"]
        )
        fixed_summary = self.error_handler.apply_fallback_template(summary, "missing_contact")
        # Placeholders must be replaced by explicit "not provided" values.
        assert fixed_summary.patient_name != "[Patient Name]"
        assert fixed_summary.patient_phone != "[Phone Number]"
        assert "Patient (Name Not Provided)" in fixed_summary.patient_name
        assert "not available" in fixed_summary.patient_phone.lower()

    def test_apply_fallback_template_missing_reasoning(self):
        """Test applying fallback template for missing reasoning."""
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="",  # Empty reasoning
            indicators=["Loss of meaning", "Spiritual distress"]
        )
        fixed_summary = self.error_handler.apply_fallback_template(summary, "missing_reasoning")
        # Generated reasoning should reference the classification and indicators.
        assert len(fixed_summary.reasoning) > 10
        assert "RED flag classification" in fixed_summary.reasoning
        assert "Loss of meaning" in fixed_summary.reasoning

    def test_apply_fallback_template_invalid_levels(self):
        """Test applying fallback template for invalid severity/urgency levels."""
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="Test reasoning",
            indicators=["Test indicator"],
            severity_level="INVALID",
            urgency_level="INVALID"
        )
        fixed_summary = self.error_handler.apply_fallback_template(summary, "invalid_levels")
        # Levels must be normalized back into the allowed vocabularies.
        assert fixed_summary.severity_level in ["CRITICAL", "HIGH", "MODERATE"]
        assert fixed_summary.urgency_level in ["IMMEDIATE", "URGENT", "STANDARD"]

    def test_apply_fallback_template_missing_actions(self):
        """Test applying fallback template for missing recommended actions."""
        summary = ProviderSummary(
            patient_name="John Doe",
            patient_phone="555-123-4567",
            classification="RED",
            confidence=0.8,
            reasoning="Test reasoning",
            indicators=["Test indicator"],
            urgency_level="URGENT",
            recommended_actions=[]  # Empty actions
        )
        fixed_summary = self.error_handler.apply_fallback_template(summary, "missing_actions")
        assert len(fixed_summary.recommended_actions) > 0
        assert any("Contact patient" in action for action in fixed_summary.recommended_actions)

    def test_create_degraded_display(self):
        """Test creating degraded display for errors."""
        error_context = "Test error occurred"
        original_content = "<div>Original content</div>"
        degraded_display = self.error_handler.create_degraded_display(error_context, original_content)
        assert isinstance(degraded_display, str)
        assert "Display Error Detected" in degraded_display
        assert error_context in degraded_display
        assert original_content in degraded_display
        assert "degraded mode" in degraded_display

    def test_create_degraded_display_no_content(self):
        """Test creating degraded display without original content."""
        error_context = "Test error occurred"
        degraded_display = self.error_handler.create_degraded_display(error_context)
        assert isinstance(degraded_display, str)
        assert "Display Error Detected" in degraded_display
        assert error_context in degraded_display
        assert "Recovery Actions" in degraded_display

    def test_handle_classification_error(self):
        """Test handling classification errors."""
        error = Exception("Test classification error")
        input_data = {
            'message': 'I feel hopeless and lost',
            'classification': 'red',
            'confidence': 0.8
        }
        result = self.error_handler.handle_classification_error(error, input_data)
        assert isinstance(result, ClassificationResult)
        assert result.classification in ["red", "yellow", "green"]
        assert 0.0 <= result.confidence <= 1.0
        assert len(result.indicators) > 0
        assert len(result.reasoning) > 0
        assert not result.is_valid  # Should be marked as invalid due to error

    def test_handle_classification_error_critical_keywords(self):
        """Test handling classification error with critical keywords."""
        error = Exception("Test error")
        input_data = {
            'message': 'I want to kill myself and end it all'
        }
        result = self.error_handler.handle_classification_error(error, input_data)
        # Critical keywords should force a high-confidence red classification.
        assert result.classification == "red"
        assert result.confidence >= 0.8
        assert any("Critical" in indicator for indicator in result.indicators)

    def test_handle_classification_error_spiritual_keywords(self):
        """Test handling classification error with spiritual keywords."""
        error = Exception("Test error")
        input_data = {
            'message': 'I have lost all meaning and purpose in life'
        }
        result = self.error_handler.handle_classification_error(error, input_data)
        assert result.classification == "red"
        assert result.confidence >= 0.6
        assert any("Spiritual" in indicator for indicator in result.indicators)

    def test_get_error_statistics(self):
        """Test getting error statistics."""
        errors = [
            UIError(
                category=ErrorCategory.VALIDATION,
                severity=ErrorSeverity.HIGH,
                message="Test error 1",
                component="test_component"
            ),
            UIError(
                category=ErrorCategory.VALIDATION,
                severity=ErrorSeverity.MEDIUM,
                message="Test error 2",
                component="test_component"
            ),
            UIError(
                category=ErrorCategory.FORMATTING,
                severity=ErrorSeverity.HIGH,
                message="Test error 3",
                component="other_component"
            )
        ]
        stats = self.error_handler.get_error_statistics(errors)
        assert stats["total"] == 3
        assert stats["by_category"]["validation"] == 2
        assert stats["by_category"]["formatting"] == 1
        assert stats["by_severity"]["high"] == 2
        assert stats["by_severity"]["medium"] == 1
        assert stats["by_component"]["test_component"] == 2
        assert stats["by_component"]["other_component"] == 1

    def test_get_error_statistics_empty(self):
        """Test getting error statistics with empty list."""
        stats = self.error_handler.get_error_statistics([])
        assert stats["total"] == 0
        assert stats["by_category"] == {}
        assert stats["by_severity"] == {}

    def test_validation_result_add_error(self):
        """Test adding error to validation result."""
        result = ValidationResult(is_valid=True)
        error = UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.HIGH,
            message="Test error",
            component="test"
        )
        result.add_error(error)
        assert not result.is_valid
        assert len(result.errors) == 1
        assert result.errors[0] == error

    def test_validation_result_add_warning(self):
        """Test adding warning to validation result."""
        result = ValidationResult(is_valid=True)
        warning = UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.LOW,
            message="Test warning",
            component="test"
        )
        result.add_warning(warning)
        assert result.is_valid  # Warnings don't invalidate
        assert len(result.warnings) == 1
        assert result.warnings[0] == warning

    def test_validation_result_has_critical_errors(self):
        """Test checking for critical errors."""
        result = ValidationResult(is_valid=True)
        # Add non-critical error
        result.add_error(UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.HIGH,
            message="High error",
            component="test"
        ))
        assert not result.has_critical_errors()
        # Add critical error
        result.add_error(UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.CRITICAL,
            message="Critical error",
            component="test"
        ))
        assert result.has_critical_errors()

    def test_validation_result_get_error_summary(self):
        """Test getting error summary."""
        result = ValidationResult(is_valid=True)
        # No errors or warnings
        summary = result.get_error_summary()
        assert "No validation issues" in summary
        # Add errors and warnings
        result.add_error(UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.HIGH,
            message="Test error",
            component="test"
        ))
        result.add_warning(UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.LOW,
            message="Test warning",
            component="test"
        ))
        summary = result.get_error_summary()
        assert "1 error(s)" in summary
        assert "1 warning(s)" in summary

    def test_ui_error_to_dict(self):
        """Test converting UIError to dictionary."""
        error = UIError(
            category=ErrorCategory.VALIDATION,
            severity=ErrorSeverity.HIGH,
            message="Test error",
            component="test_component",
            field="test_field",
            value="test_value",
            suggestion="Test suggestion",
            recovery_actions=["Action 1", "Action 2"]
        )
        error_dict = error.to_dict()
        # Enum members should serialize to their lowercase string values.
        assert error_dict["category"] == "validation"
        assert error_dict["severity"] == "high"
        assert error_dict["message"] == "Test error"
        assert error_dict["component"] == "test_component"
        assert error_dict["field"] == "test_field"
        assert error_dict["value"] == "test_value"
        assert error_dict["suggestion"] == "Test suggestion"
        assert error_dict["recovery_actions"] == ["Action 1", "Action 2"]
| if __name__ == "__main__": | |
| pytest.main([__file__]) |