#!/usr/bin/env python3
"""
Test script for the feedback UI integration.
Tests Task 4.3 implementation.
"""
import sys
import os

# Make the project's src/ tree importable so this script can run directly
# from tests/unit without an installed package.
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'src'))

from interface.feedback_ui_integration import FeedbackUIIntegration
from config.prompt_management.feedback_system import FeedbackSystem
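
# The two project imports are exercised against an assumed surface: option
# lists stored as (label, value) tuples and Gradio factory methods on
# FeedbackUIIntegration, plus a FeedbackSystem that persists records under
# its storage_path. The assertions below check exactly that surface.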


def test_ui_integration_initialization():
    """Test that the UI integration initializes correctly."""
    print("Testing UI integration initialization...")
    # Test with default feedback system
    ui_integration = FeedbackUIIntegration()
    assert ui_integration.feedback_system is not None
    assert hasattr(ui_integration, 'error_type_options')
    assert hasattr(ui_integration, 'subcategory_mapping')
    # Test with custom feedback system
    custom_feedback = FeedbackSystem(storage_path=".verification_data/test_ui_feedback")
    ui_integration_custom = FeedbackUIIntegration(feedback_system=custom_feedback)
    assert ui_integration_custom.feedback_system is custom_feedback
    print("✓ UI integration initializes correctly")
    return True
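

# A sketch of how the option tuples would typically be consumed, assuming
# Gradio is the UI layer (the create_* method names further below suggest it).
# gr.Dropdown accepts (label, value) pairs as choices, which is why the tests
# unpack each option as `for _, value in ...`:
#
#     import gradio as gr
#     dropdown = gr.Dropdown(
#         choices=ui_integration.error_type_options,  # [(label, value), ...]
#         label="Error type",
#     )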
def test_error_type_options():
    """Test that error type options are properly defined."""
    print("Testing error type options...")
    ui_integration = FeedbackUIIntegration()
    # Verify error type options
    expected_error_types = [
        "wrong_classification", "severity_misjudgment", "missed_indicators",
        "false_positive", "context_misunderstanding", "language_interpretation"
    ]
    actual_error_types = [value for _, value in ui_integration.error_type_options]
    for expected_type in expected_error_types:
        assert expected_type in actual_error_types, f"Missing error type: {expected_type}"
    print(f"✓ All {len(expected_error_types)} error types are defined")
    return True
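

# subcategory_mapping is assumed to be a dict keyed by error-type value, each
# entry holding its own (label, value) list, for example (the label here is
# illustrative, not taken from the implementation):
#     {"wrong_classification": [("GREEN misread as YELLOW", "green_to_yellow"), ...]}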
def test_subcategory_mapping():
    """Test that subcategory mappings are complete."""
    print("Testing subcategory mappings...")
    ui_integration = FeedbackUIIntegration()
    # Verify each error type has subcategories
    for _, error_type_value in ui_integration.error_type_options:
        assert error_type_value in ui_integration.subcategory_mapping, \
            f"Missing subcategory mapping for: {error_type_value}"
        subcategories = ui_integration.subcategory_mapping[error_type_value]
        assert len(subcategories) > 0, f"No subcategories defined for: {error_type_value}"
        # Verify subcategory structure
        for subcategory_label, subcategory_value in subcategories:
            assert isinstance(subcategory_label, str) and len(subcategory_label) > 0
            assert isinstance(subcategory_value, str) and len(subcategory_value) > 0
    print("✓ All subcategory mappings are complete")
    return True


def test_question_issue_options():
    """Test that question issue options are properly defined."""
    print("Testing question issue options...")
    ui_integration = FeedbackUIIntegration()
    expected_issue_types = [
        "inappropriate_question", "insensitive_language", "wrong_scenario_targeting",
        "unclear_question", "leading_question"
    ]
    actual_issue_types = [value for _, value in ui_integration.question_issue_options]
    for expected_type in expected_issue_types:
        assert expected_type in actual_issue_types, f"Missing question issue type: {expected_type}"
    print(f"✓ All {len(expected_issue_types)} question issue types are defined")
    return True


def test_scenario_options():
    """Test that scenario options are properly defined."""
    print("Testing scenario options...")
    ui_integration = FeedbackUIIntegration()
    expected_scenarios = [
        "loss_of_interest", "loss_of_loved_one", "no_support",
        "vague_stress", "sleep_issues", "spiritual_practice_change"
    ]
    actual_scenarios = [value for _, value in ui_integration.scenario_options]
    for expected_scenario in expected_scenarios:
        assert expected_scenario in actual_scenarios, f"Missing scenario: {expected_scenario}"
    print(f"✓ All {len(expected_scenarios)} scenario types are defined")
    return True


def test_ui_component_creation():
    """Test that UI components can be created without errors."""
    print("Testing UI component creation...")
    ui_integration = FeedbackUIIntegration()
    try:
        # Note: We can't actually create Gradio components without a running
        # interface, but we can test that the methods exist and don't raise
        # import errors.
        # Test that methods exist
        assert hasattr(ui_integration, 'create_classification_error_interface')
        assert hasattr(ui_integration, 'create_question_issue_interface')
        assert hasattr(ui_integration, 'create_pattern_analysis_display')
        assert hasattr(ui_integration, 'create_complete_feedback_interface')
        print(" ✓ All UI creation methods are available")
        # Test that the methods are callable
        assert callable(ui_integration.create_classification_error_interface)
        assert callable(ui_integration.create_question_issue_interface)
        assert callable(ui_integration.create_pattern_analysis_display)
        assert callable(ui_integration.create_complete_feedback_interface)
        print(" ✓ All UI creation methods are callable")
    except Exception as e:
        print(f" ✗ Error with UI component methods: {str(e)}")
        return False
    print("✓ UI component creation methods are properly defined")
    return True
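

# Note: the next test writes real records to .verification_data/, and its
# summary assertion uses >= 1 rather than == 1, presumably because earlier
# runs may have left records in the same store.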
def test_feedback_integration():
    """Test that the UI integration works with the feedback system."""
    print("Testing feedback system integration...")
    from config.prompt_management.data_models import ErrorType, ErrorSubcategory
    # Create UI integration with test feedback system
    feedback_system = FeedbackSystem(storage_path=".verification_data/test_ui_integration")
    ui_integration = FeedbackUIIntegration(feedback_system=feedback_system)
    # Record some test feedback through the system
    error_id = feedback_system.record_classification_error(
        error_type=ErrorType.WRONG_CLASSIFICATION,
        subcategory=ErrorSubcategory.GREEN_TO_YELLOW,
        expected_category="YELLOW",
        actual_category="GREEN",
        message_content="Test message for UI integration",
        reviewer_comments="Test comment for UI integration",
        confidence_level=0.9,
        session_id="ui_integration_test"
    )
    # Verify the feedback was recorded
    summary = feedback_system.get_feedback_summary()
    assert summary['total_errors'] >= 1, "Feedback should be recorded"
    print(f"✓ Feedback integration works (recorded error: {error_id[:8]}...)")
    return True


def test_predefined_categories_completeness():
    """Test that all predefined categories from documentation are included."""
    print("Testing predefined categories completeness...")
    ui_integration = FeedbackUIIntegration()
    # Test that all major error categories are covered
    error_categories = {
        "classification_issues": ["wrong_classification", "severity_misjudgment"],
        "detection_issues": ["missed_indicators", "false_positive"],
        "understanding_issues": ["context_misunderstanding", "language_interpretation"]
    }
    all_error_types = [value for _, value in ui_integration.error_type_options]
    for category, types in error_categories.items():
        for error_type in types:
            assert error_type in all_error_types, \
                f"Missing error type {error_type} from category {category}"
    # Test that all major question issue types are covered
    question_categories = {
        "content_issues": ["inappropriate_question", "insensitive_language"],
        "targeting_issues": ["wrong_scenario_targeting"],
        "clarity_issues": ["unclear_question", "leading_question"]
    }
    all_question_types = [value for _, value in ui_integration.question_issue_options]
    for category, types in question_categories.items():
        for issue_type in types:
            assert issue_type in all_question_types, \
                f"Missing question issue type {issue_type} from category {category}"
    print("✓ All predefined categories from documentation are included")
    return True
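

# The test_ functions above return booleans for the standalone runner below.
# pytest's default discovery would also collect them by name, though recent
# pytest versions warn when a test function returns a non-None value.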
def main():
    """Run all feedback UI integration tests."""
    print("=" * 60)
    print("FEEDBACK UI INTEGRATION TESTS")
    print("=" * 60)
    tests = [
        test_ui_integration_initialization,
        test_error_type_options,
        test_subcategory_mapping,
        test_question_issue_options,
        test_scenario_options,
        test_ui_component_creation,
        test_feedback_integration,
        test_predefined_categories_completeness
    ]
    passed = 0
    failed = 0
    for test in tests:
        try:
            print(f"\n{test.__name__.replace('_', ' ').title()}:")
            print("-" * 40)
            result = test()
            if result:
                passed += 1
                print("✓ PASSED")
            else:
                failed += 1
                print("✗ FAILED")
        except Exception as e:
            failed += 1
            print(f"✗ FAILED: {str(e)}")
    print("\n" + "=" * 60)
    print(f"RESULTS: {passed} passed, {failed} failed")
    print("=" * 60)
    if failed == 0:
        print("🎉 All feedback UI integration tests passed!")
        print("\n**Task 4.3: Feedback UI Integration**")
        print("✓ COMPLETED: Structured error category selection interface")
        print("✓ COMPLETED: Predefined subcategories from documentation")
        print("✓ COMPLETED: Pattern analysis display for reviewers")
        return True
    else:
        print("❌ Some tests failed. Please check the implementation.")
        return False


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
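
# Usage:
#     python tests/unit/test_feedback_ui_integration.py
# The sys.path shim at the top makes src/ importable regardless of the working
# directory; .verification_data/ is created relative to wherever the script is
# run. Exit status is 0 when every check passes, 1 otherwise.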