#!/usr/bin/env python3
"""
Test script for TriageQuestionGenerator functionality.
"""
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from config.prompt_management.triage_question_generator import TriageQuestionGenerator
from config.prompt_management.data_models import ScenarioType, ConversationHistory
def test_triage_question_generator():
    """Exercise the TriageQuestionGenerator across eight scenarios.

    Covers scenario identification, scenario creation, targeted question
    generation, effectiveness scoring, pattern retrieval, variable
    extraction, fallback questions, and conversation-context integration.

    Returns:
        bool: True when every check passes.  Any failing check raises
        AssertionError instead of returning, so a plain return always
        means success.
    """
    print("Testing TriageQuestionGenerator...")

    # Initialize generator
    generator = TriageQuestionGenerator()
    print("β TriageQuestionGenerator initialized")

    # Test 1: Scenario identification — each statement must map to the
    # expected ScenarioType.
    print("\n1. Testing scenario identification...")
    test_statements = [
        ("I used to love gardening, but now I can't", ScenarioType.LOSS_OF_INTEREST),
        ("My mother passed away last month", ScenarioType.LOSS_OF_LOVED_ONE),
        ("I don't have anyone to help me", ScenarioType.NO_SUPPORT),
        ("I feel some stress", ScenarioType.VAGUE_STRESS),
        ("I can't sleep at night", ScenarioType.SLEEP_ISSUES),
    ]
    for statement, expected_type in test_statements:
        identified_type = generator.identify_scenario_type(statement)
        assert identified_type == expected_type, f"'{statement}' β Expected {expected_type.value}, got {identified_type}"
        print(f" β '{statement}' β {expected_type.value}")

    # Test 2: Scenario creation from statements — a scenario object of the
    # right type must be built for every statement.
    print("\n2. Testing scenario creation...")
    for statement, expected_type in test_statements:
        scenario = generator.create_scenario_from_statement(statement)
        assert scenario is not None, f"Failed to create scenario for: {statement}"
        assert scenario.scenario_type == expected_type, f"Wrong scenario type for: {statement}"
        print(f" β Created scenario for: {expected_type.value}")
        print(f" Context clues: {len(scenario.context_clues)}")
        print(f" Question patterns: {len(scenario.question_patterns)}")

    # Test 3: Targeted question generation — generated text must be a
    # non-empty string ending in '?'.
    print("\n3. Testing targeted question generation...")
    for statement, expected_type in test_statements:
        scenario = generator.create_scenario_from_statement(statement)
        assert scenario is not None, f"No scenario created for: {statement}"
        question = generator.generate_targeted_question(scenario)
        print(f" β {expected_type.value}:")
        print(f" Statement: {statement}")
        print(f" Question: {question}")
        # Validate question is not empty and is a question
        assert len(question.strip()) > 0, "Empty question generated"
        assert question.strip().endswith('?'), "Generated text is not a question"

    # Test 4: Question effectiveness validation — informational only; a
    # low score is reported but does not fail the run.
    print("\n4. Testing question effectiveness validation...")
    test_questions = [
        ("Is that something that's been weighing on you emotionally, or is it more about circumstances?", "loss_of_interest", 0.7),
        ("How are you feeling?", "loss_of_interest", 0.3),
        ("I'm sorry for your loss. How have you been coping with this?", "loss_of_loved_one", 0.7),
        ("That's sad.", "loss_of_loved_one", 0.2),
    ]
    for question, scenario, min_expected_score in test_questions:
        score = generator.validate_question_effectiveness(question, scenario)
        if score >= min_expected_score:
            print(f" β '{question[:40]}...' β Score: {score:.2f}")
        else:
            print(f" β '{question[:40]}...' β Score: {score:.2f} (expected >= {min_expected_score})")

    # Test 5: Question patterns retrieval — every ScenarioType should be
    # queryable by its string value.
    print("\n5. Testing question patterns retrieval...")
    for scenario_type in ScenarioType:
        patterns = generator.get_question_patterns(scenario_type.value)
        print(f" β {scenario_type.value}: {len(patterns)} patterns")
        if patterns:
            sample_pattern = patterns[0]
            print(f" Sample: {sample_pattern.template[:60]}...")

    # Test 6: Variable extraction and template rendering — pokes the
    # private _scenario_patterns / _extract_variables API directly.
    print("\n6. Testing variable extraction...")
    test_cases = [
        ("I used to love gardening, but now I can't", ScenarioType.LOSS_OF_INTEREST),
        ("My mother passed away", ScenarioType.LOSS_OF_LOVED_ONE),
        ("I feel stressed", ScenarioType.VAGUE_STRESS),
    ]
    for statement, scenario_type in test_cases:
        patterns = generator._scenario_patterns.get(scenario_type, [])
        if patterns:
            variables = generator._extract_variables(statement, patterns[0])
            print(f" β '{statement}' β Variables: {variables}")
        else:
            print(f" β No patterns for {scenario_type.value}")

    # Test 7: Fallback question generation — unclassifiable statements
    # must still yield a question.
    print("\n7. Testing fallback question generation...")
    fallback_statements = [
        "Something is wrong",
        "I don't know what to do",
        "This is confusing",
    ]
    for statement in fallback_statements:
        fallback_question = generator._generate_fallback_question(statement)
        print(f" β '{statement}' β '{fallback_question}'")
        assert fallback_question.endswith('?'), "Fallback is not a question"

    # Test 8: Context integration — a mock ConversationHistory should
    # enrich the created scenario's context clues.
    print("\n8. Testing context integration...")
    # Create mock conversation history
    from config.prompt_management.data_models import Message
    from datetime import datetime
    context = ConversationHistory(
        messages=[Message(content="Previous message", classification="yellow", timestamp=datetime.fromisoformat("2024-01-01T00:00:00"))],
        distress_indicators_found=["sleep_difficulties"],
        context_flags=["medical_context", "previous_distress"],
    )
    statement = "I can't sleep"
    scenario = generator.create_scenario_from_statement(statement, context)
    assert scenario is not None, "Failed to integrate context"
    print(f" β Context integrated: {len(scenario.context_clues)} clues")
    print(f" Context clues: {scenario.context_clues}")

    print("\nβ All TriageQuestionGenerator tests passed!")
    # Fix: the __main__ guard checks this return value; the previous
    # implicit None was falsy and made the script exit 1 even on success.
    return True
if __name__ == "__main__":
    # Success is signalled by the test function completing without an
    # AssertionError.  (Checking the return value alone is unreliable when
    # the function returns None, which is falsy.)
    try:
        test_triage_question_generator()
    except AssertionError as exc:
        print(f"Test failed: {exc}")
        sys.exit(1)
    sys.exit(0)