"""Unit tests for message history utilities."""
import pytest
pytestmark = pytest.mark.unit
from src.utils.message_history import (
convert_gradio_to_message_history,
create_relevance_processor,
create_truncation_processor,
message_history_to_string,
)
def test_convert_gradio_to_message_history_empty():
    """An empty Gradio history converts to an empty message list."""
    assert convert_gradio_to_message_history([]) == []
def test_convert_gradio_to_message_history_single_turn():
    """A single user/assistant exchange yields exactly two messages."""
    history = [
        {"role": "user", "content": "What is AI?"},
        {"role": "assistant", "content": "AI is artificial intelligence."},
    ]
    converted = convert_gradio_to_message_history(history)
    assert len(converted) == 2
def test_convert_gradio_to_message_history_multiple_turns():
    """Two full turns (four entries) convert to four messages."""
    turns = [
        ("What is AI?", "AI is artificial intelligence."),
        ("Tell me more", "AI includes machine learning..."),
    ]
    history = []
    for question, answer in turns:
        history.append({"role": "user", "content": question})
        history.append({"role": "assistant", "content": answer})
    assert len(convert_gradio_to_message_history(history)) == 4
def test_convert_gradio_to_message_history_max_messages():
    """max_messages caps the converted history at the most recent entries."""
    # Build 15 turns (30 entries) via a comprehension instead of a loop.
    history = [
        {"role": role, "content": f"{label} {i}"}
        for i in range(15)
        for role, label in (("user", "Message"), ("assistant", "Response"))
    ]
    converted = convert_gradio_to_message_history(history, max_messages=10)
    # Only the most recent 10 messages should survive the cap.
    assert len(converted) <= 10
def test_convert_gradio_to_message_history_filters_invalid():
    """System-role and empty-content entries are dropped during conversion."""
    history = [
        {"role": "user", "content": "Valid message"},
        {"role": "system", "content": "Should be filtered"},  # non-chat role
        {"role": "assistant", "content": ""},  # empty content
        {"role": "assistant", "content": "Valid response"},
    ]
    converted = convert_gradio_to_message_history(history)
    # Only the user message and the non-empty assistant reply remain.
    assert len(converted) == 2
def test_message_history_to_string_empty():
    """An empty message history renders as the empty string."""
    assert message_history_to_string([]) == ""
def test_message_history_to_string_format():
    """Test string conversion format.

    Uses pytest.importorskip so only a missing pydantic_ai triggers a skip;
    the original try/except ImportError wrapped the assertions too, keeping
    the try body far larger than the code that can actually raise.
    """
    pytest.importorskip("pydantic_ai")
    from pydantic_ai import ModelRequest, ModelResponse
    from pydantic_ai.messages import TextPart, UserPromptPart

    messages = [
        ModelRequest(parts=[UserPromptPart(content="Question 1")]),
        ModelResponse(parts=[TextPart(content="Answer 1")]),
    ]
    result = message_history_to_string(messages)
    # Rendered transcript carries a header plus role labels for each side.
    assert "PREVIOUS CONVERSATION" in result
    assert "User:" in result
    assert "Assistant:" in result
def test_message_history_to_string_max_messages():
    """Test string conversion with max_messages limit.

    Uses pytest.importorskip instead of a broad try/except ImportError so
    the skip can only come from the import, not the test body.
    """
    pytest.importorskip("pydantic_ai")
    from pydantic_ai import ModelRequest, ModelResponse
    from pydantic_ai.messages import TextPart, UserPromptPart

    messages = []
    for i in range(10):  # ten full turns (20 messages)
        messages.append(ModelRequest(parts=[UserPromptPart(content=f"Question {i}")]))
        messages.append(ModelResponse(parts=[TextPart(content=f"Answer {i}")]))
    result = message_history_to_string(messages, max_messages=3)
    # Truncated to the most recent 3 messages, but still non-empty.
    assert result != ""
def test_create_truncation_processor():
    """Test truncation processor factory.

    The factory check runs unconditionally; the behavioral check skips via
    pytest.importorskip when pydantic_ai is absent, replacing the original
    broad try/except ImportError around the whole body.
    """
    processor = create_truncation_processor(max_messages=5)
    assert callable(processor)

    pytest.importorskip("pydantic_ai")
    from pydantic_ai import ModelRequest
    from pydantic_ai.messages import UserPromptPart

    messages = [
        ModelRequest(parts=[UserPromptPart(content=f"Message {i}")])
        for i in range(10)
    ]
    # Ten inputs truncated down to the configured maximum of five.
    assert len(processor(messages)) == 5
def test_create_relevance_processor():
    """Test relevance processor factory.

    The factory check runs unconditionally; the behavioral check skips via
    pytest.importorskip when pydantic_ai is absent, replacing the original
    broad try/except ImportError around the whole body.
    """
    processor = create_relevance_processor(min_length=10)
    assert callable(processor)

    pytest.importorskip("pydantic_ai")
    from pydantic_ai import ModelRequest, ModelResponse
    from pydantic_ai.messages import TextPart, UserPromptPart

    messages = [
        ModelRequest(parts=[UserPromptPart(content="Short")]),  # too short
        ModelRequest(parts=[UserPromptPart(content="This is a longer message")]),  # valid
        ModelResponse(parts=[TextPart(content="OK")]),  # too short
        ModelResponse(parts=[TextPart(content="This is a valid response")]),  # valid
    ]
    # Only the two messages with content length >= 10 survive.
    assert len(processor(messages)) == 2