File size: 2,475 Bytes
c2f9396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import pytest
import asyncio
from unittest.mock import patch, AsyncMock
from fastapi.testclient import TestClient

from app.main import app
from app.llm_manager import LLMManager


@pytest.fixture(scope="session")
def event_loop():
    """Provide a single asyncio event loop shared by the whole test session.

    Yields the loop to the tests, then closes it during session teardown.
    """
    session_loop = asyncio.new_event_loop()
    yield session_loop
    session_loop.close()


@pytest.fixture
def mock_llm_manager():
    """Patch ``app.main.llm_manager`` with a fully configured mock.

    The mock reports a loaded model and streams a fixed two-token
    completion ("Hello", " world") followed by a stop chunk, mimicking the
    OpenAI chat-completion chunk shape.
    """
    with patch("app.main.llm_manager") as manager:
        # Static model state the endpoints read.
        manager.is_loaded = True
        manager.model_type = "mock"
        manager.get_model_info.return_value = {
            "id": "llama-2-7b-chat",
            "object": "model",
            "created": 1234567890,
            "owned_by": "huggingface",
            "type": "mock",
            "context_window": 2048,
            "is_loaded": True,
        }

        async def fake_generate_stream(request):
            """Yield a canned streaming response for any request."""

            def chunk(chunk_id, delta, finish_reason):
                # One chat.completion.chunk payload; only id/delta/finish vary.
                return {
                    "id": chunk_id,
                    "object": "chat.completion.chunk",
                    "created": 1234567890,
                    "model": request.model,
                    "choices": [
                        {
                            "index": 0,
                            "delta": delta,
                            "finish_reason": finish_reason,
                        }
                    ],
                }

            yield chunk("test-id-1", {"content": "Hello"}, None)
            yield chunk("test-id-2", {"content": " world"}, None)
            yield chunk("test-id-3", {}, "stop")

        manager.generate_stream = fake_generate_stream
        yield manager


@pytest.fixture
def client(mock_llm_manager):
    """Return a synchronous TestClient for the app with the LLM manager mocked.

    Depending on ``mock_llm_manager`` guarantees the patch is active for the
    lifetime of the client.
    """
    test_client = TestClient(app)
    return test_client


@pytest.fixture
def async_client(mock_llm_manager):
    """Return an async httpx client wired to the app with the LLM manager mocked.

    Fix: the ``AsyncClient(app=...)`` shortcut was deprecated in httpx 0.26
    and removed in 0.28; routing through an explicit ``ASGITransport`` works
    on both old and current httpx releases.

    NOTE(review): the client is returned, not yielded, so tests are
    responsible for closing it (e.g. ``async with async_client: ...``).
    """
    from httpx import ASGITransport, AsyncClient

    return AsyncClient(transport=ASGITransport(app=app), base_url="http://test")