"""
Pytest configuration and fixtures for SPARKNET tests
Following FAANG best practices for test infrastructure
"""

import pytest
import asyncio
import sys
from pathlib import Path
from unittest.mock import MagicMock, AsyncMock

# Make the project root (needed for the "src.*" imports used below) and src/
# importable regardless of how pytest resolves the rootdir.
_PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(_PROJECT_ROOT))
sys.path.insert(0, str(_PROJECT_ROOT / "src"))


# ==============================================================================
# Async Configuration
# ==============================================================================

@pytest.fixture(scope="session")
def event_loop():
    """Create a session-scoped event loop for async tests.

    Overrides pytest-asyncio's default function-scoped ``event_loop`` fixture
    so async fixtures can share one loop across the session. (Newer
    pytest-asyncio releases prefer configuring the loop scope instead of
    overriding this fixture.)
    """
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()


# ==============================================================================
# Mock LLM Fixtures
# ==============================================================================

@pytest.fixture
def mock_ollama_client():
    """Mock Ollama client for unit tests."""
    client = MagicMock()
    client.generate = MagicMock(return_value="Mock LLM response")
    client.chat = MagicMock(return_value="Mock chat response")
    client.list_models = MagicMock(return_value=["llama3.2:latest", "qwen2.5:14b"])
    return client


@pytest.fixture
def mock_langchain_client():
    """Mock LangChain Ollama client for unit tests."""
    client = MagicMock()

    # Mock LLM
    mock_llm = MagicMock()
    mock_llm.invoke = MagicMock(return_value=MagicMock(content="Mock response"))
    mock_llm.ainvoke = AsyncMock(return_value=MagicMock(content="Mock async response"))

    client.get_llm = MagicMock(return_value=mock_llm)
    client.get_embeddings = MagicMock(return_value=MagicMock())

    return client
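
# Illustrative usage of the mock LLM fixtures (hypothetical test shown only as
# a comment; real tests live in the test modules):
#
#   def test_llm_is_stubbed(mock_langchain_client):
#       llm = mock_langchain_client.get_llm()
#       assert llm.invoke("any prompt").content == "Mock response"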


# ==============================================================================
# Mock Agent Fixtures
# ==============================================================================

@pytest.fixture
def mock_memory_agent():
    """Mock memory agent for unit tests."""
    agent = MagicMock()
    agent.retrieve_relevant_context = AsyncMock(return_value=[])
    agent.store_episode = AsyncMock(return_value=None)
    agent.search_stakeholders = AsyncMock(return_value=[])
    return agent


@pytest.fixture
def mock_planner_agent():
    """Mock planner agent for unit tests."""
    from src.agents.base_agent import Task

    agent = MagicMock()

    mock_task = Task(
        id="test_task",
        description="Test task",
        status="completed",
        result={
            "task_graph": MagicMock(
                subtasks={},
                get_execution_order=MagicMock(return_value=[])
            ),
            "execution_order": [],
            "total_subtasks": 0,
        }
    )
    agent.process_task = AsyncMock(return_value=mock_task)

    return agent


@pytest.fixture
def mock_critic_agent():
    """Mock critic agent for unit tests."""
    from src.agents.base_agent import Task

    agent = MagicMock()

    mock_validation = MagicMock(
        overall_score=0.9,
        issues=[],
        suggestions=[],
        dimension_scores={"completeness": 0.9, "clarity": 0.9}
    )

    mock_task = Task(
        id="test_task",
        description="Test task",
        status="completed",
        result=mock_validation
    )
    agent.process_task = AsyncMock(return_value=mock_task)
    agent.get_feedback_for_iteration = MagicMock(return_value="Good quality output")

    return agent
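
# Illustrative usage of the mock agent fixtures (hypothetical async test shown
# as a comment; assumes pytest-asyncio is installed):
#
#   @pytest.mark.asyncio
#   async def test_planner_returns_empty_graph(mock_planner_agent):
#       task = await mock_planner_agent.process_task("any input")
#       assert task.status == "completed"
#       assert task.result["total_subtasks"] == 0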


# ==============================================================================
# Test Data Fixtures
# ==============================================================================

@pytest.fixture
def sample_patent_analysis():
    """Sample patent analysis result for testing."""
    return {
        "title": "Test Patent: Novel AI System",
        "abstract": "A system for processing natural language using transformers",
        "claims": [
            "Claim 1: A method for natural language processing",
            "Claim 2: A system implementing the method of claim 1"
        ],
        "trl_level": 4,
        "innovation_domains": ["Artificial Intelligence", "Natural Language Processing"],
        "key_innovations": ["Novel attention mechanism", "Efficient inference"],
        "filing_date": "2023-01-15",
        "patent_number": "US12345678",
    }


@pytest.fixture
def sample_market_analysis():
    """Sample market analysis result for testing."""
    return {
        "opportunities": [
            {
                "name": "Enterprise NLP Market",
                "market_size": 12.5,
                "growth_rate": 0.25,
                "relevance_score": 0.85,
            },
            {
                "name": "Conversational AI",
                "market_size": 8.2,
                "growth_rate": 0.32,
                "relevance_score": 0.78,
            },
        ],
        "competitive_landscape": "Moderate competition with major players",
        "commercialization_potential": 0.8,
    }


@pytest.fixture
def sample_stakeholder_match():
    """Sample stakeholder match for testing."""
    return {
        "name": "Tech Corp Inc",
        "type": "company",
        "domain": "Enterprise Software",
        "relevance_score": 0.92,
        "contact_info": {
            "email": "licensing@techcorp.example",
            "phone": "+1-555-0123",
        },
        "match_rationale": "Strong alignment with NLP focus areas",
    }
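
# Illustrative usage of the sample data fixtures (hypothetical test shown as a
# comment):
#
#   def test_patent_fields_present(sample_patent_analysis):
#       assert sample_patent_analysis["trl_level"] == 4
#       assert len(sample_patent_analysis["claims"]) == 2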


# ==============================================================================
# Configuration Fixtures
# ==============================================================================

@pytest.fixture
def test_config():
    """Test configuration dictionary."""
    return {
        "gpu": {
            "primary": 0,
            "fallback": [1, 2, 3],
            "max_memory_per_model": "8GB",
        },
        "ollama": {
            "host": "localhost",
            "port": 11434,
            "default_model": "llama3.2:latest",
            "timeout": 300,
        },
        "memory": {
            "vector_store": "chromadb",
            "embedding_model": "nomic-embed-text:latest",
            "max_context_length": 4096,
            "persist_directory": "/tmp/sparknet_test_memory",
        },
        "workflow": {
            "max_parallel_tasks": 5,
            "task_timeout": 600,
            "retry_attempts": 3,
        },
    }
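
# Illustrative usage of the test configuration (hypothetical test shown as a
# comment):
#
#   def test_config_defaults(test_config):
#       assert test_config["ollama"]["port"] == 11434
#       assert test_config["workflow"]["retry_attempts"] == 3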


# ==============================================================================
# Cleanup Fixtures
# ==============================================================================

@pytest.fixture(autouse=True)
def cleanup_test_files():
    """Clean up any test files after each test."""
    yield

    # Clean up test output directory
    test_output_dir = Path("/tmp/sparknet_test_outputs")
    if test_output_dir.exists():
        import shutil
        shutil.rmtree(test_output_dir, ignore_errors=True)


# ==============================================================================
# Markers
# ==============================================================================

def pytest_configure(config):
    """Configure pytest markers."""
    config.addinivalue_line(
        "markers", "slow: mark test as slow (deselect with '-m \"not slow\"')"
    )
    config.addinivalue_line(
        "markers", "integration: mark test as integration test"
    )
    config.addinivalue_line(
        "markers", "gpu: mark test as requiring GPU"
    )
    config.addinivalue_line(
        "markers", "ollama: mark test as requiring Ollama server"
    )
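

# Illustrative marker usage (hypothetical tests shown as comments):
#
#   @pytest.mark.slow
#   def test_full_pipeline_end_to_end():
#       ...
#
#   @pytest.mark.ollama
#   def test_live_ollama_generation():
#       ...
#
# Slow tests can be deselected with:  pytest -m "not slow"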