File size: 2,769 Bytes
54c5666
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
"""Pytest configuration and fixtures"""
import pytest
import torch
import sys
import os

# Add src to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))


@pytest.fixture(scope="session")
def device():
    """Session-wide torch device: CUDA when available, CPU otherwise."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')


@pytest.fixture
def small_model_config():
    """Deliberately tiny ModelConfig so model-level unit tests stay fast."""
    from src.models.architecture import ModelConfig

    # Minimal dimensions: 2 layers, 128-dim embeddings, 1k-token vocab;
    # flash attention and checkpointing are off for CPU-friendly tests.
    params = dict(
        vocab_size=1000,
        n_positions=128,
        n_embd=128,
        n_layer=2,
        n_head=4,
        n_kv_head=2,
        intermediate_size=512,
        flash_attention=False,
        gradient_checkpointing=False,
    )
    return ModelConfig(**params)


@pytest.fixture
def tiny_ultrathink_config(small_model_config):
    """Tiny UltraThinkConfig for testing.

    Reuses the ``small_model_config`` fixture for the inner ModelConfig
    (previously the same construction was duplicated inline, so the two
    fixtures could silently drift apart). All optional subsystems are
    disabled to keep construction cheap.
    """
    from src.models.ultrathink import UltraThinkConfig

    return UltraThinkConfig(
        model_config=small_model_config,
        enable_dre=False,
        enable_constitutional=False,
        enable_moe=False,
        enable_multimodal=False,
        enable_rlhf=False,
    )


@pytest.fixture
def sample_batch(device):
    """Small random language-model batch (input_ids / attention_mask / labels) on *device*."""
    batch_size, seq_len, vocab_size = 2, 16, 1000

    input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
    labels = torch.randint(0, vocab_size, (batch_size, seq_len), device=device)
    # All positions attended; long dtype to match the ids.
    attention_mask = torch.ones(batch_size, seq_len, device=device, dtype=torch.long)

    return {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
        'labels': labels,
    }


@pytest.fixture
def temp_dir(tmp_path):
    """Project-named alias for pytest's built-in ``tmp_path`` fixture.

    Returns the per-test temporary directory (a ``pathlib.Path``) so tests can
    request ``temp_dir`` without depending on the pytest-internal name.
    """
    return tmp_path


def pytest_configure(config):
    """Register the suite's custom markers (slow / gpu / integration / unit)."""
    custom_markers = (
        "slow: mark test as slow running",
        "gpu: mark test as requiring GPU",
        "integration: mark test as integration test",
        "unit: mark test as unit test",
    )
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)


def pytest_collection_modifyitems(config, items):
    """Auto-skip tests marked ``gpu`` when CUDA is not available."""
    if torch.cuda.is_available():
        return  # GPU present: leave the collection untouched
    skip_gpu = pytest.mark.skip(reason="GPU not available")
    for item in items:
        if "gpu" in item.keywords:
            item.add_marker(skip_gpu)