"""Standalone shape-sanity test for data_processor — runs without real PyTorch."""
import sys
import types
from unittest.mock import MagicMock
# 1. Mock PyTorch Class BEFORE importing data_processor
class MockTorch:
    """Minimal stand-in for the ``torch`` module.

    Exposes just enough surface (dtype placeholders, a ``Tensor`` type,
    and a handful of factory functions) for ``data_processor`` to import
    cleanly. The tensor factories pass their input straight through, so
    numpy arrays flow through the pipeline untouched.
    """

    def __init__(self):
        # Dtype placeholders referenced by consuming code.
        self.float32 = "float32"
        self.long = "long"

        class MockTensor:
            """Placeholder class so ``torch.Tensor`` annotations resolve."""
            pass

        self.Tensor = MockTensor

    # --- Pass-through factories: return the argument unchanged ---

    def FloatTensor(self, data):
        return data

    def LongTensor(self, data):
        return data

    def tensor(self, data):
        return data

    # --- Random/stacking helpers delegated to numpy ---
    # numpy is imported lazily inside each method because this class is
    # defined before numpy is imported at module level.

    def randn(self, *shape):
        import numpy as np
        return np.random.randn(*shape)

    def randint(self, low, high, size):
        import numpy as np
        return np.random.randint(low, high, size)

    def stack(self, tensors, axis=0):
        import numpy as np
        return np.stack(tensors, axis=axis)
# 2. Inject the mock into sys.modules BEFORE data_processor is imported,
# so its `import torch` resolves to the mock instead of the real package.
mock_torch = MockTorch()
sys.modules['torch'] = mock_torch
# 3. Now safe to import modules that (transitively) import torch.
import numpy as np
import pandas as pd
import os
# Make the current working directory importable so data_processor is found.
sys.path.append(os.getcwd())
from data_processor import AlphaDataProcessor
def test_logic():
    """Sanity-check the tensor shapes the data pipeline should produce.

    Uses numpy mocks (no real torch) to verify two contracts:
      1. DeepLOB input is 4-D ``(N, 2, T, 2*Levels)`` — already in the
         ``(N, C, H, W)`` layout that ``Conv2d(2, 16, ...)`` expects, so
         no ``unsqueeze(1)`` is needed.
      2. TRM input is 3-D ``(N, T, F)`` with ``F == 6`` features.

    Returns:
        bool: True when both shape checks pass, False otherwise.
        (Previously the function only printed results; returning a flag
        lets callers/test runners assert on the outcome.)
    """
    print("✅ Successfully imported data_processor with Mock Torch")
    ok = True

    # 1. Simulate the get_deeplob_tensors return value.
    # data_processor stacks [p_window, v_window] on axis 0, giving
    # (2, T, 2*Levels) per sample; collecting N samples yields
    # (N, 2, T, 2*Levels).
    N, T, Levels = 10, 100, 20
    mock_X = np.random.randn(N, 2, T, 2 * Levels)
    print(f"Mock DataProcessor Output Shape: {mock_X.shape}")

    # 2. DeepLOB's Conv2d(2, 16, ...) expects (N, C, H, W) = (N, 2, T, 2*Levels).
    # A 4-D (N, 2, 100, 40) array is correct as-is; unsqueeze(1) would
    # wrongly produce a 5-D (N, 1, 2, 100, 40) tensor.
    if mock_X.ndim == 4 and mock_X.shape[1] == 2:
        print("✅ Data Shape matches Conv2d Expectation (N, 2, T, F)")
        print(" -> (N, Channels=2, Height=100, Width=40)")
        print(" -> NO unsqueeze(1) needed!")
    else:
        ok = False
        print(f"❌ Data Shape Mismatch: {mock_X.shape}")

    # 3. The TRM branch expects (N, T, F) with F = 6 features
    # (Vol, Imb, CVD, Spr, Mom, OFI).
    N_trm, T_trm, F_trm = 10, 60, 6
    mock_X_trm = np.random.randn(N_trm, T_trm, F_trm)
    print(f"\nMock TRM Output Shape: {mock_X_trm.shape}")
    if mock_X_trm.ndim == 3 and mock_X_trm.shape[2] == 6:
        print("✅ TRM Shape matches Transformer Expectation (N, T, F)")
    else:
        ok = False
        print(f"❌ TRM Shape Mismatch: {mock_X_trm.shape}")

    return ok
# Run the shape checks only when executed as a script (not on import).
if __name__ == "__main__":
    test_logic()