# Standalone shape-verification script: mocks the `torch` module before
# importing data_processor so shape logic can be tested without PyTorch.
| import sys | |
| import types | |
| from unittest.mock import MagicMock | |
# 1. Mock PyTorch Class BEFORE importing data_processor
class MockTorch:
    """Minimal stand-in for the `torch` module.

    Tensor factories pass numpy arrays straight through (or delegate to
    numpy), so downstream shape logic can be exercised without a real
    PyTorch installation.  An instance of this class is injected into
    ``sys.modules['torch']`` below.
    """

    def __init__(self):
        # dtype placeholders: data_processor only needs the attributes
        # to exist, not to be real torch dtypes.
        self.float32 = "float32"
        self.long = "long"

        # Mock Tensor class so type hints like `torch.Tensor` resolve.
        class MockTensor:
            pass

        self.Tensor = MockTensor

    # --- tensor constructors: return the input unchanged -------------
    def FloatTensor(self, x):
        return x

    def LongTensor(self, x):
        return x

    def tensor(self, x):
        return x

    # --- random / combining ops: delegate to numpy --------------------
    # `np` is bound at module level before any of these methods can run
    # (the script imports numpy before data_processor is imported).
    def randn(self, *args):
        return np.random.randn(*args)

    def randint(self, low, high, size):
        return np.random.randint(low, high, size)

    def stack(self, tensors, axis=0):
        # NOTE(review): real torch.stack calls this parameter `dim`;
        # keep `axis` to match the original mock's keyword interface.
        return np.stack(tensors, axis=axis)
# 2. Inject into sys.modules so any later `import torch` (e.g. inside
#    data_processor) resolves to the mock instead of real PyTorch.
# NOTE(review): sys.modules normally holds module objects; a class
# instance works here because only attribute access is performed on it.
mock_torch = MockTorch()
sys.modules['torch'] = mock_torch
# 3. Now safe to import: data_processor's `import torch` hits the mock
#    injected above, so no real PyTorch is required.
import numpy as np
import pandas as pd
import os
# Add parent dir: make the current working directory importable so the
# sibling data_processor module can be found.
sys.path.append(os.getcwd())
from data_processor import AlphaDataProcessor
def test_logic():
    """Verify tensor shape contracts for the two model input pipelines.

    Uses mock numpy data shaped like data_processor's outputs:
      1. DeepLOB input must be 4-D: (N, channels=2, T, 2*Levels),
         matching Conv2d(2, 16, ...)'s (N, C, H, W) expectation.
      2. TRM input must be 3-D: (N, T, F).

    Returns:
        bool: True if every shape check passed, False otherwise.
    """
    # Original output used mojibake status glyphs ("β"); replaced with
    # unambiguous [OK]/[FAIL] markers.
    print("[OK] Successfully imported data_processor with Mock Torch")

    ok = True

    # 1. Simulate get_deeplob_tensors return:
    #    X = stack([p_window, v_window], axis=0) -> (2, T, 2*Levels) per
    #    sample, then np.array over N samples -> (N, 2, T, 2*Levels).
    N, T, levels = 10, 100, 20
    mock_X = np.random.randn(N, 2, T, 2 * levels)
    print(f"Mock DataProcessor Output Shape: {mock_X.shape}")

    # 2. Verify DeepLOB input requirement.
    #    A 4-D array with channel dim == 2 is correct as-is; an extra
    #    unsqueeze(1) would make it 5-D and break the conv input.
    if mock_X.ndim == 4 and mock_X.shape[1] == 2:
        print("[OK] Data Shape matches Conv2d Expectation (N, 2, T, F)")
        # Derive the printed dims from the variables instead of
        # hardcoding 100/40.
        print(f"   -> (N, Channels=2, Height={T}, Width={2 * levels})")
        print("   -> NO unsqueeze(1) needed!")
    else:
        ok = False
        print(f"[FAIL] Data Shape Mismatch: {mock_X.shape}")

    # 3. Verify TRM logic: transformer expects (N, T, F) with
    #    F = 6 features (Vol, Imb, CVD, Spr, Mom, OFI).
    N_trm, T_trm, F_trm = 10, 60, 6
    mock_X_trm = np.random.randn(N_trm, T_trm, F_trm)
    print(f"\nMock TRM Output Shape: {mock_X_trm.shape}")

    # Compare against F_trm rather than a repeated magic 6.
    if mock_X_trm.ndim == 3 and mock_X_trm.shape[2] == F_trm:
        print("[OK] TRM Shape matches Transformer Expectation (N, T, F)")
    else:
        ok = False
        print(f"[FAIL] TRM Shape Mismatch: {mock_X_trm.shape}")

    return ok
# Run the shape checks when executed as a script (not on import).
if __name__ == "__main__":
    test_logic()