import torch
import torch.nn as nn


class InceptionModule(nn.Module):
    """Inception block with four parallel branches whose outputs are
    concatenated along the channel axis (output channels = 4 * out_channels)."""

    def __init__(self, in_channels, out_channels):
        super(InceptionModule, self).__init__()
        # Parallel convolutions over the (time, feature) grid
        # Branch 1: 1x1 conv
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1),
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(out_channels)
        )
        # Branch 2: 1x1 bottleneck followed by a 1x3 conv
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1)),
            nn.LeakyReLU(negative_slope=0.01),
            nn.Conv2d(out_channels, out_channels, kernel_size=(1, 3), padding=(0, 1)),
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(out_channels)
        )
        # Branch 3: 1x1 bottleneck followed by a 1x5 conv
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1)),
            nn.LeakyReLU(negative_slope=0.01),
            nn.Conv2d(out_channels, out_channels, kernel_size=(1, 5), padding=(0, 2)),
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(out_channels)
        )
        # Branch 4: 1x3 max-pool followed by a 1x1 conv
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=(1, 3), stride=1, padding=(0, 1)),
            nn.Conv2d(in_channels, out_channels, kernel_size=1),
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(out_channels)
        )

    def forward(self, x):
        b1 = self.branch1(x)
        b2 = self.branch2(x)
        b3 = self.branch3(x)
        b4 = self.branch4(x)
        return torch.cat([b1, b2, b3, b4], dim=1)
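
# Shape sketch (illustrative, not part of the model): every branch preserves the
# spatial dims and emits out_channels maps, so InceptionModule(16, 8) yields
# 4 * 8 = 32 channels:
# >>> m = InceptionModule(16, 8)
# >>> m(torch.randn(1, 16, 100, 20)).shape
# torch.Size([1, 32, 100, 20])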
class SEBlock(nn.Module):
    """
    Squeeze-and-Excitation Block (Channel Attention).
    Recalibrates feature maps adaptively.
    Hu et al. (2018).
    """

    def __init__(self, channels, reduction=16):
        super(SEBlock, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)  # Squeeze: global average pool
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c = x.shape[0], x.shape[1]
        # b, c, _, _ = x.size()  # This tuple unpacking can fail in ONNX tracing
        y = self.avg_pool(x).view(b, c)  # Mean per channel
        y = self.fc(y).view(b, c, 1, 1)  # Attention weights
        return x * y.expand_as(x)        # Scale features
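
# Usage sketch (illustrative): the SE block only rescales channels, so the
# output shape matches the input shape exactly:
# >>> se = SEBlock(32)
# >>> se(torch.randn(1, 32, 100, 20)).shape
# torch.Size([1, 32, 100, 20])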
class DeepLOB(nn.Module):
    """
    DeepLOB with Inception + SE-Block Attention (Academic Standard).
    """

    def __init__(self, y_len=3):
        super().__init__()
        self.y_len = y_len
        # Feature extractor: three Conv -> Inception -> SE-attention blocks
        self.block1 = nn.Sequential(
            nn.Conv2d(2, 16, kernel_size=(1, 2), stride=(1, 2)),  # Input channels=2 (P, V)
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(16),
            InceptionModule(16, 8),  # Out: 4 * 8 = 32 channels
            SEBlock(32)              # Attention
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(32, 16, kernel_size=(1, 2), stride=(1, 2)),
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(16),
            InceptionModule(16, 8),  # Out: 32
            SEBlock(32)              # Attention
        )
        self.block3 = nn.Sequential(
            nn.Conv2d(32, 16, kernel_size=(1, 10)),  # Collapses the remaining width to 1
            nn.LeakyReLU(negative_slope=0.01),
            nn.BatchNorm2d(16),
            InceptionModule(16, 8),  # Out: 32
            SEBlock(32)              # Attention
        )
        # Temporal model over the 100-step sequence
        self.lstm = nn.LSTM(input_size=32, hidden_size=64, num_layers=1, batch_first=True)
        self.fc = nn.Linear(64, y_len)
    def forward(self, x):
        # x: (N, 2, 100, 40)
        x = self.block1(x)  # -> (N, 32, 100, 20)
        x = self.block2(x)  # -> (N, 32, 100, 10)
        x = self.block3(x)  # -> (N, 32, 100, 1)
        # Reshape for LSTM: (N, T, Features)
        x = x.permute(0, 2, 1, 3)
        x = x.reshape(x.shape[0], x.shape[1], -1)  # -> (N, 100, 32)
        if x.dim() == 2:  # Defensive guard; the reshape above already yields 3-D
            x = x.unsqueeze(0)
        # Explicit hidden-state init keeps the graph ONNX-friendly
        h0 = torch.zeros(1, x.size(0), 64, device=x.device)
        c0 = torch.zeros(1, x.size(0), 64, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = out[:, -1, :]  # Last time step only
        out = self.fc(out)
        return out
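

# Smoke test (illustrative sketch; the batch size and file name below are
# assumptions, not from the source). Exercises the documented (N, 2, 100, 40)
# input contract end to end.
if __name__ == "__main__":
    model = DeepLOB(y_len=3)
    model.eval()
    dummy = torch.randn(4, 2, 100, 40)  # (batch, [P, V], 100 time steps, 40 features)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([4, 3])

    # Possible ONNX export, matching the ONNX-friendly choices above
    # (opset and tensor names are assumptions; uncomment to try):
    # torch.onnx.export(model, dummy, "deeplob.onnx",
    #                   input_names=["lob"], output_names=["logits"],
    #                   opset_version=13)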