| """ | |
| PyTorch model architectures for stock prediction. | |
| These classes must match the architectures used during training. | |
| """ | |
| import torch | |
| import torch.nn as nn | |

class LSTMModel(nn.Module):
    """LSTM model for sequence prediction"""

    def __init__(self, input_size, hidden_size=128, num_layers=2, dropout=0.2):
        super(LSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True,
                            dropout=dropout if num_layers > 1 else 0)
        self.fc1 = nn.Linear(hidden_size, 64)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        x = x.unsqueeze(1)  # (batch, features) -> (batch, seq_len=1, features)
        # Zero-initialized hidden and cell states (PyTorch's default, made explicit)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.fc1(out[:, -1, :])  # last timestep -> classifier head
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out
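
# Illustrative usage (input_size=30 and batch size 8 are placeholder values,
# not taken from the original training setup):
#   model = LSTMModel(input_size=30)
#   logits = model(torch.randn(8, 30))  # -> shape (8, 2), one logit per class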

class GRUModel(nn.Module):
    """GRU model for sequence prediction"""

    def __init__(self, input_size, hidden_size=128, num_layers=2, dropout=0.2):
        super(GRUModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.gru = nn.GRU(input_size, hidden_size, num_layers,
                          batch_first=True,
                          dropout=dropout if num_layers > 1 else 0)
        self.fc1 = nn.Linear(hidden_size, 64)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        x = x.unsqueeze(1)  # add sequence dimension of length 1
        # GRU keeps a single hidden state (no cell state, unlike the LSTM)
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.gru(x, h0)
        out = self.fc1(out[:, -1, :])  # last timestep -> classifier head
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        return out

class FeedForwardNN(nn.Module):
    """Feed-forward neural network"""

    def __init__(self, input_size, hidden_sizes=(256, 128, 64), dropout=0.3):
        # Tuple default avoids the mutable-default-argument pitfall;
        # the layer sizes are unchanged from the trained architecture.
        super(FeedForwardNN, self).__init__()
        layers = []
        prev_size = input_size
        # Stack a Linear -> BatchNorm -> ReLU -> Dropout block per hidden size
        for hidden_size in hidden_sizes:
            layers.append(nn.Linear(prev_size, hidden_size))
            layers.append(nn.BatchNorm1d(hidden_size))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(dropout))
            prev_size = hidden_size
        layers.append(nn.Linear(prev_size, 2))  # 2-class output head
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        return self.network(x)
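
# Note: BatchNorm1d requires a batch size greater than 1 in training mode.
# Illustrative usage (input_size=30 is a placeholder value):
#   model = FeedForwardNN(input_size=30).eval()
#   logits = model(torch.randn(1, 30))  # eval mode accepts any batch size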

class BiLSTMModel(nn.Module):
    """Bidirectional LSTM model"""

    def __init__(self, input_size, hidden_size=64, num_layers=2, dropout=0.2):
        super(BiLSTMModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True,
                            dropout=dropout if num_layers > 1 else 0,
                            bidirectional=True)
        # A bidirectional LSTM concatenates forward and backward outputs,
        # so the feature dimension entering fc1 is hidden_size * 2
        self.fc1 = nn.Linear(hidden_size * 2, 128)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        x = x.unsqueeze(1)  # add sequence dimension of length 1
        # Initial states need num_layers * 2 slots (one set per direction)
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size).to(x.device)
        out, _ = self.lstm(x, (h0, c0))
        out = self.fc1(out[:, -1, :])
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.fc3(out)
        return out

class CNN1DModel(nn.Module):
    """1D Convolutional Neural Network"""

    def __init__(self, input_size, num_filters=64, dropout=0.3):
        super(CNN1DModel, self).__init__()
        self.conv1 = nn.Conv1d(1, num_filters, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(num_filters, num_filters * 2, kernel_size=3, padding=1)
        self.pool = nn.MaxPool1d(2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.batch_norm1 = nn.BatchNorm1d(num_filters)
        self.batch_norm2 = nn.BatchNorm1d(num_filters * 2)
        self.flatten = nn.Flatten()
        # Flattened size: the padding=1, kernel_size=3 convolutions preserve
        # length, and the two stride-2 pools halve it twice (input_size // 4)
        self.fc1 = nn.Linear(num_filters * 2 * (input_size // 4), 128)
        self.fc2 = nn.Linear(128, 2)

    def forward(self, x):
        x = x.unsqueeze(1)  # (batch, features) -> (batch, channels=1, features)
        x = self.conv1(x)
        x = self.batch_norm1(x)
        x = self.relu(x)
        x = self.pool(x)
        x = self.conv2(x)
        x = self.batch_norm2(x)
        x = self.relu(x)
        x = self.pool(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x
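
# Optional smoke test, not part of the original training code. The feature
# count (30) and batch size (8) below are placeholder assumptions chosen only
# to exercise each architecture and confirm the (batch, 2) output shape.
if __name__ == "__main__":
    input_size, batch_size = 30, 8
    x = torch.randn(batch_size, input_size)
    models = [
        LSTMModel(input_size),
        GRUModel(input_size),
        FeedForwardNN(input_size),
        BiLSTMModel(input_size),
        CNN1DModel(input_size),
    ]
    for model in models:
        model.eval()  # eval mode so BatchNorm/Dropout behave deterministically
        with torch.no_grad():
            out = model(x)
        assert out.shape == (batch_size, 2), (type(model).__name__, out.shape)
        print(f"{type(model).__name__}: output shape {tuple(out.shape)}")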