"""core/models.py — PyTorch model definitions for StockPredict time-series forecasting."""
import math

import torch
import torch.nn as nn
class BaseTimeSeriesModel(nn.Module):
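    """Shared base class: gives every model a reset_weights() re-initialization hook."""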
def __init__(self):
super(BaseTimeSeriesModel, self).__init__()
    def reset_weights(self):
        # Recurse with modules() rather than children() so layers nested inside
        # nn.Sequential heads are re-initialized as well.
        for layer in self.modules():
            if hasattr(layer, 'reset_parameters'):
                layer.reset_parameters()
class LSTMModel(BaseTimeSeriesModel):
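    """Stacked LSTM mapping (batch, seq_len, input_size) to (batch, output_size)
    from the hidden state of the last time step."""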
def __init__(self, input_size, hidden_size=64, num_layers=2, dropout=0.2, output_size=1):
super(LSTMModel, self).__init__()
self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
batch_first=True, dropout=dropout)
self.fc = nn.Linear(hidden_size, output_size)
    def forward(self, x):
        out, _ = self.lstm(x)          # out: (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])  # predict from the last time step
class GRUModel(BaseTimeSeriesModel):
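    """Stacked GRU mapping (batch, seq_len, input_size) to (batch, output_size)
    from the hidden state of the last time step."""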
def __init__(self, input_size, hidden_size=64, num_layers=2, dropout=0.2, output_size=1):
super(GRUModel, self).__init__()
self.gru = nn.GRU(input_size, hidden_size, num_layers,
batch_first=True, dropout=dropout)
self.fc = nn.Linear(hidden_size, output_size)
    def forward(self, x):
        out, _ = self.gru(x)           # out: (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])  # predict from the last time step
class CNNModel(BaseTimeSeriesModel):
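    """1-D CNN over the feature channels: Conv1d -> ReLU -> adaptive max-pool to
    length 1 -> Linear. Expects (batch, seq_len, input_size) input."""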
def __init__(self, input_size, output_size=1, kernel_size=3, num_filters=32):
super(CNNModel, self).__init__()
self.conv1 = nn.Conv1d(in_channels=input_size, out_channels=num_filters, kernel_size=kernel_size)
self.relu = nn.ReLU()
self.pool = nn.AdaptiveMaxPool1d(1)
self.flatten = nn.Flatten()
self.fc = nn.Linear(num_filters, output_size)
    def forward(self, x):
        x = x.permute(0, 2, 1)        # (batch, seq_len, features) -> (batch, channels, seq_len)
        x = self.relu(self.conv1(x))
        x = self.pool(x)              # max over time -> (batch, num_filters, 1)
        x = self.flatten(x)
        return self.fc(x)
class PositionalEncoding(nn.Module):
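    """Standard sinusoidal positional encoding, added to (batch, seq_len, d_model) inputs."""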
def __init__(self, d_model, max_len=500):
super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div)
        pe[:, 1::2] = torch.cos(position * div)
        pe = pe.unsqueeze(0)                  # (1, max_len, d_model) for broadcasting
        self.register_buffer('pe', pe)        # saved with the model, but not trainable
    def forward(self, x):
        return x + self.pe[:, :x.size(1)]     # add encodings up to the sequence length
class TransformerModel(BaseTimeSeriesModel):
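    """Transformer encoder over the input window; the representation of the last
    time step is projected to output_size."""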
def __init__(self, input_size, d_model=64, nhead=4, num_layers=2, output_size=1, dropout=0.1):
super(TransformerModel, self).__init__()
self.input_proj = nn.Linear(input_size, d_model)
self.pos_encoder = PositionalEncoding(d_model)
        # batch_first=True keeps inputs as (batch, seq_len, d_model); without it
        # the encoder would treat the batch dimension as the sequence.
        encoder_layers = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead,
                                                    dropout=dropout, batch_first=True)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)
self.decoder = nn.Linear(d_model, output_size)
    def forward(self, x):
        x = self.input_proj(x)             # (batch, seq_len, input_size) -> (batch, seq_len, d_model)
        x = self.pos_encoder(x)
        x = self.transformer_encoder(x)
        return self.decoder(x[:, -1, :])   # decode from the last time step
class MLPModel(BaseTimeSeriesModel):
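    """Feed-forward baseline that flattens the whole window, so input_size must
    equal seq_len * n_features."""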
    def __init__(self, input_size, hidden_sizes=(64, 64), output_size=1, dropout=0.2):
super(MLPModel, self).__init__()
layers = []
prev_size = input_size
for h in hidden_sizes:
layers.append(nn.Linear(prev_size, h))
layers.append(nn.ReLU())
layers.append(nn.Dropout(dropout))
prev_size = h
layers.append(nn.Linear(prev_size, output_size))
self.net = nn.Sequential(*layers)
    def forward(self, x):
        x = x.reshape(x.size(0), -1)  # flatten the (seq_len, features) window into one vector
        return self.net(x)
class BiLSTMModel(BaseTimeSeriesModel):  # inherit the shared base so reset_weights() applies here too
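    """Bidirectional LSTM with a small MLP head; maps (batch, seq_len, input_size)
    to (batch, output_size)."""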
def __init__(self, input_size, hidden_size, output_size, num_layers=2, dropout=0.3):
super(BiLSTMModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.bilstm = nn.LSTM(input_size, hidden_size, num_layers,
batch_first=True, dropout=dropout, bidirectional=True)
self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, 128),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(128, output_size)
)
    def forward(self, x):
        # Explicit zero initial states; equivalent to PyTorch's default when none are passed.
        h0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        c0 = torch.zeros(self.num_layers * 2, x.size(0), self.hidden_size, device=x.device)
        out, _ = self.bilstm(x, (h0, c0))
        out = out[:, -1, :]           # last time step, forward and backward halves concatenated
        return self.fc(out)
class HybridModel(BaseTimeSeriesModel):
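    """CNN front end (Conv1d + adaptive max-pool to 10 steps) feeding a
    bidirectional LSTM; the last BiLSTM step is decoded by a small MLP."""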
def __init__(self, input_size, hidden_size=64, output_size=1, num_layers=1, dropout=0.2):
super(HybridModel, self).__init__()
self.conv1 = nn.Conv1d(in_channels=input_size, out_channels=32, kernel_size=3)
self.relu = nn.ReLU()
self.pool = nn.AdaptiveMaxPool1d(10)
self.bilstm = nn.LSTM(input_size=32, hidden_size=hidden_size,
num_layers=num_layers, batch_first=True, bidirectional=True)
self.fc = nn.Sequential(
nn.Linear(hidden_size * 2, 64),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(64, output_size)
)
    def forward(self, x):
        x = x.permute(0, 2, 1)        # (batch, seq_len, features) -> (batch, channels, seq_len)
        x = self.relu(self.conv1(x))
        x = self.pool(x)              # downsample to a fixed 10 time steps
        x = x.permute(0, 2, 1)        # back to (batch, 10, 32) for the BiLSTM
        out, _ = self.bilstm(x)
        out = out[:, -1, :]           # last step, both directions concatenated
        return self.fc(out)
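
if __name__ == "__main__":
    # Minimal smoke test, not in the original file: feeds one dummy batch through
    # every model and checks output shapes. The batch size, window length, and
    # feature count below are hypothetical placeholders.
    batch, seq_len, n_features = 4, 30, 8
    x = torch.randn(batch, seq_len, n_features)
    models = {
        'lstm': LSTMModel(n_features),
        'gru': GRUModel(n_features),
        'cnn': CNNModel(n_features),
        'transformer': TransformerModel(n_features),
        'mlp': MLPModel(seq_len * n_features),  # MLP flattens the window
        'bilstm': BiLSTMModel(n_features, hidden_size=64, output_size=1),
        'hybrid': HybridModel(n_features),
    }
    for name, model in models.items():
        y = model(x)
        assert y.shape == (batch, 1), f'{name}: unexpected output shape {y.shape}'
        print(f'{name:12s} -> {tuple(y.shape)}')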