import torch.nn as nn


class LSTMModel(nn.Module):
    """Plain PyTorch LSTM: maps (batch, seq_len, input_size) to (batch, seq_len, output_size)."""

    def __init__(self, input_size=12, hidden_size=64, output_size=12, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size=input_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)  # input comes in as (batch, seq_len, features)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.lstm(x)  # out: (batch, seq_len, hidden_size); final hidden state is discarded
        out = self.fc(out)     # project every time step down to output_size
        return out
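
As a quick sanity check, the model can be run on a dummy batch; a minimal sketch, where the batch size, sequence length, and random input are illustrative only:

import torch

# Hypothetical smoke test: 8 sequences, 30 time steps, 12 features each.
model = LSTMModel()
x = torch.randn(8, 30, 12)
y = model(x)
print(y.shape)  # torch.Size([8, 30, 12]): one 12-dim prediction per time step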

from transformers import PreTrainedModel, PretrainedConfig


class LSTMConfig(PretrainedConfig):
    """Hugging Face config object that stores the LSTM hyperparameters."""

    model_type = "lstm_model"

    def __init__(self, input_size=12, hidden_size=100, output_size=12, num_layers=2, **kwargs):
        super().__init__(**kwargs)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
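
Because LSTMConfig subclasses PretrainedConfig, it serializes without extra work; a minimal sketch:

config = LSTMConfig(hidden_size=64)
print(config.model_type)        # "lstm_model"
print(config.to_json_string())  # hyperparameters round-trip through JSON for save_pretrained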

class HuggingFaceLSTM(PreTrainedModel):
    """Thin PreTrainedModel wrapper so the LSTM gains save_pretrained / from_pretrained."""

    config_class = LSTMConfig

    def __init__(self, config):
        super().__init__(config)
        # Build the underlying PyTorch model from the config's hyperparameters.
        self.lstm_model = LSTMModel(
            input_size=config.input_size,
            hidden_size=config.hidden_size,
            output_size=config.output_size,
            num_layers=config.num_layers,
        )

    def forward(self, x, **kwargs):
        return self.lstm_model(x)
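
With the wrapper in place, the standard Hugging Face save/load cycle should work end to end; a minimal sketch, where the "./lstm_checkpoint" path is purely illustrative:

import torch

config = LSTMConfig()
model = HuggingFaceLSTM(config).eval()

# Round-trip through the standard Hugging Face serialization.
model.save_pretrained("./lstm_checkpoint")  # writes config.json plus the model weights
reloaded = HuggingFaceLSTM.from_pretrained("./lstm_checkpoint").eval()

x = torch.randn(4, 30, 12)  # (batch, seq_len, input_size)
assert torch.allclose(model(x), reloaded(x))  # reloaded model reproduces the outputs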