| import torch | |
| import torch.nn as nn | |
| import torch.optim as optim | |
| from aviator_predictor import AviatorLSTM | |
| import numpy as np | |
def train_model(num_epochs=10, learning_rate=0.001, save_path='aviator_model.pth'):
    """Train an AviatorLSTM on simulated data and save its weights.

    Runs a short demonstration training loop on randomly generated
    batches (there is no real dataset here) and writes the trained
    state dict to ``save_path``.

    Args:
        num_epochs: Number of training iterations over simulated batches.
        learning_rate: Adam learning rate.
        save_path: Destination file for the saved ``state_dict``.
    """
    # Model hyperparameters.
    input_size = 1    # one feature per timestep
    hidden_size = 64
    num_layers = 2
    output_size = 1   # single probability per sequence

    model = AviatorLSTM(input_size, hidden_size, num_layers, output_size)
    # NOTE(review): BCELoss expects the model to emit sigmoid-activated
    # probabilities in (0, 1). Confirm AviatorLSTM ends in a sigmoid;
    # if it outputs raw logits, use nn.BCEWithLogitsLoss instead.
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Ensure dropout/batch-norm layers (if any) are in training mode.
    model.train()

    # Dummy training loop for demonstration.
    print("Starting training...")
    for epoch in range(num_epochs):
        # Simulated batch: 32 sequences of length 10 with 1 feature,
        # paired with random binary targets.
        inputs = torch.randn(32, 10, 1)
        targets = torch.randint(0, 2, (32, 1)).float()

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        # Report progress every other epoch.
        if (epoch + 1) % 2 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

    torch.save(model.state_dict(), save_path)
    print(f"Model saved to {save_path}")
if __name__ == '__main__':
    # Script entry point: run the demo training loop.
    train_model()