# OptimusPrimal / app.py
# ArtistikMilitia — Update app.py (commit d0f78c2, verified)
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# Load the historical draws. The CSV is expected to provide
# 'draw_date', 'winning_numbers' (six space-separated values),
# and 'bonus_number' columns.
data = pd.read_csv('lottery_data.csv')

# Expand the draw date into numeric day/month/year features.
parsed_dates = pd.to_datetime(data['draw_date'])
data['draw_date'] = parsed_dates
data['day'] = parsed_dates.dt.day
data['month'] = parsed_dates.dt.month
data['year'] = parsed_dates.dt.year

# Break the space-separated winning numbers into six integer columns.
number_cols = ['num1', 'num2', 'num3', 'num4', 'num5', 'num6']
data[number_cols] = data['winning_numbers'].str.split(' ', expand=True).astype(int)

# Keep only the modeling features, in a fixed column order:
# [day, month, year, num1..num6, bonus_number]
data = data[['day', 'month', 'year'] + number_cols + ['bonus_number']]
# Prepare the dataset for modeling
class LotteryDataset(Dataset):
    """Sliding-window dataset over the lottery draw history.

    Each sample pairs a window of ``seq_length`` consecutive draws
    (all feature columns except the final 'bonus_number' column) with
    the draw immediately following the window as the target (its six
    winning numbers plus the bonus number, i.e. columns 3 onward).
    """

    def __init__(self, data, seq_length):
        # data: DataFrame laid out as
        # [day, month, year, num1..num6, bonus_number]
        self.data = data
        self.seq_length = seq_length

    def __len__(self):
        # One sample per window that still has a following draw to predict.
        return len(self.data) - self.seq_length

    def __getitem__(self, index):
        # Input: seq_length rows of every column except bonus_number.
        input_seq = self.data.iloc[index:index + self.seq_length, :-1].values
        # Target: the next draw's winning numbers and bonus number
        # (skip the 3 date columns).
        target = self.data.iloc[index + self.seq_length, 3:].values
        return (
            torch.tensor(input_seq, dtype=torch.float32),
            torch.tensor(target, dtype=torch.float32),
        )
# Define sequence length: each training example covers 5 consecutive draws.
seq_length = 5
# Wrap the feature DataFrame in the sliding-window Dataset and batch it.
dataset = LotteryDataset(data, seq_length)
dataloader = DataLoader(dataset, batch_size=16, shuffle=True)
import torch.nn as nn
class LotteryTransformer(nn.Module):
    """Transformer regressor mapping a flattened window of draws to the
    next draw's 7 values (6 winning numbers + 1 bonus number).

    ``forward`` expects ``src`` of shape (batch, seq_length * input_dim),
    i.e. the window already flattened by the caller.
    """

    def __init__(self, input_dim, d_model, nhead, num_layers, dim_feedforward, dropout, seq_length):
        super(LotteryTransformer, self).__init__()
        # Project the flattened window into the model dimension.
        self.input_layer = nn.Linear(seq_length * input_dim, d_model)
        # Learned positional encoding (up to 1000 positions), zero-initialized.
        self.positional_encoding = nn.Parameter(torch.zeros(1, 1000, d_model))
        self.transformer = nn.Transformer(
            d_model=d_model, nhead=nhead, num_encoder_layers=num_layers,
            num_decoder_layers=num_layers, dim_feedforward=dim_feedforward, dropout=dropout
        )
        self.output_layer = nn.Linear(d_model, 7)  # Predict 6 winning numbers + 1 bonus number

    def forward(self, src):
        # (batch, seq_length*input_dim) -> (batch, d_model)
        src = self.input_layer(src)
        src = src.unsqueeze(1)  # Add a length-1 sequence dimension
        # Fix: the positional encoding was registered but never applied
        # (a trained-yet-dead parameter); add the slice matching the
        # sequence dimension. Zero-initialized, so init-time output is
        # unchanged.
        src = src + self.positional_encoding[:, :src.size(1), :]
        # NOTE(review): nn.Transformer defaults to batch_first=False, so
        # these dims are interpreted as (seq, batch, d_model) — confirm
        # the intended layout.
        out = self.transformer(src, src)
        out = self.output_layer(out.squeeze(1))  # -> (batch, 7)
        return out
import torch.optim as optim
# Model hyperparameters
input_dim = 9          # 3 date features + 6 winning numbers per draw
d_model = 128          # transformer embedding width
nhead = 4              # attention heads per layer
num_layers = 2         # encoder/decoder layers
dim_feedforward = 256  # feedforward width inside each layer
dropout = 0.1

model = LotteryTransformer(
    input_dim=input_dim,
    d_model=d_model,
    nhead=nhead,
    num_layers=num_layers,
    dim_feedforward=dim_feedforward,
    dropout=dropout,
    seq_length=seq_length,
)
criterion = nn.MSELoss()  # Regression loss over the 7 predicted values
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Training loop
num_epochs = 20
for epoch in range(num_epochs):
    model.train()
    epoch_loss = 0
    for x, y in dataloader:
        # Flatten each (seq_length, input_dim) window into a single
        # vector, matching the model's input layer.
        x = x.view(x.size(0), -1)
        y = y.float()
        # Forward pass
        output = model(x)
        loss = criterion(output, y)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print(f"Epoch {epoch+1}, Loss: {epoch_loss / len(dataloader)}")
# Generate new winning numbers autoregressively.
model.eval()
# Seed with the most recent seq_length draws (all features except
# bonus_number), flattened to (1, seq_length * input_dim).
current_seq = torch.tensor(data.iloc[-seq_length:, :-1].values, dtype=torch.float32).view(1, -1)
for i in range(10):
    with torch.no_grad():
        next_set = model(current_seq).round().int().tolist()[0]  # Round to nearest integer
    print(f"Set {i+1}: Winning Numbers: {next_set[:6]}, Bonus Number: {next_set[6]}")
    # Fix: the original appended only the 7 predicted values while
    # dropping input_dim (9) features, shrinking the window from 45 to
    # 43 and crashing the next forward pass. Rebuild a full 9-feature
    # step instead: reuse the most recent day/month/year and append the
    # 6 predicted numbers (the bonus number is not an input feature).
    date_feats = current_seq[:, -input_dim:-input_dim + 3]
    predicted_nums = torch.tensor(next_set[:6], dtype=torch.float32).view(1, -1)
    new_step = torch.cat((date_feats, predicted_nums), dim=1)
    current_seq = torch.cat((current_seq[:, input_dim:], new_step), dim=1)