| import torch | |
| import torch.nn as nn | |
class MLP(nn.Module):
    """Multi-Layer Perceptron for xG (expected goals) prediction.

    Stacks one Linear -> ReLU -> BatchNorm1d -> Dropout block per hidden
    dimension, then a Linear(prev_dim, 1) + Sigmoid output head, so the
    forward pass yields a value in (0, 1).
    """

    def __init__(self, input_dim, hidden_dims=None, dropout_rate=0.3):
        """
        Args:
            input_dim (int): Number of input features.
            hidden_dims (Sequence[int] | None): Hidden layer dimensions.
                Defaults to (128, 64, 32). A None sentinel is used instead
                of a list literal to avoid the shared mutable-default
                argument pitfall.
            dropout_rate (float): Dropout probability applied after each
                hidden block.
        """
        super().__init__()
        if hidden_dims is None:
            hidden_dims = (128, 64, 32)

        layers = []
        prev_dim = input_dim
        # Build hidden blocks: Linear -> ReLU -> BatchNorm1d -> Dropout.
        # NOTE(review): BatchNorm placed *after* ReLU reproduces the original
        # code; Linear -> BatchNorm -> ReLU is the more common ordering, but
        # reordering would change model behavior, so it is kept as-is.
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, hidden_dim))
            layers.append(nn.ReLU())
            layers.append(nn.BatchNorm1d(hidden_dim))
            layers.append(nn.Dropout(dropout_rate))
            prev_dim = hidden_dim

        # Output head: single logit squashed to (0, 1) by Sigmoid.
        layers.append(nn.Linear(prev_dim, 1))
        layers.append(nn.Sigmoid())
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """Run the network on a batch of feature vectors.

        Args:
            x (Tensor): Shape (batch, input_dim). In training mode the
                batch size must be > 1 because of BatchNorm1d.

        Returns:
            Tensor: Shape (batch, 1), values in (0, 1).
        """
        return self.network(x)