Uploaded AntiCheatPT_256 model
- AntiCheatPT_256.py +51 -0
- AntiCheatPT_256_model_state_dict.pth +3 -0
- PositionalEncoding.py +24 -0
AntiCheatPT_256.py
ADDED
@@ -0,0 +1,51 @@
import torch
import torch.nn as nn
from .PositionalEncoding import PositionalEncoding

class AntiCheatPT_256(nn.Module):
    def __init__(
        self,
        feature_dim=44,        # nr of features per tick
        seq_len=256,           # nr of ticks
        nhead=1,               # nr of attention heads
        num_layers=4,          # nr of transformer encoder layers
        dim_feedforward=176,   # hidden size of feedforward network (MLP)
        dropout=0.1            # dropout rate
    ):
        super(AntiCheatPT_256, self).__init__()

        self.positional_encoding = PositionalEncoding(d_model=feature_dim, max_len=seq_len + 1)  # +1 for CLS token

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=feature_dim,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True
        )  # input shape is (batch, seq_len, d_model)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        self.cls_token = nn.Parameter(torch.zeros(1, 1, feature_dim))  # add classification token

        self.fc_out = nn.Sequential(
            nn.Linear(feature_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 1)
        )

    def forward(self, x):
        # x shape: (batch_size, seq_len, feature_dim) = (batch_size, 256, d_model)
        B = x.size(0)

        # add classification token
        cls_tokens = self.cls_token.expand(B, -1, -1).to(x.device)  # (batch_size, 1, d_model)
        x = torch.cat((cls_tokens, x), dim=1)                       # -> (batch_size, 257, d_model)

        x = self.positional_encoding(x)   # add positional encoding

        x = self.transformer_encoder(x)   # -> (batch_size, 257, d_model)

        cls_output = x[:, 0]              # get output for classification token
        out = self.fc_out(cls_output)     # -> (batch_size, 1)

        return out.squeeze(1)             # -> (batch_size,)
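A minimal usage sketch, not part of the commit: assuming AntiCheatPT_256.py and PositionalEncoding.py live side by side in a package (the "model" package name below is hypothetical) and the state dict in this commit matches the default constructor arguments, the checkpoint could be loaded roughly like this. The input shape (batch, 256, 44) follows the comments in forward(); interpreting the single output logit with a sigmoid assumes the head was trained with a BCE-style objective.

    import torch
    from model.AntiCheatPT_256 import AntiCheatPT_256  # hypothetical package layout

    model = AntiCheatPT_256()  # default args: feature_dim=44, seq_len=256
    state_dict = torch.load("AntiCheatPT_256_model_state_dict.pth", map_location="cpu")
    model.load_state_dict(state_dict)
    model.eval()

    x = torch.randn(8, 256, 44)           # (batch, ticks, features per tick)
    with torch.no_grad():
        logits = model(x)                 # (batch,) raw logits
        probs = torch.sigmoid(logits)     # assumed probability of the cheating class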
AntiCheatPT_256_model_state_dict.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d72263e7c94031d43a4df70e4e55fd16275c7a743b8e51b4467fe5b29abe8a0
size 471582
PositionalEncoding.py
ADDED
@@ -0,0 +1,24 @@
import torch
import torch.nn as nn
import math

pe_scaling = 0.1  # Hyperparameter; the value 0.1 was used during training

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len):
        super().__init__()
        self.register_buffer("pe", self._generate_pe(max_len, d_model))

    def _generate_pe(self, max_len, d_model):
        # standard sinusoidal positional encoding
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = (pe + 1) / 2       # rescale from [-1, 1] to [0, 1]
        pe = pe_scaling * pe    # scale down so the encoding does not dominate the features
        return pe.unsqueeze(0)  # (1, max_len, d_model) for broadcasting over the batch

    def forward(self, x):
        x = x + self.pe[:, :x.size(1), :].to(x.device)
        return x
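A quick sanity-check sketch, again an assumption rather than part of the commit: this variant shifts the sinusoidal encoding from [-1, 1] into [0, 1] and scales it by pe_scaling, so the values added to the 44-dimensional tick features stay within [0, 0.1]. The flat import below assumes the script is run from the same directory as PositionalEncoding.py.

    import torch
    from PositionalEncoding import PositionalEncoding  # hypothetical flat import

    pe_module = PositionalEncoding(d_model=44, max_len=257)
    print(pe_module.pe.shape)         # torch.Size([1, 257, 44])
    print(pe_module.pe.min().item())  # >= 0.0
    print(pe_module.pe.max().item())  # <= 0.1

    x = torch.zeros(2, 257, 44)
    out = pe_module(x)                # encoding broadcast over the batch dimension
    print(out.shape)                  # torch.Size([2, 257, 44])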