import torch
import torch.nn as nn
import math


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017)."""

    def __init__(self, d_model, max_len=8192):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1).float()
        # Frequencies fall off geometrically: 10000^(-2i / d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
        # Buffer: moves with .to(device) and is saved in state_dict, but is not trained.
        self.register_buffer("pe", pe)

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the table for the first seq_len positions.
        t = x.size(1)
        return x + self.pe[:t].unsqueeze(0)
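
# Usage sketch (illustrative; the shapes below are assumptions, not values from this file):
#
#   enc = PositionalEncoding(d_model=512)
#   x = torch.zeros(2, 16, 512)   # (batch, seq_len, d_model)
#   out = enc(x)                  # same shape; the table broadcasts over the batch dim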


class SmallCodeTransformer(nn.Module):
    """Small Transformer encoder with a linear LM head over the vocabulary."""

    def __init__(self, vocab_size, d_model=512, nhead=8, nlayers=6, dim_feed=2048, max_len=8192):
        super().__init__()
        self.token_emb = nn.Embedding(vocab_size, d_model)
        self.pos = PositionalEncoding(d_model, max_len)
        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, dim_feed, dropout=0.1, activation="gelu")
        self.encoder = nn.TransformerEncoder(encoder_layer, nlayers)
        self.ln = nn.LayerNorm(d_model)
        self.head = nn.Linear(d_model, vocab_size, bias=False)
        self._init_weights()

    def _init_weights(self):
        nn.init.normal_(self.token_emb.weight, mean=0.0, std=0.02)
        nn.init.normal_(self.head.weight, mean=0.0, std=0.02)

    def forward(self, input_ids, attention_mask=None):
        # input_ids: (batch, seq_len); attention_mask: 1 for real tokens, 0 for padding.
        x = self.token_emb(input_ids)
        x = self.pos(x)
        # nn.TransformerEncoder expects (seq_len, batch, d_model) by default.
        x = x.permute(1, 0, 2)
        # src_key_padding_mask is True at positions that should be ignored.
        x = self.encoder(x, src_key_padding_mask=(attention_mask == 0) if attention_mask is not None else None)
        x = x.permute(1, 0, 2)
        x = self.ln(x)
        return self.head(x)  # (batch, seq_len, vocab_size) logits
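

# Minimal smoke test (illustrative sketch; the vocab size, batch size, and sequence
# length below are arbitrary assumptions, not values from the original file).
if __name__ == "__main__":
    torch.manual_seed(0)
    model = SmallCodeTransformer(vocab_size=32000)
    input_ids = torch.randint(0, 32000, (2, 16))           # (batch, seq_len)
    attention_mask = torch.ones(2, 16, dtype=torch.long)
    attention_mask[1, 12:] = 0                              # pad the tail of the second sequence
    logits = model(input_ids, attention_mask)
    print(logits.shape)                                     # torch.Size([2, 16, 32000])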