Update super-large-language-model.py
super-large-language-model.py
CHANGED
import math

import torch
import torch.nn as nn


class TransformerModel(nn.Module):
    def __init__(self, vocab_size, d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout=0.1):
        super(TransformerModel, self).__init__()
        self.model_type = 'Transformer'
        self.src_mask = None  # unused; masks are passed explicitly to forward()
        self.d_model = d_model  # stored so forward() can scale the embeddings
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        self.encoder = nn.Embedding(vocab_size, d_model)  # token embedding table
        self.transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, dropout)
        self.decoder = nn.Linear(d_model, vocab_size)  # projects hidden states back to vocabulary logits

    def forward(self, src, tgt, src_mask=None, tgt_mask=None):
        # Embed tokens, scale by sqrt(d_model), and add positional information.
        src = self.encoder(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        tgt = self.encoder(tgt) * math.sqrt(self.d_model)
        tgt = self.pos_encoder(tgt)
        output = self.transformer(src, tgt, src_mask, tgt_mask)
        output = self.decoder(output)
        return output


class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the sinusoidal table once: sin on even dimensions, cos on odd.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)  # shape (max_len, 1, d_model) for sequence-first inputs
        self.register_buffer('pe', pe)  # saved with the module but not a trainable parameter

    def forward(self, x):
        # x: (seq_len, batch, d_model); add the encoding for the first seq_len positions.
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
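
For a quick sanity check, the model can be exercised end to end. This is a minimal sketch, not part of the commit: the vocabulary size, hyperparameters, and sequence lengths below are illustrative, and the inputs follow the sequence-first (seq_len, batch) layout that nn.Transformer expects by default.

# Illustrative smoke test (assumed hyperparameters, not from the commit).
model = TransformerModel(vocab_size=1000, d_model=512, nhead=8,
                         num_encoder_layers=6, num_decoder_layers=6,
                         dim_feedforward=2048, dropout=0.1)
src = torch.randint(0, 1000, (10, 32))  # (src_seq_len, batch) of token ids
tgt = torch.randint(0, 1000, (20, 32))  # (tgt_seq_len, batch) of token ids
# Causal mask so each target position only attends to earlier positions.
tgt_mask = model.transformer.generate_square_subsequent_mask(20)
logits = model(src, tgt, tgt_mask=tgt_mask)
print(logits.shape)  # expected: torch.Size([20, 32, 1000])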