import math
import torch
import torch.nn as nn
from torch.nn import functional as F


class MVTConfig:
    """Hyperparameters for the minimal GPT model."""
    vocab_size = 5000   # number of tokens in the vocabulary
    block_size = 256    # maximum context length in tokens
    n_layer = 8         # number of transformer blocks
    n_head = 8          # attention heads per block
    n_embd = 512        # embedding / hidden dimension
    batch_size = 16     # training batch size
    dropout = 0.1       # dropout probability
    bias = False        # whether Linear/LayerNorm layers use bias terms


device = 'cuda' if torch.cuda.is_available() else 'cpu'


class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # combined query/key/value projection and output projection
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        self.block_size = config.block_size
        # causal mask: lower-triangular matrix so each position attends only to earlier positions
        self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
                             .view(1, 1, config.block_size, config.block_size))
        # scaled residual-projection init (GPT-2 style) to keep activation variance stable with depth
        nn.init.normal_(self.c_proj.weight, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dim
        # project to queries, keys, values and split into heads
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        # scaled dot-product attention with causal masking
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.mask[:, :, :T, :T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_dropout(att)
        y = att @ v  # (B, nh, T, hs)
        # re-assemble heads and project back to the residual stream
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.resid_dropout(self.c_proj(y))
        return y


class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        # position-wise feed-forward network with 4x expansion
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)
        # scaled residual-projection init, matching the attention output projection
        nn.init.normal_(self.c_proj.weight, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        # note: the bias keyword of nn.LayerNorm requires a recent PyTorch release
        self.ln_1 = nn.LayerNorm(config.n_embd, bias=config.bias)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)

    def forward(self, x):
        # pre-norm transformer block with residual connections
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class MinimalGPT(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.vocab_size = config.vocab_size
        self.block_size = config.block_size
        self.n_layer = config.n_layer
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        self.bias = config.bias

        self.transformer = nn.ModuleDict(dict(
            wte=nn.Embedding(self.vocab_size, self.n_embd),   # token embeddings
            wpe=nn.Embedding(self.block_size, self.n_embd),   # learned positional embeddings
            drop=nn.Dropout(self.dropout),
            h=nn.ModuleList([Block(config) for _ in range(self.n_layer)]),
            ln_f=nn.LayerNorm(self.n_embd, bias=self.bias),   # final layer norm
        ))
        self.lm_head = nn.Linear(self.n_embd, self.vocab_size, bias=False)
        # weight tying: the token embedding and the output projection share parameters
        self.transformer.wte.weight = self.lm_head.weight
        print(f"Minimal GPT Model initialized: {sum(p.numel() for p in self.parameters())/1e6:.2f}M parameters")

    def forward(self, idx, targets=None):
        B, T = idx.size()
        assert T <= self.block_size, f"Input sequence length {T} exceeds block size {self.block_size}"
        pos = torch.arange(0, T, dtype=torch.long, device=idx.device)
        tok_emb = self.transformer.wte(idx)  # (B, T, n_embd)
        pos_emb = self.transformer.wpe(pos)  # (T, n_embd), broadcast over the batch
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)
        if targets is not None:
            # flatten (B, T, vocab) -> (B*T, vocab) for cross-entropy; -1 targets are ignored
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # no targets supplied (inference); return a zero placeholder loss
            loss = torch.tensor(0.0, device=idx.device)
        return logits, loss

|
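
# Minimal usage sketch (an illustrative addition, not part of the original script):
# it instantiates the model from MVTConfig and runs a single forward pass on random
# token ids to sanity-check shapes and the loss. The demo_* names are hypothetical.
if __name__ == '__main__':
    config = MVTConfig()
    model = MinimalGPT(config).to(device)
    demo_idx = torch.randint(0, config.vocab_size, (config.batch_size, config.block_size), device=device)
    demo_targets = torch.randint(0, config.vocab_size, (config.batch_size, config.block_size), device=device)
    logits, loss = model(demo_idx, demo_targets)
    print(f"logits shape: {tuple(logits.shape)}, loss: {loss.item():.4f}")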