import math
import torch
import torch.nn as nn
from torch.nn import functional as F


class CausalSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, in a single batched linear
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        # causal mask buffer; F.scaled_dot_product_attention(is_causal=True) applies
        # the mask internally, so this is kept only for checkpoint compatibility
        self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                     .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimension (n_embd)
        # project to q, k, v and split heads: (B, T, C) -> (B, n_head, T, head_dim)
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)

        # efficient (flash) attention with the causal mask applied internally
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)

        # re-assemble all head outputs side by side and project back out
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.c_proj(y)
        return y


class MLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        # expand to 4x the embedding width, apply GELU, then project back down
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)

    def forward(self, x):
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        return x


class Block(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class GPTConfig:

    def __init__(self, block_size=1024, vocab_size=50304, n_layer=12, n_head=12, n_embd=768,
                 dropout=0.0, bias=True):
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.dropout = dropout  # embedding dropout probability (0.0 disables it)
        self.bias = bias


class GPT(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),   # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),   # learned position embeddings
            drop = nn.Dropout(config.dropout),                      # dropout on the summed embeddings
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),                     # final layer norm
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # weight tying: the token embedding and the output projection share parameters
        self.transformer.wte.weight = self.lm_head.weight

    def forward(self, idx, targets=None):
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"sequence of length {t} exceeds block size {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device)  # positions 0..t-1

        tok_emb = self.transformer.wte(idx)  # (b, t, n_embd)
        pos_emb = self.transformer.wpe(pos)  # (t, n_embd), broadcast over the batch
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)

        if targets is not None:
            # training: compute logits at every position and the cross-entropy loss
            logits = self.lm_head(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # inference: only the logits at the last position are needed for sampling
            logits = self.lm_head(x[:, [-1], :])
            loss = None

        return logits, loss
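

# --- Hedged usage sketch (not part of the model definition above): a quick
# smoke test of how GPTConfig and GPT are assumed to fit together. The tiny
# hyperparameters and random token ids below are illustrative only.
if __name__ == "__main__":
    config = GPTConfig(block_size=64, vocab_size=50304, n_layer=2, n_head=2, n_embd=64)
    model = GPT(config)

    # random batch of token ids; real training would use shift-by-one targets
    idx = torch.randint(0, config.vocab_size, (2, 16))
    targets = torch.randint(0, config.vocab_size, (2, 16))

    logits, loss = model(idx, targets)  # training-style call: full logits plus loss
    print(logits.shape, loss.item())    # torch.Size([2, 16, 50304]) and a scalar loss

    logits, loss = model(idx)           # inference-style call: last-position logits only
    print(logits.shape, loss)           # torch.Size([2, 1, 50304]) None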