import math
import torch
import torch.nn as nn
from torch.nn import functional as F


class LayerNorm(nn.Module):
    """LayerNorm with an optional bias term."""

    def __init__(self, ndim, bias):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(ndim))
        self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None

    def forward(self, x):
        return F.layer_norm(x, x.shape[-1:], self.weight, self.bias, eps=1e-5)


class CausalSelfAttention(nn.Module):
    """Multi-head self-attention with a causal mask over the sequence dimension."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # query, key, value projections for all heads, computed with one linear layer
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout

        # causal mask: lower-triangular matrix of ones, broadcast over batch and heads
        self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                     .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimension
        # project to queries, keys, values and split into heads: (B, n_head, T, head_dim)
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)

        # scaled dot-product attention with the causal mask applied before softmax
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        att = self.attn_dropout(att)

        # weighted sum of values, then re-assemble all heads side by side
        y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.resid_dropout(self.c_proj(y))
        return y


class MLP(nn.Module):
    """Position-wise feed-forward network with a 4x hidden expansion and GELU."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.c_fc(x)
        x = F.gelu(x)
        x = self.c_proj(x)
        return self.dropout(x)


class Block(nn.Module):
    """Transformer block: pre-norm attention and MLP, each wrapped in a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.ln1 = LayerNorm(config.n_embd, bias=config.bias)
        self.attn = CausalSelfAttention(config)
        self.ln2 = LayerNorm(config.n_embd, bias=config.bias)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln1(x))
        x = x + self.mlp(self.ln2(x))
        return x


class GPTConfig:
    """Model hyperparameters; any value not passed as a keyword argument falls back to its default."""

    def __init__(self, **kwargs):
        self.vocab_size = kwargs.get("vocab_size", 50304)
        self.block_size = kwargs.get("block_size", 1024)
        self.n_layer = kwargs.get("n_layer", 12)
        self.n_head = kwargs.get("n_head", 12)
        self.n_embd = kwargs.get("n_embd", 768)
        self.dropout = kwargs.get("dropout", 0.1)
        self.bias = kwargs.get("bias", True)


class GPT(nn.Module):
    """Decoder-only transformer language model."""

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),   # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),   # learned positional embeddings
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = LayerNorm(config.n_embd, bias=config.bias),      # final layer norm
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        B, T = idx.size()
        assert T <= self.config.block_size, "Cannot forward, sequence too long"

        pos = torch.arange(0, T, dtype=torch.long, device=idx.device).unsqueeze(0)  # shape (1, T)

        # sum token and position embeddings, then run the transformer stack
        tok_emb = self.transformer.wte(idx)
        pos_emb = self.transformer.wpe(pos)
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)

        logits = self.lm_head(x)

        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
            return logits, loss
        else:
            return logits, None

    @torch.no_grad()
    def generate(self, idx, max_new_tokens):
        # autoregressive sampling: repeatedly predict the next token and append it
        for _ in range(max_new_tokens):
            idx_cond = idx[:, -self.config.block_size:]  # crop context to at most block_size tokens
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :]  # logits for the last position only
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, next_token), dim=1)
        return idx
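

# A minimal usage sketch (an assumption, not part of the original listing): build a
# deliberately small config, run one forward pass with dummy targets, and sample a few
# tokens, just to check that shapes and the loss wire up correctly.
if __name__ == "__main__":
    config = GPTConfig(vocab_size=256, block_size=64, n_layer=2, n_head=2, n_embd=64, dropout=0.0)
    model = GPT(config)

    # dummy batch of token ids: (batch=2, seq_len=16)
    idx = torch.randint(0, config.vocab_size, (2, 16))
    logits, loss = model(idx, targets=idx)
    print(logits.shape, loss.item())   # torch.Size([2, 16, 256]) and a scalar loss

    model.eval()
    out = model.generate(idx, max_new_tokens=8)
    print(out.shape)                   # torch.Size([2, 24])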