"""A minimal GPT-style language model: multi-head causal self-attention blocks,
position-wise feed-forward layers, and a linear language-model head over the vocabulary."""

import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentionHead(nn.Module):
    """A single head of causal self-attention."""

    def __init__(self, n_embed, head_size, block_size, dropout):
        super().__init__()
        self.key = nn.Linear(n_embed, head_size, bias=False)
        self.query = nn.Linear(n_embed, head_size, bias=False)
        self.value = nn.Linear(n_embed, head_size, bias=False)
        # lower-triangular mask so each position can only attend to earlier positions
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B, T, C = x.shape  # batch, time (sequence length), channels (n_embed)
        K = self.key(x)    # (B, T, head_size)
        Q = self.query(x)  # (B, T, head_size)

        # attention scores, scaled by the key dimension (head_size), not the embedding size
        wei = Q @ K.transpose(-2, -1) * K.shape[-1] ** -0.5  # (B, T, T)
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # mask out the future
        wei = F.softmax(wei, dim=-1)
        wei = self.dropout(wei)

        # weighted aggregation of the values
        V = self.value(x)  # (B, T, head_size)
        out = wei @ V      # (B, T, head_size)
        return out


class MultiHeadAttention(nn.Module):
    """Multiple heads of self-attention in parallel, concatenated and projected."""

    def __init__(self, n_embed, n_heads, head_size, block_size, dropout):
        super().__init__()
        self.heads = nn.ModuleList([AttentionHead(n_embed, head_size, block_size, dropout) for _ in range(n_heads)])
        self.fc = nn.Linear(head_size * n_heads, n_embed)  # output projection back to n_embed
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1)  # (B, T, n_heads * head_size)
        out = self.fc(out)
        out = self.dropout(out)
        return out


class FeedForward(nn.Module):
    """Position-wise feed-forward network: expand, apply ReLU, project back down."""

    def __init__(self, n_embed, n_hidden, dropout):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embed, n_hidden),
            nn.ReLU(),
            nn.Linear(n_hidden, n_embed),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)


class Block(nn.Module):
    """Transformer block: self-attention ("communication") followed by a
    feed-forward network ("computation"), each wrapped in a pre-norm residual."""

    def __init__(self, n_embed, n_heads, block_size, dropout):
        super().__init__()
        self.sa_heads = MultiHeadAttention(n_embed, n_heads, n_embed // n_heads, block_size, dropout)
        self.ffwd = FeedForward(n_embed, n_embed * 4, dropout)
        self.ln1 = nn.LayerNorm(n_embed)
        self.ln2 = nn.LayerNorm(n_embed)

    def forward(self, x):
        x = x + self.sa_heads(self.ln1(x))  # residual around self-attention
        x = x + self.ffwd(self.ln2(x))      # residual around feed-forward
        return x
|
|
class NanoGPT(nn.Module):
    """A minimal GPT-style decoder-only language model."""

    def __init__(self, hyperparameters, device="cpu"):
        super().__init__()

        vocab_size = hyperparameters['vocab_size']
        block_size = hyperparameters['block_size']
        n_embed = hyperparameters['n_embed']
        n_heads = hyperparameters['n_heads']
        n_layers = hyperparameters['n_layers']
        dropout = hyperparameters['dropout']

        self.token_embedding_table = nn.Embedding(vocab_size, n_embed)
        self.position_embedding_table = nn.Embedding(block_size, n_embed)
        self.blocks = nn.Sequential(*[Block(n_embed, n_heads, block_size, dropout) for _ in range(n_layers)])
        self.ln_f = nn.LayerNorm(n_embed)              # final layer norm
        self.lm_head = nn.Linear(n_embed, vocab_size)  # projects hidden states to vocabulary logits

        self.device = device
        self.block_size = block_size

    def forward(self, idx, targets=None):
        # idx and targets are (B, T) tensors of token indices
        B, T = idx.shape

        tok_emb = self.token_embedding_table(idx)                                     # (B, T, n_embed)
        pos_emb = self.position_embedding_table(torch.arange(T, device=self.device))  # (T, n_embed)
        x = tok_emb + pos_emb     # positions broadcast across the batch
        x = self.blocks(x)
        x = self.ln_f(x)
        logits = self.lm_head(x)  # (B, T, vocab_size)

        if targets is None:
            loss = None
        else:
            # flatten batch and time so cross_entropy sees (B*T, vocab_size) vs (B*T,)
            B, T, C = logits.shape
            logits = logits.view(B * T, C)
            targets = targets.view(B * T)
            loss = F.cross_entropy(logits, targets)

        return logits, loss

    def generate(self, idx, max_new_tokens=100):
        # idx is a (B, T) tensor of indices in the current context
        for _ in range(max_new_tokens):
            # crop the context to the last block_size tokens
            idx_cond = idx[:, -self.block_size:]
            # forward pass; targets are omitted, so only logits are used
            logits, _ = self(idx_cond)
            # keep only the logits for the last time step
            logits = logits[:, -1, :]  # (B, vocab_size)
            # convert to probabilities and sample the next token
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)  # (B, 1)
            # append the sampled token to the running sequence
            idx = torch.cat((idx, idx_next), dim=1)  # (B, T+1)

        return idx
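

# A minimal usage sketch, not part of the original module: the hyperparameter
# values below are illustrative placeholders chosen only to exercise the model
# end to end (forward pass with a loss, then sampling); they are assumptions,
# not trained settings.
if __name__ == "__main__":
    hyperparameters = {
        'vocab_size': 65,   # e.g. a small character-level vocabulary
        'block_size': 32,   # maximum context length
        'n_embed': 64,
        'n_heads': 4,
        'n_layers': 2,
        'dropout': 0.1,
    }
    model = NanoGPT(hyperparameters, device="cpu")

    # random token ids standing in for a real batch: (batch=4, time=block_size)
    idx = torch.randint(0, hyperparameters['vocab_size'], (4, hyperparameters['block_size']))
    targets = torch.randint(0, hyperparameters['vocab_size'], (4, hyperparameters['block_size']))

    logits, loss = model(idx, targets)
    print(logits.shape, loss.item())  # expected: torch.Size([128, 65]) and a scalar loss

    # sample 20 new tokens starting from a single token id 0
    context = torch.zeros((1, 1), dtype=torch.long)
    generated = model.generate(context, max_new_tokens=20)
    print(generated.shape)  # torch.Size([1, 21])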