import torch
import torch.nn.functional as F
from dataclasses import dataclass

import gradio as gr
import safetensors.torch
import tiktoken
from huggingface_hub import hf_hub_download

tokenizer = tiktoken.get_encoding("gpt2")


@dataclass
class GPTConfig:
    vocab_size: int = 50304
    n_layer: int = 12
    n_head: int = 6
    n_embd: int = 768
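
# Note on the defaults (matching the published speedrun baseline checkpoint):
# vocab_size is padded from GPT-2's 50257 up to 50304, the next multiple of
# 128, for GPU-friendly matmul shapes; n_embd=768 with n_head=6 gives a head
# dimension of 128.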


class Rotary(torch.nn.Module):
    """Rotary position embeddings (RoPE) with a cached cos/sin table."""

    def __init__(self, dim, base=10000):
        super().__init__()
        self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None

    def forward(self, x):
        seq_len = x.shape[1]
        if seq_len != self.seq_len_cached:
            # Rebuild the cached tables whenever the sequence length changes.
            self.seq_len_cached = seq_len
            t = torch.arange(seq_len, device=x.device).type_as(self.inv_freq)
            freqs = torch.outer(t, self.inv_freq).to(x.device)
            self.cos_cached = freqs.cos().bfloat16()
            self.sin_cached = freqs.sin().bfloat16()
        return self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]


def apply_rotary_emb(x, cos, sin):
    # Split the head dimension in half and rotate each (x1, x2) pair by the
    # position-dependent angle encoded in cos/sin.
    assert x.ndim == 4  # (batch, seq, heads, head_dim)
    d = x.shape[3] // 2
    x1 = x[..., :d]
    x2 = x[..., d:]
    y1 = x1 * cos + x2 * sin
    y2 = x1 * (-sin) + x2 * cos
    return torch.cat([y1, y2], 3).type_as(x)
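
# Why the rotation helps: if q at position m and k at position n are each
# rotated by their own position's angle, the attention dot product q·k picks up
# a net rotation of (n - m) only, so scores depend on relative rather than
# absolute positions.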


class CausalSelfAttention(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        assert self.n_embd % self.n_head == 0
        self.head_dim = self.n_embd // self.n_head
        self.c_q = torch.nn.Linear(self.n_embd, self.n_embd, bias=False)
        self.c_k = torch.nn.Linear(self.n_embd, self.n_embd, bias=False)
        self.c_v = torch.nn.Linear(self.n_embd, self.n_embd, bias=False)
        # Zero-init the output projection so each residual branch starts as an
        # identity (the checkpoint overwrites these weights on load anyway).
        self.c_proj = torch.nn.Linear(self.n_embd, self.n_embd, bias=False)
        self.c_proj.weight.data.zero_()
        self.rotary = Rotary(self.head_dim)
        # Mixing weight for the value residual (see forward).
        self.lamb = torch.nn.Parameter(torch.tensor(0.5))

    def forward(self, x, v1=None):
        B, T, C = x.size()
        q = self.c_q(x).view(B, T, self.n_head, self.head_dim)
        k = self.c_k(x).view(B, T, self.n_head, self.head_dim)
        v = self.c_v(x).view(B, T, self.n_head, self.head_dim)
        if v1 is None:
            v1 = v  # first block: remember its values for all later blocks
        v = (1 - self.lamb) * v + self.lamb * v1.view_as(v)
        cos, sin = self.rotary(q)
        q, k = F.rms_norm(q, (q.size(-1),)), F.rms_norm(k, (k.size(-1),))  # QK norm
        q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin)
        y = F.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True)
        y = y.transpose(1, 2).contiguous().view_as(x)
        y = self.c_proj(y)
        return y, v1
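
# The v1 threading above is "value residual learning": every block mixes its
# own value vectors with the first block's values through the learned scalar
# `lamb`. Together with the zero-init projections and QK norm, this matches the
# modded-nanogpt speedrun architecture this checkpoint appears to come from.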


class MLP(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.c_fc = torch.nn.Linear(config.n_embd, 4 * config.n_embd, bias=False)
        self.c_proj = torch.nn.Linear(4 * config.n_embd, config.n_embd, bias=False)
        self.c_proj.weight.data.zero_()

    def forward(self, x):
        x = self.c_fc(x)
        x = F.relu(x).square()  # squared ReLU in place of GELU
        x = self.c_proj(x)
        return x


class Block(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.attn = CausalSelfAttention(config)
        self.mlp = MLP(config)
        # Learned mix of the residual stream with the post-embedding
        # activations x0, giving deep blocks direct access to the embeddings.
        self.lambdas = torch.nn.Parameter(torch.tensor([1., 0.]))

    def forward(self, x, v1, x0):
        x = self.lambdas[0] * x + self.lambdas[1] * x0
        x1, v1 = self.attn(F.rms_norm(x, (x.size(-1),)), v1)
        x = x + x1
        x = x + self.mlp(F.rms_norm(x, (x.size(-1),)))
        return x, v1
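
# GPT below assembles the pieces. Relative to vanilla nanoGPT this variant has
# no learned positional embeddings (RoPE only), RMSNorm rather than LayerNorm,
# an untied zero-initialized lm_head, and tanh soft-capping of logits at ±30.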


class GPT(torch.nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = torch.nn.ModuleDict(dict(
            wte = torch.nn.Embedding(config.vocab_size, config.n_embd),
            h = torch.nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
        ))
        self.lm_head = torch.nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.lm_head.weight.data.zero_()

    def forward(self, idx, targets=None, return_logits=True):
        x = self.transformer.wte(idx)
        x = F.rms_norm(x, (x.size(-1),))
        x0 = x  # keep the normalized embeddings for the per-block skip
        v1 = None  # filled in by the first block, then shared with the rest
        for block in self.transformer.h:
            x, v1 = block(x, v1, x0)
        x = F.rms_norm(x, (x.size(-1),))

        if targets is not None:
            # Training path: logits for every position, soft-capped at ±30.
            logits = self.lm_head(x)
            logits = 30 * torch.tanh(logits / 30)
            logits = logits.float()
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # Inference path: only the last position's logits are needed.
            logits = self.lm_head(x[:, [-1], :])
            logits = 30 * torch.tanh(logits / 30)
            logits = logits.float()
            loss = None

        if not return_logits:
            logits = None

        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """
        Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
        the sequence max_new_tokens times, feeding the predictions back into the model each time.
        Most likely you'll want to make sure to be in model.eval() mode of operation for this.
        """
        for _ in range(max_new_tokens):
            # forward the model to get the logits for the final position
            logits, _ = self(idx)
            # pluck the last step's logits and scale by the desired temperature
            logits = logits[:, -1, :] / temperature
            # optionally crop the logits to only the top k options
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            # convert logits to (normalized) probabilities and sample
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            # append the sampled index to the running sequence and continue
            idx = torch.cat((idx, idx_next), dim=1)
        return idx
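
# Quick usage sketch for GPT.generate (illustrative only, not executed here;
# assumes the default config on CPU):
#   m = GPT(GPTConfig()); m.eval()
#   ids = torch.tensor([tokenizer.encode_ordinary("Hello")])
#   out = m.generate(ids, max_new_tokens=8, temperature=0.8, top_k=50)
#   print(tokenizer.decode(out[0].tolist()))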


def run_inference(model, input_ids, max_new_tokens, temperature):
    # Batch of one: (t,) token list -> (1, t) LongTensor.
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    return model.generate(input_ids, max_new_tokens=max_new_tokens, temperature=temperature)


def load_model():
    config = GPTConfig()
    model = GPT(config)
    model_path = 'nanogpt-speedrun-baseline.safetensors'
    hf_path = hf_hub_download("lemonteaa/nanogpt-speedrun", model_path)
    missing, unexpected = safetensors.torch.load_model(model, hf_path)
    model.eval()
    return model


nanogpt_model = load_model()


def text_complete(prompt, max_new_tokens, temperature):
    # encode_ordinary ignores special tokens, which is what we want for raw prompts
    input_ids = tokenizer.encode_ordinary(prompt)
    # int(): the slider value may arrive as a float
    output_ids = run_inference(nanogpt_model, input_ids, int(max_new_tokens), temperature)
    return tokenizer.decode(output_ids.squeeze().tolist())
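
# Illustrative call (outputs vary with sampling):
#   text_complete("Once upon a time, ", 20, 0.8)
#   -> the prompt followed by ~20 newly sampled tokens, decoded back to text.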


demo = gr.Interface(
    title="NanoGPT Speedrun Baseline Model Inference Demo",
    fn=text_complete,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Slider(label="max new tokens", minimum=10, maximum=500, value=100, step=1),
        # minimum is 0.1 rather than 0.0: a zero temperature would divide by zero in generate
        gr.Slider(label="temperature", minimum=0.1, maximum=2.0, value=0.9, step=0.1)
    ],
    outputs=["text"],
    # each example row must supply all three inputs
    examples=[
        ["Once upon a time, ", 100, 0.9],
        ["A quick and delicious recipe for pancake: ", 100, 0.9],
        ["The role of IT in supply chain is ", 100, 0.9],
    ]
)

demo.queue()
demo.launch()