# model_transformer.py
# Requires: pip install torch
import math

import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=2048):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(0))  # (1, max_len, d_model)

    def forward(self, x):
        # x: (B, T, D)
        L = x.size(1)
        return x + self.pe[:, :L, :]
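
# PositionalEncoding above is the standard sinusoidal scheme from
# "Attention Is All You Need" (Vaswani et al., 2017):
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# div_term holds the 1 / 10000^(2i / d_model) factors, computed in log space
# (exp of a log) for numerical stability.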

class TransformerLM(nn.Module):
    def __init__(self, vocab_size, d_model=384, nhead=8, num_layers=4, dim_feedforward=1536, dropout=0.1, pad_id=0):
        """
        The defaults (d_model=384, num_layers=4) give a model of roughly 10M
        parameters, depending on the vocabulary size.
        """
        super().__init__()
        self.pad_id = pad_id
        self.tok_embedding = nn.Embedding(vocab_size, d_model, padding_idx=pad_id)
        self.pos_enc = PositionalEncoding(d_model)
        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout, batch_first=True)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.ln_f = nn.LayerNorm(d_model)
        self.head = nn.Linear(d_model, vocab_size, bias=False)
        # init (GPT-2-style small-std normal)
        nn.init.normal_(self.tok_embedding.weight, mean=0.0, std=0.02)
        nn.init.normal_(self.head.weight, mean=0.0, std=0.02)
        # re-zero the padding row, which the normal_ init above overwrote
        with torch.no_grad():
            self.tok_embedding.weight[pad_id].zero_()

    def forward(self, input_ids):
        """
        input_ids: (B, T) LongTensor
        returns logits: (B, T, V)
        """
        x = self.tok_embedding(input_ids)  # (B, T, D)
        x = self.pos_enc(x)
        # causal mask: each position may attend only to itself and earlier positions
        T = input_ids.size(1)
        causal_mask = torch.triu(torch.full((T, T), float("-inf"), device=input_ids.device), diagonal=1)
        # key padding mask: bool of shape (B, T), True marks pad tokens to ignore
        key_padding_mask = (input_ids == self.pad_id)
        x = self.transformer(x, mask=causal_mask, src_key_padding_mask=key_padding_mask)
        x = self.ln_f(x)
        logits = self.head(x)
        return logits

    @torch.no_grad()
    def generate(self, tokenizer, device, prompt, max_new_tokens=64, temperature=1.0, top_k=40):
        """
        Simple autoregressive generation with this encoder-only model used as a
        causal LM: the whole sequence is re-encoded at every step and the next
        token is sampled from the last position. There is no KV cache, so each
        step costs O(T^2); that is fine for small models and short outputs.
        """
        self.eval()
        ids = tokenizer.encode(prompt)
        ids = [i for i in ids if i is not None]  # drop tokens the tokenizer could not map
        input_ids = torch.tensor(ids, dtype=torch.long, device=device).unsqueeze(0)  # (1, T)
        for _ in range(max_new_tokens):
            # crop the context so it never exceeds the positional-encoding table
            ctx = input_ids[:, -self.pos_enc.pe.size(1):]
            logits = self.forward(ctx)  # (1, T, V)
            next_logits = logits[:, -1, :] / max(temperature, 1e-8)
            if top_k is not None and top_k > 0:
                # keep only the top_k logits, renormalize them, and sample
                topk_vals, topk_idx = torch.topk(next_logits, min(top_k, next_logits.size(-1)))
                probs = torch.zeros_like(next_logits).scatter_(1, topk_idx, nn.functional.softmax(topk_vals, dim=-1))
            else:
                probs = nn.functional.softmax(next_logits, dim=-1)
            next_id = torch.multinomial(probs, num_samples=1).item()
            input_ids = torch.cat([input_ids, torch.tensor([[next_id]], device=device)], dim=1)
        out_ids = input_ids.squeeze(0).tolist()
        return tokenizer.decode(out_ids)
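

# Minimal usage sketch: build the model with an assumed 4k-token vocabulary and
# check the output shape of a forward pass on random ids. `generate` additionally
# needs a tokenizer object exposing encode()/decode(), which is not defined here.
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = TransformerLM(vocab_size=4000).to(device)
    dummy = torch.randint(1, 4000, (2, 16), device=device)  # (B=2, T=16); avoid pad_id=0
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 16, 4000])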