---
base_model: GPT-2-like-character-level
tags:
- text-generation
- character-level
- pytorch
---

# Character-level GPT Model

This is a custom character-level GPT model trained on a text dataset (e.g., Shakespeare). It is a minimal implementation intended for educational purposes.

## Model Architecture

The model is a decoder-only Transformer, similar to GPT-2, but it operates at the character level rather than on subword tokens. Its hyperparameters are:

- `block_size`: 1024
- `vocab_size`: determined dynamically from the training data
- `n_layer`: 12
- `n_head`: 12
- `n_embd`: 768
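
For reference, these are the GPT-2 "small" dimensions. A rough back-of-the-envelope sketch of the parameter count (an estimate that ignores biases, LayerNorms, and the small character-level token embedding) is:

```python
# Rough parameter-count sketch for the settings above (an estimate, not an
# exact count): each Transformer block holds ~4*d^2 attention weights and
# ~8*d^2 MLP weights; add the learned position embeddings.
n_layer, n_embd, block_size = 12, 768, 1024
per_block = 12 * n_embd * n_embd
total = n_layer * per_block + block_size * n_embd
print(f"~{total / 1e6:.0f}M parameters")  # ~86M
```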

## How to Use

To use this model, you'll need `pytorch_model.bin` (the trained weights) and `vocab.json` (the character mappings).
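
If you need to regenerate `vocab.json` yourself, here is a minimal sketch (an assumption about the original preprocessing, not the actual training script) that produces the `stoi`/`itos`/`vocab_size` layout expected by `SimpleCharTokenizer` below:

```python
import json

# Build a character-level vocabulary from the training text (hypothetical
# 'input.txt'); the keys match what SimpleCharTokenizer expects.
with open('input.txt', 'r') as f:
    text = f.read()
chars = sorted(set(text))
vocab = {
    'stoi': {ch: i for i, ch in enumerate(chars)},  # char -> int
    'itos': {i: ch for i, ch in enumerate(chars)},  # int -> char (keys become strings in JSON)
    'vocab_size': len(chars),
}
with open('vocab.json', 'w') as f:
    json.dump(vocab, f)
```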

```python
import json
import math
from dataclasses import dataclass

import torch
import torch.nn as nn
from torch.nn import functional as F


# --- Model classes (GPTConfig, CausalSelfAttention, MLP, Block, GPT) ---
# These are the same classes used in the training script and must be defined
# before the weights can be loaded.

@dataclass
class GPTConfig:
    block_size: int = 1024
    vocab_size: int = 50257  # default only; overridden by the value in config.json
    n_layer: int = 12
    n_head: int = 12
    n_embd: int = 768

class CausalSelfAttention(nn.Module):
    '''A minimal causal self-attention block.'''
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, and value projections for all heads, in a single linear layer
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1.0 / math.sqrt(2.0 * config.n_layer)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        # causal mask so each position only attends to positions at or before it
        self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                             .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size()  # batch, sequence length, embedding dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # scaled dot-product attention with causal masking
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
        att = F.softmax(att, dim=-1)
        y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble head outputs
        y = self.c_proj(y)
        return y

class MLP(nn.Module):
    '''A minimal multi-layer perceptron block.'''
    def __init__(self, config):
        super().__init__()
        # expand to 4x the embedding width, apply GELU, project back down
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.c_proj.NANGPT_SCALE_INIT = 1.0 / math.sqrt(2.0 * config.n_layer)

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x

class Block(nn.Module):
    '''A minimal Transformer block: attention then MLP, each with a
    pre-LayerNorm and a residual connection.'''
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x

class GPT(nn.Module):
    '''The full GPT model composed of Blocks.'''
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # position embeddings
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # weight tying: the input embedding and the output projection share one matrix
        self.transformer.wte.weight = self.lm_head.weight
        self.apply(self._init_weights)

    def get_num_params(self, non_embedding=True):
        '''Return the number of parameters, excluding position embeddings by default.'''
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def _init_weights(self, module):
        # GPT-2-style init: normal(0, 0.02), with residual projections scaled
        # down by 1/sqrt(2 * n_layer) via NANGPT_SCALE_INIT
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANGPT_SCALE_INIT'):
                std *= module.NANGPT_SCALE_INIT
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            torch.nn.init.zeros_(module.bias)
            torch.nn.init.ones_(module.weight)

    def forward(self, idx, targets=None):
        device = idx.device
        B, T = idx.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"

        pos = torch.arange(0, T, dtype=torch.long, device=device).unsqueeze(0)  # (1, T)

        tok_emb = self.transformer.wte(idx)  # (B, T, n_embd)
        pos_emb = self.transformer.wpe(pos)  # (1, T, n_embd)
        x = tok_emb + pos_emb

        for block in self.transformer.h:
            x = block(x)

        x = self.transformer.ln_f(x)
        logits = self.lm_head(x)  # (B, T, vocab_size)

        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)

        return logits, loss


# --- Custom character-level tokenizer backed by vocab.json ---
class SimpleCharTokenizer:
    def __init__(self, vocab_file):
        with open(vocab_file, 'r') as f:
            vocab_data = json.load(f)
        self.stoi = vocab_data['stoi']
        self.itos = {int(k): v for k, v in vocab_data['itos'].items()}  # JSON keys are strings
        self.vocab_size = vocab_data['vocab_size']

    def encode(self, s):
        return [self.stoi[c] for c in s]

    def decode(self, l):
        return ''.join([self.itos[i] for i in l])
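
# Example round-trip (assuming the characters appear in the vocabulary):
#   ids = tokenizer.encode("hi")  # -> e.g. [20, 21]
#   tokenizer.decode(ids)         # -> "hi"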


# --- Generation function (simplified sampling loop) ---
def generate_from_hf(model, tokenizer, start_str, max_new_tokens, temperature=1.0, top_k=50, device='cpu'):
    model.eval()
    T_model = model.config.block_size  # the model's context length

    start_ids = tokenizer.encode(start_str)
    x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]  # (1, T)

    x = x[:, -T_model:]  # truncate if the start string exceeds the context length

    for _ in range(max_new_tokens):
        # crop the context if it has grown past block_size
        x_cond = x if x.size(1) <= T_model else x[:, -T_model:]

        with torch.no_grad():
            logits, _ = model(x_cond)
            logits = logits[:, -1, :] / temperature  # logits for the last position only
            if top_k is not None:
                # keep only the top_k most likely characters
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')

            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)

        x = torch.cat((x, idx_next), dim=1)

        # optionally stop once a newline character is sampled
        if tokenizer.stoi.get('\n') is not None and idx_next.item() == tokenizer.stoi.get('\n'):
            break

    return tokenizer.decode(x[0].tolist())
```

## Files in this repository

- `pytorch_model.bin`: the model's state dictionary (weights).
- `vocab.json`: the character-to-integer (`stoi`) and integer-to-character (`itos`) mappings, plus the vocabulary size.
- `config.json`: the model's configuration parameters (the `GPTConfig` fields).
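
As a sketch of how `config.json` pairs with `GPTConfig` (an assumption based on the loading code below, which reads it back with `GPTConfig(**json.load(...))`), the file can be produced directly from the dataclass:

```python
import json
from dataclasses import asdict

# Assumes the GPTConfig dataclass from the block above is already defined.
# vocab_size=65 is a hypothetical character count (e.g. for tiny Shakespeare).
config = GPTConfig(vocab_size=65)
with open('config.json', 'w') as f:
    json.dump(asdict(config), f)
```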

## How to Load and Generate Text

With the classes from the block above defined, loading the checkpoint and sampling looks like this:

```python
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the configuration and vocabulary
with open('my_gpt_model/config.json', 'r') as f:
    model_config_dict = json.load(f)
model_config = GPTConfig(**model_config_dict)

tokenizer = SimpleCharTokenizer('my_gpt_model/vocab.json')

# Instantiate the model and load the trained weights
model = GPT(model_config).to(device)
model.load_state_dict(torch.load('my_gpt_model/pytorch_model.bin', map_location=device))

prompt = "First Citizen:"
generated_text = generate_from_hf(model, tokenizer, prompt, max_new_tokens=200, temperature=0.9, device=device)
print(generated_text)
```
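
The `temperature` and `top_k` arguments control the sampling behavior; as a quick usage sketch (same objects as above, values chosen purely for illustration):

```python
# Lower temperature and a small top_k give more conservative, repetitive text;
# higher temperature with top_k=None samples from the full distribution.
conservative = generate_from_hf(model, tokenizer, "ROMEO:", max_new_tokens=100,
                                temperature=0.7, top_k=10, device=device)
adventurous = generate_from_hf(model, tokenizer, "ROMEO:", max_new_tokens=100,
                               temperature=1.2, top_k=None, device=device)
```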

**Note**: The model architecture classes (`GPTConfig`, `CausalSelfAttention`, `MLP`, `Block`, `GPT`) and the `generate_from_hf` function are part of the model's definition and must be present in your environment before the weights can be loaded from Hugging Face; this `README.md` includes them for clarity and ease of use.