import os
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from dataclasses import dataclass
from datetime import datetime

# Hyperparameters
learning_rate = 3e-4     # Peak learning rate
min_lr = 3e-5            # Minimum learning rate at the end of training
warmup_iters = 2000      # Linear warmup over warmup_iters steps
lr_decay_iters = 800000  # Cosine decay over lr_decay_iters steps
weight_decay = 0.1       # AdamW weight decay
beta1 = 0.9              # AdamW beta1
beta2 = 0.95             # AdamW beta2
grad_clip = 1.0          # Clip the gradient norm at this value
decay_lr = True          # Whether to decay the learning rate
batch_size = 64          # Training batch size
block_size = 256         # Maximum sequence length (context window)
eval_interval = 500      # How often to evaluate
eval_iters = 200         # Number of batches to average for evaluation
log_interval = 10        # How often to print training progress
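# Note: each optimizer step consumes batch_size * block_size = 64 * 256 = 16,384 tokens.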

# Model architecture
@dataclass  # required so GPTConfig(vocab_size=...) works in main() below
class GPTConfig:
    block_size: int = block_size
    vocab_size: int = 50304  # GPT-2 vocab (50257) padded up to a multiple of 64
    n_layer: int = 12
    n_head: int = 16
    n_embd: int = 1024
    dropout: float = 0.1
    bias: bool = False

class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # Combined query, key, value projection for all heads
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # Output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dim
        # Project to q, k, v and split heads: (B, T, C) -> (B, n_head, T, head_dim)
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # Fused attention kernel; apply attention dropout only in training mode
        y = F.scaled_dot_product_attention(
            q, k, v, dropout_p=self.dropout if self.training else 0.0, is_causal=True
        )
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble head outputs
        y = self.resid_dropout(self.c_proj(y))
        return y

class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x

class Block(nn.Module):
    """Transformer block: pre-norm attention followed by pre-norm MLP."""
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attn(self.ln_1(x))  # residual connection around attention
        x = x + self.mlp(self.ln_2(x))   # residual connection around MLP
        return x

class GPT(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # position embeddings
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying: share the token embedding matrix with the output head
        self.transformer.wte.weight = self.lm_head.weight
        # Initialize weights, scaling residual projections by 1/sqrt(2 * n_layer)
        self.apply(self._init_weights)
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer))

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"sequence length {t} exceeds block size {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device)
        tok_emb = self.transformer.wte(idx)
        pos_emb = self.transformer.wpe(pos)
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        if targets is not None:
            # Training: compute logits at every position and the cross-entropy loss
            logits = self.lm_head(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
        else:
            # Inference: only the logits at the last position are needed
            logits = self.lm_head(x[:, [-1], :])
            loss = None
        return logits, loss

    @torch.no_grad()  # generation needs no autograd graph
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            # Crop the context to the last block_size tokens
            idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature  # temperature scaling
            if top_k is not None:
                # Mask out everything below the k-th largest logit
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)  # sample the next token
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

def get_batch(data, block_size, batch_size):
    # Sample batch_size random windows; targets are the inputs shifted by one token
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([data[i:i+block_size] for i in ix])
    y = torch.stack([data[i+1:i+1+block_size] for i in ix])
    return x, y
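
# Optional performance note (an assumption, not part of the original script):
# when training on CUDA, pinned host memory lets the host-to-device copy
# overlap with compute, e.g.:
#
#   x, y = x.pin_memory().to(device, non_blocking=True), \
#          y.pin_memory().to(device, non_blocking=True)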

def get_lr(it):
    # 1) Linear warmup for warmup_iters steps
    if it < warmup_iters:
        return learning_rate * it / warmup_iters
    # 2) If it > lr_decay_iters, return the minimum learning rate
    if it > lr_decay_iters:
        return min_lr
    # 3) In between, cosine-decay down to the minimum learning rate
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # ranges 1 -> 0
    return min_lr + coeff * (learning_rate - min_lr)
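
# The script defines eval_interval, eval_iters, and a val_data split but never
# evaluates on held-out data. Below is a minimal sketch of the missing helper
# (an assumption, not part of the original training loop); main() could call it
# every eval_interval iterations to watch for overfitting.
@torch.no_grad()
def estimate_loss(model, train_data, val_data, device):
    """Average the loss over eval_iters random batches from each split."""
    out = {}
    model.eval()  # disable dropout for evaluation
    for split, data in (('train', train_data), ('val', val_data)):
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            xb, yb = get_batch(data, block_size, batch_size)
            xb, yb = xb.to(device), yb.to(device)
            _, loss = model(xb, yb)
            losses[k] = loss.item()
        out[split] = losses.mean().item()
    model.train()  # restore training mode
    return out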

def save_training_log(log_entry, filename='training_logs.md'):
    """Append a training log entry to a markdown table."""
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(filename, 'a') as f:
        if not f.tell():  # File is empty: write the table header first
            f.write('# Training Logs\n\n')
            f.write('| Timestamp | Iteration | Training Loss | Learning Rate |\n')
            f.write('|-----------|-----------|---------------|---------------|\n')
        f.write(f'| {timestamp} | {log_entry["iter"]:10d} | {log_entry["train_loss"]:.6f} | {log_entry["lr"]:.2e} |\n')

def save_model(model, optimizer, iter_num, loss, filename):
    """Save a model checkpoint atomically, with error handling."""
    tmp_filename = filename + '.tmp'
    try:
        checkpoint = {
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'iter_num': iter_num,
            'loss': loss,
        }
        # Write to a temporary file first so a failed save cannot corrupt an
        # existing checkpoint
        torch.save(checkpoint, tmp_filename, _use_new_zipfile_serialization=True)
        # On success, atomically replace the old checkpoint
        os.replace(tmp_filename, filename)
        return True
    except Exception as e:
        print(f"Error saving model to {filename}: {e}")
        # Clean up the temporary file if it exists
        if os.path.exists(tmp_filename):
            try:
                os.remove(tmp_filename)
            except OSError:
                pass
        return False
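
# Hypothetical counterpart to save_model (not in the original script): restore
# model and optimizer state for resuming training. Assumes the checkpoint was
# written by save_model above and the model was built with the same GPTConfig.
def load_model(model, optimizer, filename, device):
    checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return checkpoint['iter_num'], checkpoint['loss']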

def main():
    torch.manual_seed(1337)
    # Allow TF32 on Ampere+ GPUs for faster matmuls
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}")

    # Create checkpoint directory
    os.makedirs('checkpoints', exist_ok=True)

    # Load the data and build a character-level vocabulary
    with open('input.txt', 'r') as f:
        text = f.read()
    chars = sorted(list(set(text)))
    vocab_size = len(chars)
    stoi = {ch: i for i, ch in enumerate(chars)}
    itos = {i: ch for i, ch in enumerate(chars)}
    encode = lambda s: [stoi[c] for c in s]
    data = torch.tensor(encode(text), dtype=torch.long)

    # 90/10 train/validation split
    n = int(0.9 * len(data))
    train_data = data[:n]
    val_data = data[n:]

    # Initialize the model
    model = GPT(GPTConfig(vocab_size=vocab_size))
    model = model.to(device)
    print(f"Model parameters: {sum(p.numel() for p in model.parameters())/1e6:.2f}M")

    # Initialize the optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate,
                                  betas=(beta1, beta2), weight_decay=weight_decay)

    # Training loop
    best_train_loss = float('inf')
    iter_num = 0
    while True:
        # Get a batch and set the learning rate for this step
        xb, yb = get_batch(train_data, block_size, batch_size)
        xb, yb = xb.to(device), yb.to(device)
        lr = get_lr(iter_num) if decay_lr else learning_rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Forward/backward pass with gradient clipping
        logits, loss = model(xb, yb)
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        optimizer.step()

        # Logging and model saving
        if iter_num % log_interval == 0:
            train_loss = loss.item()
            print(f"iter {iter_num}: loss {train_loss:.4f}, lr {lr:e}")
            save_training_log({
                "iter": iter_num,
                "train_loss": train_loss,
                "lr": lr,
            })
            # Save the latest model whenever the training loss improves
            if train_loss < best_train_loss:
                best_train_loss = train_loss
                print(f"Saving model with training loss: {best_train_loss:.6f}")
                save_model(
                    model,
                    optimizer,
                    iter_num,
                    best_train_loss,
                    os.path.join('checkpoints', 'latest_model.pt')
                )
                if best_train_loss < 0.099999:
                    print(f"Achieved target loss of {best_train_loss:.6f}")
                    break

        iter_num += 1
        if iter_num > lr_decay_iters:
            break

if __name__ == '__main__':
    main()
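
# Example usage after training (a minimal sketch, not part of the original
# script): rebuild the character vocabulary from the same input.txt, load the
# saved checkpoint, and sample. The prompt is taken from the training text so
# every character is guaranteed to be in the vocabulary.
#
#   with open('input.txt', 'r') as f:
#       text = f.read()
#   chars = sorted(list(set(text)))
#   stoi = {ch: i for i, ch in enumerate(chars)}
#   itos = {i: ch for i, ch in enumerate(chars)}
#   model = GPT(GPTConfig(vocab_size=len(chars)))
#   checkpoint = torch.load('checkpoints/latest_model.pt', map_location='cpu')
#   model.load_state_dict(checkpoint['model_state_dict'])
#   model.eval()
#   context = torch.tensor([[stoi[c] for c in text[:10]]], dtype=torch.long)
#   out = model.generate(context, max_new_tokens=200, temperature=0.8, top_k=40)
#   print(''.join(itos[int(i)] for i in out[0]))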