import os
import traceback

print("šŸ”® Deep-NanoGPT Inference Script")

try:
    import torch
    import torch.nn as nn
    from torch.nn import functional as F
    import requests

    # --- Config (must match training) ---
    block_size = 256
    n_embd = 128
    n_head = 4
    n_layer = 72
    dropout = 0.1  # inactive at inference time: model.eval() disables nn.Dropout
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # --- Storage ---
    storage_dir = "/home/user/app/storage/deep_experiment_v2"
    ckpt_path_a = os.path.join(storage_dir, 'ckpt_a.pt')
    ckpt_path_b = os.path.join(storage_dir, 'ckpt_b.pt')

    # --- Vocab (rebuild from the training data so token ids line up) ---
    url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    data = resp.text
    chars = sorted(list(set(data)))
    vocab_size = len(chars)
    stoi = {ch: i for i, ch in enumerate(chars)}
    itos = {i: ch for i, ch in enumerate(chars)}
    encode = lambda s: [stoi.get(c, 0) for c in s]  # unknown chars map to id 0
    decode = lambda l: ''.join([itos[i] for i in l])

    # --- Model Classes ---
    class Head(nn.Module):
        """One head of causal self-attention."""

        def __init__(self, head_size):
            super().__init__()
            self.key = nn.Linear(n_embd, head_size, bias=False)
            self.query = nn.Linear(n_embd, head_size, bias=False)
            self.value = nn.Linear(n_embd, head_size, bias=False)
            self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
            self.dropout = nn.Dropout(dropout)

        def forward(self, x):
            B, T, C = x.shape
            k = self.key(x)
            q = self.query(x)
            # Note: scales by the full embedding dim C rather than head_size;
            # left as-is because inference must match the training forward pass.
            wei = q @ k.transpose(-2, -1) * C**-0.5
            wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))  # causal mask
            wei = F.softmax(wei, dim=-1)
            wei = self.dropout(wei)
            v = self.value(x)
            return wei @ v

    class MultiHeadAttention(nn.Module):
        """Several attention heads in parallel, concatenated then projected."""

        def __init__(self, num_heads, head_size):
            super().__init__()
            self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
            self.proj = nn.Linear(n_embd, n_embd)
            self.dropout = nn.Dropout(dropout)

        def forward(self, x):
            out = torch.cat([h(x) for h in self.heads], dim=-1)
            return self.dropout(self.proj(out))

    class FeedForward(nn.Module):
        """Position-wise MLP with a 4x hidden expansion."""

        def __init__(self, n_embd):
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(n_embd, 4 * n_embd),
                nn.ReLU(),
                nn.Linear(4 * n_embd, n_embd),
                nn.Dropout(dropout),
            )

        def forward(self, x):
            return self.net(x)

    class BlockStandard(nn.Module):
        """Pre-norm transformer block with identity residual connections."""

        def __init__(self, n_embd, n_head):
            super().__init__()
            head_size = n_embd // n_head
            self.sa = MultiHeadAttention(n_head, head_size)
            self.ffwd = FeedForward(n_embd)
            self.ln1 = nn.LayerNorm(n_embd)
            self.ln2 = nn.LayerNorm(n_embd)

        def forward(self, x):
            x = x + self.sa(self.ln1(x))
            x = x + self.ffwd(self.ln2(x))
            return x

    class RMSNorm(nn.Module):
        """Root-mean-square normalization (no mean subtraction, no bias)."""

        def __init__(self, dim, eps=1e-6):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))

        def _norm(self, x):
            return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

        def forward(self, x):
            return self._norm(x.float()).type_as(x) * self.weight

    class BlockMHC(nn.Module):
        """mHC-style block: learned alpha/beta mixing of the residual and the
        sublayer output, followed by RMSNorm, instead of identity residuals."""

        def __init__(self, n_embd, n_head):
            super().__init__()
            head_size = n_embd // n_head
            self.sa = MultiHeadAttention(n_head, head_size)
            self.ffwd = FeedForward(n_embd)
            self.alpha1 = nn.Parameter(torch.tensor(0.9))
            self.beta1 = nn.Parameter(torch.tensor(0.1))
            self.ln1 = RMSNorm(n_embd)
            self.alpha2 = nn.Parameter(torch.tensor(0.9))
            self.beta2 = nn.Parameter(torch.tensor(0.1))
            self.ln2 = RMSNorm(n_embd)

        def forward(self, x):
            mix1 = self.alpha1 * x + self.beta1 * self.sa(x)
            x = self.ln1(mix1)
            mix2 = self.alpha2 * x + self.beta2 * self.ffwd(x)
            x = self.ln2(mix2)
            return x

    class GPT(nn.Module):
        def __init__(self, arch_type='standard'):
            super().__init__()
            self.arch_type = arch_type
            self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
            self.position_embedding_table = nn.Embedding(block_size, n_embd)
            if arch_type == 'standard':
                self.blocks = nn.Sequential(*[BlockStandard(n_embd, n_head) for _ in range(n_layer)])
                self.ln_f = nn.LayerNorm(n_embd)
            elif arch_type == 'mhc':
                self.blocks = nn.Sequential(*[BlockMHC(n_embd, n_head) for _ in range(n_layer)])
                self.ln_f = RMSNorm(n_embd)
            else:
                raise ValueError(f"unknown arch_type: {arch_type!r}")
            self.lm_head = nn.Linear(n_embd, vocab_size)

        def forward(self, idx, targets=None):
            B, T = idx.shape
            tok_emb = self.token_embedding_table(idx)
            pos_emb = self.position_embedding_table(torch.arange(T, device=device))
            x = tok_emb + pos_emb
            x = self.blocks(x)
            x = self.ln_f(x)
            logits = self.lm_head(x)
            return logits, None  # loss is never computed at inference time

        def generate(self, idx, max_new_tokens):
            for _ in range(max_new_tokens):
                idx_cond = idx[:, -block_size:]  # crop context to block_size
                logits, _ = self(idx_cond)
                logits = logits[:, -1, :]  # keep logits for the last position only
                probs = F.softmax(logits, dim=-1)
                idx_next = torch.multinomial(probs, num_samples=1)
                idx = torch.cat((idx, idx_next), dim=1)
            return idx

    # --- Load Models ---
    # weights_only=True assumes each checkpoint is a plain state_dict
    # (supported in PyTorch >= 1.13, and the default from 2.6).
    print(f"šŸ“¦ Loading Model A (Standard) from {ckpt_path_a}...")
    model_a = GPT(arch_type='standard').to(device)
    model_a.load_state_dict(torch.load(ckpt_path_a, map_location=device, weights_only=True))
    model_a.eval()

    print(f"šŸ“¦ Loading Model B (mHC) from {ckpt_path_b}...")
    model_b = GPT(arch_type='mhc').to(device)
    model_b.load_state_dict(torch.load(ckpt_path_b, map_location=device, weights_only=True))
    model_b.eval()

    # --- Inference ---
    PROMPT = "ROMEO:"  # Shakespearean prompt
    MAX_TOKENS = 300

    print(f"\nšŸŽ­ Prompt: '{PROMPT}'")
    print(f"šŸ”¢ Max Tokens: {MAX_TOKENS}")
    context = torch.tensor([encode(PROMPT)], dtype=torch.long, device=device)

    print("\n--- MODEL A (Standard GPT, 72 Layers) ---")
    with torch.no_grad():
        out_a = model_a.generate(context.clone(), max_new_tokens=MAX_TOKENS)
    print(decode(out_a[0].tolist()))

    print("\n--- MODEL B (mHC GPT, 72 Layers) ---")
    with torch.no_grad():
        out_b = model_b.generate(context.clone(), max_new_tokens=MAX_TOKENS)
    print(decode(out_b[0].tolist()))

    print("\nāœ… Inference Complete.")

except Exception as e:
    print(f"\nāŒ FATAL ERROR: {e}")
    traceback.print_exc()
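

# --- Optional: temperature / top-k sampling (illustrative sketch) ---
# GPT.generate() above samples straight from the raw softmax distribution.
# A common variation, not part of the original script, divides the logits by
# a temperature and restricts sampling to the top-k candidates. The helper
# below is a minimal sketch under that assumption; `generate_with_sampling`,
# `temperature`, and `top_k` are hypothetical names, and nothing calls it.
def generate_with_sampling(model, idx, max_new_tokens, temperature=1.0, top_k=None):
    for _ in range(max_new_tokens):
        idx_cond = idx[:, -block_size:]
        logits, _ = model(idx_cond)
        logits = logits[:, -1, :] / temperature  # <1.0 sharpens, >1.0 flattens
        if top_k is not None:
            # mask out everything below the k-th largest logit
            v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
            logits[logits < v[:, [-1]]] = float('-inf')
        probs = F.softmax(logits, dim=-1)
        idx_next = torch.multinomial(probs, num_samples=1)
        idx = torch.cat((idx, idx_next), dim=1)
    return idx

# Example use, with the models and context defined above:
#   out = generate_with_sampling(model_a, context.clone(), MAX_TOKENS,
#                                temperature=0.8, top_k=40)
#   print(decode(out[0].tolist()))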