# HuggingFace upload metadata (commented out so the module parses):
# M64's picture
# Upload folder using huggingface_hub
# 1262b25 verified
"""
SID-GPT v2 - LLaMA-style transformer for SID register prediction.
258-token vocabulary (0-255 byte values, 256=SEP, 257=FRAME).
Predicts next SID register token given sequence of previous tokens.
"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
@dataclass
class ModelConfig:
    """Hyperparameters for the SID-GPT transformer."""

    n_embd: int = 512               # embedding / residual width
    n_layer: int = 8                # number of transformer blocks
    n_head: int = 8                 # query heads
    n_kv_head: int = 2              # key/value heads (GQA)
    intermediate_size: int = 1408   # SwiGLU hidden width
    block_size: int = 4096          # maximum sequence length
    vocab_size: int = 258           # 0-255 byte values + SEP + FRAME
    rope_theta: float = 10000.0     # RoPE base frequency
    bias: bool = False              # bias terms in linear layers
    dropout: float = 0.0

    @staticmethod
    def small() -> "ModelConfig":
        """Default-size preset (identical to the field defaults)."""
        return ModelConfig(
            n_embd=512, n_layer=8, n_head=8,
            n_kv_head=2, intermediate_size=1408,
        )

    @staticmethod
    def large() -> "ModelConfig":
        """Scaled-up preset: wider, deeper, more heads."""
        return ModelConfig(
            n_embd=1024, n_layer=24, n_head=16,
            n_kv_head=4, intermediate_size=2816,
        )
class RMSNorm(nn.Module):
    """Root-mean-square layer norm: no mean-centering, no bias, learned gain."""

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        # Per-channel gain, initialized to identity.
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Compute statistics in float32 for numerical stability,
        # then cast the result back to the input dtype.
        x32 = x.float()
        inv_rms = torch.rsqrt(x32.square().mean(-1, keepdim=True) + self.eps)
        return (x32 * inv_rms * self.weight).to(x.dtype)
def precompute_rope_freqs(
    dim: int,
    max_seq_len: int,
    theta: float = 10000.0,
    device: Optional[torch.device] = None,
) -> torch.Tensor:
    """
    Precompute the complex-valued RoPE phase table.

    For position p and frequency index i:
        freq_i     = 1 / theta^(2i / dim)
        rope[p, i] = exp(j * p * freq_i)

    Args:
        dim: per-head dimension (rotated in pairs, so dim // 2 frequencies).
        max_seq_len: number of positions to precompute.
        theta: RoPE base frequency.
        device: optional device for the table.

    Returns:
        Complex tensor of shape (max_seq_len, dim // 2).
    """
    exponents = torch.arange(0, dim, 2, device=device).float() / dim
    inv_freq = 1.0 / (theta ** exponents)
    positions = torch.arange(max_seq_len, device=device).float()
    # Outer product -> angle for every (position, frequency) pair.
    angles = torch.outer(positions, inv_freq)
    # Unit-magnitude complex numbers at those angles.
    return torch.polar(torch.ones_like(angles), angles)
def apply_rope(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Rotate Q and K by their positional phases (RoPE).

    The last dimension is viewed as consecutive (real, imag) pairs,
    multiplied by the complex phase table, and flattened back to real.

    Args:
        xq: (B, q_heads, T, head_dim) queries.
        xk: (B, kv_heads, T, head_dim) keys.
        freqs: (T, head_dim // 2) complex phases for these positions.

    Returns:
        Rotated (xq, xk), each cast back to its original dtype.
    """

    def as_complex(t: torch.Tensor) -> torch.Tensor:
        # (..., head_dim) -> complex (..., head_dim // 2)
        return torch.view_as_complex(
            t.float().reshape(*t.shape[:-1], -1, 2)
        )

    # Broadcast freqs over the batch and head dimensions:
    # (T, head_dim//2) -> (1, 1, T, head_dim//2)
    phases = freqs[None, None, :, :]
    q_out = torch.view_as_real(as_complex(xq) * phases).flatten(-2)
    k_out = torch.view_as_real(as_complex(xk) * phases).flatten(-2)
    return q_out.to(xq.dtype), k_out.to(xk.dtype)
class GQAAttention(nn.Module):
    """
    Grouped-Query Attention: fewer KV heads than Q heads.
    Group size = n_head / n_kv_head. KV heads are expanded
    via repeat_interleave to match Q head count.
    """
    def __init__(self, config: ModelConfig):
        super().__init__()
        self.n_head = config.n_head
        self.n_kv_head = config.n_kv_head
        # Per-head width; assumes n_embd is divisible by n_head.
        self.head_dim = config.n_embd // config.n_head
        # Number of Q heads that share each KV head.
        self.n_rep = config.n_head // config.n_kv_head
        self.block_size = config.block_size
        # Q projects to the full head count; K/V only to n_kv_head heads.
        self.q_proj = nn.Linear(
            config.n_embd,
            config.n_head * self.head_dim,
            bias=config.bias,
        )
        self.k_proj = nn.Linear(
            config.n_embd,
            config.n_kv_head * self.head_dim,
            bias=config.bias,
        )
        self.v_proj = nn.Linear(
            config.n_embd,
            config.n_kv_head * self.head_dim,
            bias=config.bias,
        )
        self.o_proj = nn.Linear(
            config.n_embd,
            config.n_embd,
            bias=config.bias,
        )
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        # KV cache buffers (populated during inference); allocated
        # lazily in forward() on first cached call.
        self.cache_k: Optional[torch.Tensor] = None
        self.cache_v: Optional[torch.Tensor] = None
    def forward(
        self,
        x: torch.Tensor,
        freqs: torch.Tensor,
        start_pos: Optional[int] = None,
    ) -> torch.Tensor:
        """
        Attention over x, shape (B, T, n_embd) -> (B, T, n_embd).

        Args:
            x: input activations for T consecutive tokens.
            freqs: complex RoPE table covering exactly the T absolute
                positions of x (pre-sliced by the caller).
            start_pos: None for training (full causal attention over x);
                otherwise inference with KV-cache, where x holds tokens
                at absolute positions [start_pos, start_pos + T).
        """
        B, T, _ = x.shape
        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)
        q = q.view(B, T, self.n_head, self.head_dim)
        k = k.view(B, T, self.n_kv_head, self.head_dim)
        v = v.view(B, T, self.n_kv_head, self.head_dim)
        # Transpose to (B, heads, T, head_dim)
        q = q.transpose(1, 2)
        k = k.transpose(1, 2)
        v = v.transpose(1, 2)
        # Apply RoPE to Q and K (V is not rotated)
        q, k = apply_rope(q, k, freqs)
        if start_pos is not None:
            # Inference with KV-cache.
            # start_pos == 0 starts a new sequence: (re)allocate the
            # cache, sized to the full block_size.
            # NOTE(review): assumes batch size B stays constant across
            # the decode steps of one sequence — confirm at call sites.
            if self.cache_k is None or start_pos == 0:
                self.cache_k = torch.zeros(
                    B, self.n_kv_head, self.block_size,
                    self.head_dim,
                    device=x.device, dtype=x.dtype,
                )
                self.cache_v = torch.zeros_like(self.cache_k)
            end_pos = start_pos + T
            # Write this step's (already-rotated) K/V into the cache,
            # then attend over every cached position up to end_pos.
            self.cache_k[:, :, start_pos:end_pos, :] = k
            self.cache_v[:, :, start_pos:end_pos, :] = v
            k = self.cache_k[:, :, :end_pos, :]
            v = self.cache_v[:, :, :end_pos, :]
        # Expand KV heads to match Q heads (GQA -> MHA layout for SDPA)
        if self.n_rep > 1:
            k = k.repeat_interleave(self.n_rep, dim=1)
            v = v.repeat_interleave(self.n_rep, dim=1)
        is_causal = start_pos is None or start_pos == 0
        if start_pos is not None and start_pos > 0:
            # Single-token decode: no causal mask needed
            # (attending to all cached positions)
            # NOTE(review): if start_pos > 0 were ever called with T > 1
            # (chunked prefill), this skips masking within the chunk —
            # confirm cached decode always uses T == 1 here.
            is_causal = False
        y = F.scaled_dot_product_attention(
            q, k, v,
            # Attention dropout only during training.
            dropout_p=(
                self.attn_dropout.p if self.training else 0.0
            ),
            is_causal=is_causal,
        )
        # Merge heads back: (B, n_head, T, head_dim) -> (B, T, n_embd)
        y = y.transpose(1, 2).contiguous().view(B, T, -1)
        return self.resid_dropout(self.o_proj(y))
class SwiGLUFFN(nn.Module):
    """
    SwiGLU feed-forward network: down(silu(gate(x)) * up(x)).
    Three linear projections; bias presence follows config.bias.
    """

    def __init__(self, config: ModelConfig):
        super().__init__()
        dim = config.n_embd
        hidden = config.intermediate_size
        self.gate_proj = nn.Linear(dim, hidden, bias=config.bias)
        self.up_proj = nn.Linear(dim, hidden, bias=config.bias)
        self.down_proj = nn.Linear(hidden, dim, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # SiLU-activated gate branch scales the linear up branch,
        # then the product is projected back to the model width.
        gated = F.silu(self.gate_proj(x)) * self.up_proj(x)
        return self.dropout(self.down_proj(gated))
class TransformerBlock(nn.Module):
    """Pre-norm residual block: x + attn(norm(x)), then h + ffn(norm(h))."""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.attn_norm = RMSNorm(config.n_embd)
        self.attn = GQAAttention(config)
        self.ffn_norm = RMSNorm(config.n_embd)
        self.ffn = SwiGLUFFN(config)

    def forward(
        self,
        x: torch.Tensor,
        freqs: torch.Tensor,
        start_pos: Optional[int] = None,
    ) -> torch.Tensor:
        # Attention sub-layer with residual connection.
        attn_out = self.attn(self.attn_norm(x), freqs, start_pos)
        h = x + attn_out
        # Feed-forward sub-layer with residual connection.
        return h + self.ffn(self.ffn_norm(h))
class Transformer(nn.Module):
    """
    LLaMA-style decoder-only transformer (SID-GPT).

    Token embedding -> n_layer pre-norm GQA blocks -> RMSNorm ->
    LM head (weight-tied to the embedding) over vocab_size tokens.
    """

    # NOTE: annotation is a string so the class is importable without
    # ModelConfig in scope; behavior for callers is unchanged.
    def __init__(self, config: "ModelConfig"):
        super().__init__()
        self.config = config
        self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.dropout)
        self.blocks = nn.ModuleList(
            [TransformerBlock(config) for _ in range(config.n_layer)]
        )
        self.norm = RMSNorm(config.n_embd)
        self.lm_head = nn.Linear(
            config.n_embd, config.vocab_size, bias=False
        )
        # Weight tying: output head shares the embedding matrix.
        self.lm_head.weight = self.tok_emb.weight
        # Precompute the RoPE table once; non-persistent so checkpoints
        # do not carry the (recomputable) buffer.
        head_dim = config.n_embd // config.n_head
        self.register_buffer(
            "rope_freqs",
            precompute_rope_freqs(
                head_dim, config.block_size, config.rope_theta
            ),
            persistent=False,
        )
        self.apply(self._init_weights)
        # Scale the two projections feeding each residual stream by
        # 1/sqrt(2*n_layer) so residual variance stays bounded with depth.
        res_scale = 1.0 / math.sqrt(2 * config.n_layer)
        for block in self.blocks:
            block.attn.o_proj.weight.data *= res_scale
            block.ffn.down_proj.weight.data *= res_scale

    def _init_weights(self, module: nn.Module) -> None:
        """Normal(0, 0.02) init for Linear/Embedding weights; zero biases."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(
        self,
        idx: torch.Tensor,
        targets: Optional[torch.Tensor] = None,
        start_pos: Optional[int] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Run the model.

        Args:
            idx: (B, T) integer token ids.
            targets: optional (B, T) ids; when given, mean cross-entropy
                loss over all positions is returned with the logits.
            start_pos: None for training; otherwise KV-cached inference
                where idx holds tokens at absolute positions
                [start_pos, start_pos + T).

        Returns:
            (logits of shape (B, T, vocab_size), loss or None).

        Raises:
            AssertionError: if T exceeds config.block_size.
        """
        B, T = idx.shape
        # Kept as assert for backward compatibility (callers may catch
        # AssertionError); note it is stripped under `python -O`.
        assert T <= self.config.block_size, (
            f"Sequence length {T} exceeds block_size "
            f"{self.config.block_size}"
        )
        x = self.drop(self.tok_emb(idx))
        # Slice the RoPE table to the absolute positions covered by idx.
        if start_pos is not None:
            freqs = self.rope_freqs[start_pos : start_pos + T]
        else:
            freqs = self.rope_freqs[:T]
        for block in self.blocks:
            x = block(x, freqs, start_pos)
        x = self.norm(x)
        logits = self.lm_head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)),
                targets.view(-1),
            )
        return logits, loss

    def count_params(self) -> int:
        """
        Return the number of unique parameter elements.

        Bug fix: nn.Module.parameters() already de-duplicates shared
        tensors, so the tied lm_head/tok_emb weight is yielded exactly
        once. The previous extra subtraction of lm_head.weight.numel()
        therefore undercounted by vocab_size * n_embd.
        """
        return sum(p.numel() for p in self.parameters())