import math
from dataclasses import dataclass

import torch
import torch.nn as nn
import torch.nn.functional as F


@dataclass
class ModelConfig:
    """Configuration matching SmolLM2-135M"""
    vocab_size: int = 49152
    hidden_size: int = 576
    num_hidden_layers: int = 30
    num_attention_heads: int = 9
    intermediate_size: int = 1536
    max_position_embeddings: int = 2048
    layer_norm_eps: float = 1e-5
    hidden_dropout_prob: float = 0.1
    attention_dropout_prob: float = 0.1

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads


class RotaryEmbedding(nn.Module):
    """Rotary Position Embedding (RoPE)"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        # Inverse frequencies for each pair of dimensions: theta_i = base^(-2i/dim).
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.max_seq_len_cached = max_position_embeddings

        # Precompute cos/sin tables for every position up to the maximum length.
        t = torch.arange(self.max_seq_len_cached, dtype=self.inv_freq.dtype)
        freqs = torch.outer(t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos(), persistent=False)
        self.register_buffer("sin_cached", emb.sin(), persistent=False)

    def forward(self, x, seq_len):
        # Slice the precomputed tables to the current sequence length.
        return (
            self.cos_cached[:seq_len, ...],
            self.sin_cached[:seq_len, ...],
        )


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin):
    """Apply rotary position embedding to query and key tensors."""
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class MultiHeadAttention(nn.Module):
    """Multi-head attention with RoPE"""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.head_dim = config.head_dim
        self.hidden_size = config.hidden_size

        self.q_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)
        self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False)

        self.rotary_emb = RotaryEmbedding(self.head_dim, config.max_position_embeddings)
        self.dropout = nn.Dropout(config.attention_dropout_prob)

    def forward(self, hidden_states, attention_mask=None):
        batch_size, seq_len, _ = hidden_states.shape

        # Project to queries, keys, and values.
        q = self.q_proj(hidden_states)
        k = self.k_proj(hidden_states)
        v = self.v_proj(hidden_states)

        # Split into heads: (batch, num_heads, seq_len, head_dim).
        q = q.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Rotate queries and keys by their position-dependent angles.
        cos, sin = self.rotary_emb(v, seq_len)
        q, k = apply_rotary_pos_emb(q, k, cos, sin)

        # Scaled dot-product attention scores.
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask

        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, v)

        # Merge heads back to (batch, seq_len, hidden_size) and project out.
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(batch_size, seq_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        return attn_output


class MLP(nn.Module):
    """Feed-forward network"""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, x):
        # SwiGLU-style gating: SiLU(gate(x)) * up(x), then project back down.
        gate = F.silu(self.gate_proj(x))
        up = self.up_proj(x)
        return self.dropout(self.down_proj(gate * up))


class TransformerBlock(nn.Module):
    """Single transformer block"""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.attention = MultiHeadAttention(config)
        self.mlp = MLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None):
        # Attention sub-layer: pre-norm, attend, add residual.
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.attention(hidden_states, attention_mask)
        hidden_states = residual + hidden_states

        # MLP sub-layer: pre-norm, feed-forward, add residual.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


class CustomSmolLM(nn.Module):
    """Custom implementation mimicking SmolLM2-135M"""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.config = config

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
        self.layers = nn.ModuleList([
            TransformerBlock(config) for _ in range(config.num_hidden_layers)
        ])
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Tie the output projection to the input embedding matrix.
        self.lm_head.weight = self.embed_tokens.weight

        self.apply(self._init_weights)

    def _init_weights(self, module):
        std = 0.02
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)

    def forward(self, input_ids, attention_mask=None, labels=None):
        batch_size, seq_len = input_ids.shape

        # Additive causal mask: position i may only attend to positions <= i.
        causal_mask = torch.triu(
            torch.full((seq_len, seq_len), float('-inf'), device=input_ids.device),
            diagonal=1,
        )
        causal_mask = causal_mask.unsqueeze(0).unsqueeze(0)  # (1, 1, seq_len, seq_len)

        # Fold an optional padding mask (1 = keep, 0 = pad) into the causal mask
        # instead of dropping causality when a mask is supplied.
        if attention_mask is not None:
            padding_mask = torch.zeros_like(attention_mask, dtype=causal_mask.dtype)
            padding_mask = padding_mask.masked_fill(attention_mask == 0, float('-inf'))
            causal_mask = causal_mask + padding_mask[:, None, None, :]

        hidden_states = self.embed_tokens(input_ids)

        for layer in self.layers:
            hidden_states = layer(hidden_states, causal_mask)

        hidden_states = self.norm(hidden_states)
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so each position predicts the next token, then flatten for cross-entropy.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, self.config.vocab_size),
                shift_labels.view(-1),
            )

        return {'loss': loss, 'logits': logits}
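

# A minimal smoke-test sketch, assuming this file is run directly as a script.
# The batch size, sequence length, and random token ids below are illustrative
# choices, not part of the original model definition.
if __name__ == "__main__":
    config = ModelConfig()
    model = CustomSmolLM(config)

    # Random token ids stand in for a real tokenizer's output.
    input_ids = torch.randint(0, config.vocab_size, (2, 16))

    # Reusing the inputs as labels just exercises the loss path.
    outputs = model(input_ids, labels=input_ids)
    print(outputs['logits'].shape)  # torch.Size([2, 16, 49152])
    print(outputs['loss'].item())   # a finite cross-entropy value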