# LoopFormer-1block-24iterations / modeling_loopformer.py
# LoopFormer model with 1 layer, max 24 loop iterations.
import math
from dataclasses import dataclass
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import PreTrainedModel, PretrainedConfig
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
class CausalSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads, but in a batch
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
# regularization
self.attn_dropout = nn.Dropout(config.dropout)
self.resid_dropout = nn.Dropout(config.dropout)
self.n_head = config.n_head
self.n_embd = config.n_embd
self.dropout = config.dropout
# flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
if not self.flash:
print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
def forward(self, x):
B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
if self.flash:
# efficient attention using Flash Attention CUDA kernels
y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
else:
# manual implementation of attention
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_dropout(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_dropout(self.c_proj(y))
return y
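# Usage sketch (illustrative, not part of the model): CausalSelfAttention maps a
# (B, T, n_embd) tensor to a (B, T, n_embd) tensor under a causal mask, e.g.
#
#   cfg = CausalSelfAttentionConfig = GPTConfig(n_embd=64, n_head=4, block_size=32)  # small illustrative values
#   attn = CausalSelfAttention(cfg)
#   y = attn(torch.randn(2, 8, 64))                                                  # -> shape (2, 8, 64)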
class MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.c_fc = nn.Linear(config.n_embd, config.intermediate_dim, bias=config.bias)
self.gelu = nn.GELU()
self.c_proj = nn.Linear(config.intermediate_dim, config.n_embd, bias=config.bias)
self.dropout = nn.Dropout(config.dropout)
def forward(self, x):
x = self.c_fc(x)
x = self.gelu(x)
x = self.c_proj(x)
x = self.dropout(x)
return x
class LoopFormerBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.norm_1 = nn.RMSNorm(config.n_embd, elementwise_affine=False)
self.attn = CausalSelfAttention(config)
self.norm_2 = nn.RMSNorm(config.n_embd, elementwise_affine=False)
self.mlp = MLP(config)
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
nn.Linear(config.n_embd, 4 * config.n_embd, bias=True),
)
nn.init.zeros_(self.adaLN_modulation[1].weight)
nn.init.zeros_(self.adaLN_modulation[1].bias)
def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
gate_msa, gate_mlp, scale_msa, scale_mlp = self.adaLN_modulation(c).chunk(4, dim=1)
x = x + gate_msa.unsqueeze(1) * self.attn(
self.norm_1(x) * (1 + scale_msa.unsqueeze(1))
)
x = x + gate_mlp.unsqueeze(1) * self.mlp(
self.norm_2(x) * (1 + scale_mlp.unsqueeze(1))
)
return x
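# Shape sketch (illustrative): the block is conditioned per sample, not per token.
# x is (B, T, n_embd), c is (B, n_embd); adaLN_modulation turns c into per-sample
# scale and gate vectors that are broadcast over the sequence dimension, e.g.
#
#   block = LoopFormerBlock(cfg)            # cfg: a GPTConfig as defined below
#   x = torch.randn(2, 8, cfg.n_embd)
#   c = torch.randn(2, cfg.n_embd)          # in practice: time + dt embeddings, see GPT.forward
#   x = block(x, c)                         # -> shape (2, 8, cfg.n_embd)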
class TimestepEmbedder(nn.Module):
def __init__(self, hidden_size, frequency_embedding_size=256):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(frequency_embedding_size, hidden_size, bias=True),
nn.SiLU(),
nn.Linear(hidden_size, hidden_size, bias=True),
)
self.frequency_embedding_size = frequency_embedding_size
@staticmethod
def timestep_embedding(t, dim, max_period=10000):
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
device=t.device
)
args = t[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
return embedding
def forward(self, t):
t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
t_freq = t_freq.to(dtype=self.mlp[0].weight.dtype)
t_emb = self.mlp(t_freq)
return t_emb
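# Usage sketch (illustrative): a batch of scalar "times" is expanded into sinusoidal
# features of size frequency_embedding_size and projected to hidden_size, e.g.
#
#   embedder = TimestepEmbedder(hidden_size=64)
#   t = torch.tensor([0.0, 0.5])            # one scalar per batch element
#   e = embedder(t)                         # -> shape (2, 64)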
class SharedBlock(nn.Module):
def __init__(self, depth, config):
super().__init__()
self.blocks = nn.ModuleList([
LoopFormerBlock(config) for _ in range(depth)
])
def forward(self, x, c):
for block in self.blocks:
x = block(x, c)
return x
@dataclass
class GPTConfig(PretrainedConfig):
model_type: str = 'loopformer'
block_size: int = 1024
vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
n_layer: int = 1
n_head: int = 32
n_embd: int = 2048
dropout: float = 0.0
bias: bool = False # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
intermediate_dim: int = 5120
    def __init__(self, **kwargs):
        # The class-level annotations above act as defaults; any overrides arrive as
        # kwargs and are stored on the instance by PretrainedConfig.__init__.
        super().__init__(**kwargs)
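# Config sketch (illustrative): the defaults above describe the 1-layer / 2048-dim
# configuration; smaller values (purely illustrative) can be passed as keyword
# arguments and are stored on the config instance, e.g.
#
#   cfg = GPTConfig(n_embd=256, n_head=8, intermediate_dim=640, block_size=256)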
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
assert config.vocab_size is not None
assert config.block_size is not None
self.config = config
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
wpe = nn.Embedding(config.block_size, config.n_embd),
drop = nn.Dropout(config.dropout),
h = SharedBlock(config.n_layer, config),
norm_f = nn.RMSNorm(config.n_embd),
))
self.time_embedder = TimestepEmbedder(config.n_embd)
self.dt_embedder = TimestepEmbedder(config.n_embd)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# with weight tying when using torch.compile() some warnings get generated:
# "UserWarning: functional_call was passed multiple values for tied weights.
# This behavior is deprecated and will be an error in future versions"
# not 100% sure what this is, so far seems to be harmless. TODO investigate
self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
# init all weights
self.apply(self._init_weights)
# apply special scaled init to the residual projections, per GPT-2 paper
for pn, p in self.named_parameters():
if pn.endswith('c_proj.weight'):
torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
# report number of parameters
print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
def get_num_params(self, non_embedding=True):
"""
Return the number of parameters in the model.
For non-embedding count (default), the position embeddings get subtracted.
The token embeddings would too, except due to the parameter sharing these
params are actually used as weights in the final layer, so we include them.
"""
n_params = sum(p.numel() for p in self.parameters())
if non_embedding:
n_params -= self.transformer.wpe.weight.numel()
return n_params
def _init_weights(self, module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
    def forward(self, idx, targets=None, steps=None, **kwargs):
        # avoid a mutable default argument: fall back to the 24-step uniform schedule;
        # **kwargs absorbs unused HF-style arguments such as attention_mask
        if steps is None:
            steps = [1 / 24] * 24
device = idx.device
b, t = idx.size()
assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)
# forward the GPT model itself
tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
x = self.transformer.drop(tok_emb + pos_emb)
        ti = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)  # loop "time", starts at 0
        # recurrent refinement: the single shared block is applied once per entry in
        # `steps`, conditioned on the current time ti and step size dt via adaLN modulation
        for dt in steps:
            dt_base = torch.ones_like(ti) * dt
            te = self.time_embedder(ti)
            dte = self.dt_embedder(dt_base)
            c = te + dte
            x = self.transformer.h(x, c)
            ti = ti + dt
x = self.transformer.norm_f(x)
logits = self.lm_head(x)
loss = None
if targets is not None:
loss = F.cross_entropy(
logits.view(-1, logits.size(-1)),
targets.view(-1),
ignore_index=-1,
)
return logits, loss
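# Loop-schedule sketch (illustrative): `steps` controls how many times the shared
# block is applied and how far the conditioning time advances per application. The
# default is 24 uniform steps of 1/24 (total time 1.0); other schedules can be passed
# explicitly, e.g. a coarser 8-step schedule (illustrative values, `gpt_model` being
# a GPT instance):
#
#   logits, loss = gpt_model(idx, targets=targets, steps=[1 / 8] * 8)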
# ---- HF wrapper -------------------------------------------------------------
from transformers.generation.utils import GenerationMixin
class LoopFormerGPTForCausalLM(PreTrainedModel, GenerationMixin):
config_class = GPTConfig
main_input_name = "input_ids"
_tied_weights_keys = ["gpt.transformer.wte.weight", "gpt.lm_head.weight"]
def __init__(self, config: GPTConfig, **kwargs):
super().__init__(config)
self.gpt = GPT(config)
self.post_init()
# expose embeddings/heads for HF utilities
def get_input_embeddings(self):
return self.gpt.transformer.wte
def set_input_embeddings(self, new_emb):
self.gpt.transformer.wte = new_emb
self.gpt.lm_head.weight = new_emb.weight # keep tied
def get_output_embeddings(self):
return self.gpt.lm_head
def set_output_embeddings(self, new_out):
self.gpt.lm_head = new_out
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, steps=None, **kwargs):
# Let HF build the usual inputs (esp. past_key_values, position_ids, etc.)
model_inputs = super().prepare_inputs_for_generation(
input_ids=input_ids,
attention_mask=attention_mask,
**kwargs
)
        # Whitelist the custom `steps` argument so `generate()` forwards it to forward()
if steps is not None:
model_inputs["steps"] = steps
return model_inputs
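    # Generation sketch (illustrative): because `steps` is whitelisted above, a custom
    # schedule can be forwarded through `generate()` as a model kwarg, e.g.
    #
    #   model.generate(input_ids, max_new_tokens=32, steps=[1 / 24] * 24)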
    def forward(self, input_ids=None, attention_mask=None, labels=None, steps=None, **kwargs):
        # fall back to the default 24-step uniform schedule (a "steps" kwarg always binds
        # to the explicit parameter above, so it can never end up in **kwargs)
        if steps is None:
            steps = [1 / 24] * 24
        # GPT.forward applies only a causal mask; attention_mask is accepted for HF
        # compatibility but is currently ignored (no padding mask is applied)
        logits, loss = self.gpt(
            input_ids, targets=labels, steps=steps, attention_mask=attention_mask
        )
return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits)
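

if __name__ == "__main__":
    # Minimal smoke test (sketch): a tiny, randomly initialized configuration with
    # illustrative hyperparameters only (not the released checkpoint), exercising one
    # forward pass with a custom loop schedule.
    cfg = GPTConfig(
        block_size=64,
        vocab_size=256,
        n_layer=1,
        n_head=4,
        n_embd=64,
        intermediate_dim=128,
    )
    model = LoopFormerGPTForCausalLM(cfg)
    ids = torch.randint(0, cfg.vocab_size, (2, 16))
    out = model(input_ids=ids, labels=ids, steps=[1 / 4] * 4)
    print("logits:", tuple(out.logits.shape), "loss:", float(out.loss))
    # greedy decoding goes through GenerationMixin; untested sketch:
    # model.generate(ids[:, :4], max_new_tokens=8, do_sample=False)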