import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel, PretrainedConfig
from transformers.generation.utils import GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions


class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with an optional Flash Attention fast path."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, in a single batched linear
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # use Flash Attention when available (PyTorch >= 2.0)
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
            # causal mask for the manual attention path below
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                         .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimension (n_embd)

        # project to q, k, v for all heads and move the head dim forward: (B, n_head, T, head_dim)
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)

        # causal self-attention
        if self.flash:
            # efficient attention using Flash Attention kernels
            y = torch.nn.functional.scaled_dot_product_attention(
                q, k, v,
                attn_mask=None,
                dropout_p=self.dropout if self.training else 0,
                is_causal=True,
            )
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y


class MLP(nn.Module):
    """Position-wise feed-forward block: linear -> GELU -> linear -> dropout."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, config.intermediate_dim, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(config.intermediate_dim, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x


class LoopFormerBlock(nn.Module):
    """Transformer block with adaLN-Zero style conditioning on a per-sequence vector c."""

    def __init__(self, config):
        super().__init__()
        # nn.RMSNorm requires a fairly recent PyTorch (2.4+); affine params are dropped because
        # scaling comes from the adaLN modulation below
        self.norm_1 = nn.RMSNorm(config.n_embd, elementwise_affine=False)
        self.attn = CausalSelfAttention(config)
        self.norm_2 = nn.RMSNorm(config.n_embd, elementwise_affine=False)
        self.mlp = MLP(config)

        # project the conditioning vector to per-block gates and scales
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(config.n_embd, 4 * config.n_embd, bias=True),
        )

        # zero-init so every block starts out as an identity mapping
        nn.init.zeros_(self.adaLN_modulation[1].weight)
        nn.init.zeros_(self.adaLN_modulation[1].bias)

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        # c: (B, n_embd); gates and scales broadcast over the sequence dimension
        gate_msa, gate_mlp, scale_msa, scale_mlp = self.adaLN_modulation(c).chunk(4, dim=1)

        x = x + gate_msa.unsqueeze(1) * self.attn(
            self.norm_1(x) * (1 + scale_msa.unsqueeze(1))
        )
        x = x + gate_mlp.unsqueeze(1) * self.mlp(
            self.norm_2(x) * (1 + scale_mlp.unsqueeze(1))
        )
        return x


class TimestepEmbedder(nn.Module):
    """Embeds scalar timesteps into vectors via sinusoidal features followed by an MLP."""

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """Create sinusoidal embeddings of shape (N, dim) from a 1-D tensor t of shape (N,)."""
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_freq = t_freq.to(dtype=self.mlp[0].weight.dtype)
        t_emb = self.mlp(t_freq)
        return t_emb


class SharedBlock(nn.Module):
    """A stack of LoopFormerBlocks whose weights are reused on every loop iteration."""

    def __init__(self, depth, config):
        super().__init__()
        self.blocks = nn.ModuleList([
            LoopFormerBlock(config) for _ in range(depth)
        ])

    def forward(self, x, c):
        for block in self.blocks:
            x = block(x, c)
        return x


class GPTConfig(PretrainedConfig):
    model_type = 'loopformer'

    def __init__(
        self,
        block_size: int = 1024,
        vocab_size: int = 50304,
        n_layer: int = 3,
        n_head: int = 32,
        n_embd: int = 2048,
        dropout: float = 0.0,
        bias: bool = False,
        intermediate_dim: int = 5120,
        **kwargs,
    ):
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.dropout = dropout
        self.bias = bias
        self.intermediate_dim = intermediate_dim
        super().__init__(**kwargs)


class GPT(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            wpe = nn.Embedding(config.block_size, config.n_embd),
            drop = nn.Dropout(config.dropout),
            h = SharedBlock(config.n_layer, config),
            norm_f = nn.RMSNorm(config.n_embd),
        ))

        # embedders for the current loop time t and the step size dt
        self.time_embedder = TimestepEmbedder(config.n_embd)
        self.dt_embedder = TimestepEmbedder(config.n_embd)

        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # weight tying: token embedding and output head share one matrix
        self.transformer.wte.weight = self.lm_head.weight

        # init all weights
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))

        # report number of parameters
        print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None, steps=[1/8]*8, **kwargs):
        # `steps` lists the dt of each pass through the shared block; the default runs the
        # block 8 times with dt = 1/8, so the loop time advances from 0 toward 1.
        # Extra kwargs (e.g. attention_mask) are accepted for HF compatibility but ignored:
        # attention is always causal and padding positions are not masked out.
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device)

        # token and position embeddings
        tok_emb = self.transformer.wte(idx)
        pos_emb = self.transformer.wpe(pos)
        x = self.transformer.drop(tok_emb + pos_emb)

        # looped (weight-shared) forward pass, conditioned on the current time and step size
        ti = torch.zeros(b, dtype=x.dtype, device=device)
        for dt in steps:
            dt_base = torch.ones_like(ti) * dt
            te = self.time_embedder(ti)
            dte = self.dt_embedder(dt_base)
            c = te + dte
            x = self.transformer.h(x, c)
            ti = ti + dt

        x = self.transformer.norm_f(x)

        logits = self.lm_head(x)

        loss = None
        if targets is not None:
            # targets are expected to already be shifted by the caller; label -1 is ignored
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)),
                targets.view(-1),
                ignore_index=-1,
            )

        return logits, loss


class LoopFormerGPTForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = GPTConfig
    main_input_name = "input_ids"
    _tied_weights_keys = ["gpt.transformer.wte.weight", "gpt.lm_head.weight"]

    def __init__(self, config: GPTConfig, **kwargs):
        super().__init__(config)
        self.gpt = GPT(config)
        self.post_init()

    def get_input_embeddings(self):
        return self.gpt.transformer.wte

    def set_input_embeddings(self, new_emb):
        self.gpt.transformer.wte = new_emb
        # keep the output head tied to the (possibly resized) input embedding
        self.gpt.lm_head.weight = new_emb.weight

    def get_output_embeddings(self):
        return self.gpt.lm_head

    def set_output_embeddings(self, new_out):
        self.gpt.lm_head = new_out

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, steps=None, **kwargs):
        # defer to the generic GenerationMixin implementation, then thread `steps` through
        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            attention_mask=attention_mask,
            **kwargs
        )
        if steps is not None:
            model_inputs["steps"] = steps
        return model_inputs

    def forward(self, input_ids=None, attention_mask=None, labels=None, steps=None, **kwargs):
        # default recurrence schedule: 8 passes through the shared block with dt = 1/8
        if steps is None:
            steps = [1/8] * 8

        # labels are forwarded as-is to the inner GPT, which does not shift them
        logits, loss = self.gpt(
            input_ids, targets=labels, steps=steps, attention_mask=attention_mask
        )
        return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits)
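

# A minimal sketch (an assumption, not part of the original training code) of how this model
# could be registered with the Auto classes so that model_type "loopformer" resolves through
# AutoConfig / AutoModelForCausalLM. Uncomment if that workflow is wanted:
#
#   from transformers import AutoConfig, AutoModelForCausalLM
#   AutoConfig.register("loopformer", GPTConfig)
#   AutoModelForCausalLM.register(GPTConfig, LoopFormerGPTForCausalLM)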
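

if __name__ == "__main__":
    # Minimal smoke-test sketch: the tiny sizes below are made up for a quick CPU check and are
    # not a training configuration. It runs one forward pass with labels, then another with a
    # shorter loop schedule to show how `steps` controls the recurrence depth.
    cfg = GPTConfig(
        block_size=64, vocab_size=128, n_layer=2, n_head=4, n_embd=64,
        dropout=0.0, bias=False, intermediate_dim=160,
    )
    model = LoopFormerGPTForCausalLM(cfg)
    input_ids = torch.randint(0, cfg.vocab_size, (2, 16))

    out = model(input_ids=input_ids, labels=input_ids)
    print("logits:", tuple(out.logits.shape), "loss:", float(out.loss))

    # 4 passes of dt = 1/4 instead of the default 8 passes of dt = 1/8
    out_shallow = model(input_ids=input_ids, steps=[1 / 4] * 4)
    print("logits (4 loop steps):", tuple(out_shallow.logits.shape))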