# modeling_challenger.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions

from .configuration_challenger import ChallengerConfig


class RMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-8):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x: torch.Tensor):
        return x * torch.rsqrt((x * x).mean(-1, keepdim=True) + self.eps)

    def forward(self, x: torch.Tensor):
        # normalize in float32 for numerical stability, then cast back to the input dtype
        output = self._norm(x.float())
        return (output * self.weight.float()).type_as(x)
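
# Illustrative only (not part of the model): RMSNorm scales each token by the
# reciprocal root-mean-square of its features with a learned per-channel gain,
# and applies no mean subtraction or bias. A minimal equivalence sketch:
#
#   x = torch.randn(2, 3, 8)
#   norm = RMSNorm(8)
#   ref = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + norm.eps) * norm.weight
#   torch.testing.assert_close(norm(x), ref)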


class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
        # head / embedding bookkeeping
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)
        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        # nh is "number of heads", hs is "head size", and C (number of channels) = nh * hs
        # e.g. in GPT-2 (124M), n_head=12, hs=64, so nh*hs=C=768 channels in the Transformer
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)  # fused SDPA (flash attention when available)
        y = y.transpose(1, 2).contiguous().view(B, T, C)  # re-assemble all head outputs side by side
        # output projection
        y = self.c_proj(y)
        return y
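
# For reference (a manual sketch, ignoring the fused kernel's memory savings),
# F.scaled_dot_product_attention(q, k, v, is_causal=True) computes the same thing as:
#
#   att = (q @ k.transpose(-2, -1)) * (k.size(-1) ** -0.5)
#   mask = torch.ones(T, T, dtype=torch.bool, device=x.device).tril()
#   att = att.masked_fill(~mask, float("-inf"))
#   y = F.softmax(att, dim=-1) @ v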


class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        # single projection packing the two 4 * n_embd halves used for gating
        self.c_fc = nn.Linear(config.n_embd, 8 * config.n_embd, bias=False)
        self.gelu = nn.SiLU()  # note: despite the attribute name, this is SiLU
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False)

    def forward(self, x):
        # split the 8 * n_embd projection into two 4 * n_embd halves
        x, y = self.c_fc(x).split(x.size(-1) * 4, dim=2)
        x = self.gelu(x)
        x = self.c_proj(x * y)
        return x
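
# In equation form (illustrative): with c_fc.weight split row-wise into [W1; W2],
#   MLP(x) = c_proj( SiLU(x @ W1.T) * (x @ W2.T) )
# i.e. a gated SwiGLU-style feed-forward with hidden size 4 * n_embd.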


class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = RMSNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = RMSNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        return x + self.attn(self.ln_1(x)) + self.mlp(self.ln_2(x))
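
# Note: the residual update above is the "parallel" formulation (as in GPT-J/PaLM-style
# blocks): the attention and MLP branches both read the same input x and their outputs
# are summed, rather than the MLP consuming the attention output sequentially.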


class GPT(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = RMSNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

    def forward(self, idx, targets=None):
        x = self.transformer.wte(idx)  # token embeddings of shape (B, T, n_embd)
        # forward the blocks of the transformer
        for block in self.transformer.h:
            x = block(x)
        # forward the final RMSNorm and the classifier
        x = self.transformer.ln_f(x)
        logits = self.lm_head(x).float()  # (B, T, vocab_size)
        loss = None
        if targets is not None:
            # targets are used as-is (no internal shift); positions set to -100 are ignored
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
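
# As written, the model applies no explicit positional encoding in this file (no learned
# position embeddings and no RoPE); token order is conveyed only through causal attention.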


class ChallengerForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = ChallengerConfig
    _keys_to_ignore_on_load_unexpected = [r"past_key_values"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GPT(config)

    def forward(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        labels=None,
        use_cache=None,
        past_key_values=None,
        **kwargs
    ):
        # attention_mask, past_key_values and inputs_embeds are accepted for API
        # compatibility but are not used by the underlying GPT module; labels are
        # forwarded unshifted and GPT.forward computes the loss directly
        logits, loss = self.model(input_ids, labels)
        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
        )
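

# Minimal usage sketch (an assumption, not part of this file's API): loading through the
# auto classes requires the repo's config.json to map ChallengerConfig /
# ChallengerForCausalLM via auto_map, and the repo id below is a placeholder.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   repo_id = "MaxiiMin/Challenger-1"  # placeholder / assumption
#   tokenizer = AutoTokenizer.from_pretrained(repo_id)
#   model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
#   inputs = tokenizer("Hello", return_tensors="pt")
#   out = model.generate(inputs["input_ids"], max_new_tokens=32, do_sample=True)
#   print(tokenizer.decode(out[0]))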