import torch
import torch.nn as nn
import torch.nn.functional as F

from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions

from .configuration_challenger import ChallengerConfig


class RMSNorm(nn.Module):
    """Root-mean-square layer norm: rescales by 1/RMS(x) with a learned gain,
    with no mean subtraction and no bias."""

    def __init__(self, dim: int, eps: float = 1e-8):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x: torch.Tensor):
        # x / sqrt(mean(x^2) + eps), using rsqrt to avoid a division
        return x * torch.rsqrt((x * x).mean(-1, keepdim=True) + self.eps)

    def forward(self, x: torch.Tensor):
        # normalize in float32 for numerical stability, then cast back
        output = self._norm(x.float())
        return (output * self.weight.float()).type_as(x)
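

# Note: PyTorch 2.4+ also ships torch.nn.RMSNorm with equivalent semantics;
# the hand-rolled version above keeps the float32 upcast explicit.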


class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention built on the fused
    F.scaled_dot_product_attention kernel."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # combined projection for query, key and value
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dim
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        # (B, T, C) -> (B, n_head, T, head_dim)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # is_causal=True applies the lower-triangular mask inside the kernel
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        # merge the head outputs back into (B, T, C)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.c_proj(y)


class MLP(nn.Module):
    """Gated feed-forward block (SwiGLU-style): the up-projection produces a
    gate half and a value half, combined as silu(gate) * value before the
    down-projection."""

    def __init__(self, config):
        super().__init__()
        # 8x-wide projection, split into two 4x halves (gate, value) in forward
        self.c_fc = nn.Linear(config.n_embd, 8 * config.n_embd, bias=False)
        self.act = nn.SiLU()
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False)

    def forward(self, x):
        gate, value = self.c_fc(x).chunk(2, dim=-1)
        return self.c_proj(self.act(gate) * value)


class Block(nn.Module):
    """Transformer block with parallel residual branches (GPT-J style): the
    attention and MLP paths each normalize the incoming activations, and their
    outputs are added to the residual stream together."""

    def __init__(self, config):
        super().__init__()
        self.ln_1 = RMSNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = RMSNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # parallel (not sequential) residual: both branches read the same x
        return x + self.attn(self.ln_1(x)) + self.mlp(self.ln_2(x))


class GPT(nn.Module):
    """Decoder-only transformer core: token embeddings, a stack of blocks, a
    final RMSNorm, and an untied linear head. There are no explicit positional
    embeddings; position information comes only from the causal mask."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.transformer = nn.ModuleDict(dict(
            wte=nn.Embedding(config.vocab_size, config.n_embd),
            h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f=RMSNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

    def forward(self, idx, targets=None):
        x = self.transformer.wte(idx)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.ln_f(x)
        # compute logits in float32 so the loss is numerically stable
        logits = self.lm_head(x).float()
        loss = None
        if targets is not None:
            # targets must already be shifted: targets[t] is the token that
            # follows idx[t]
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss
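

# Example use of the raw GPT module (a sketch; the ChallengerConfig field
# names are inferred from how they are read above). Note that the caller is
# responsible for pre-shifting the targets:
#
#   config = ChallengerConfig(vocab_size=256, n_layer=2, n_head=2, n_embd=64)
#   tokens = torch.randint(0, config.vocab_size, (1, 17))
#   logits, loss = GPT(config)(tokens[:, :-1], targets=tokens[:, 1:])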


class ChallengerForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = ChallengerConfig
    _keys_to_ignore_on_load_unexpected = [r"past_key_values"]

    def __init__(self, config):
        super().__init__(config)
        self.model = GPT(config)

    def forward(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        labels=None,
        use_cache=None,
        past_key_values=None,
        **kwargs,
    ):
        logits, _ = self.model(input_ids)
        loss = None
        if labels is not None:
            # Hugging Face passes labels identical to input_ids and expects the
            # model to shift them internally, whereas GPT.forward expects
            # pre-shifted targets; shift here so position t predicts token t + 1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            )
        return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits)
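

if __name__ == "__main__":
    # Minimal smoke-test sketch. It assumes ChallengerConfig accepts
    # vocab_size / n_layer / n_head / n_embd as keyword arguments (inferred
    # from how those fields are read above). Because of the relative import
    # at the top, run this as a module, e.g. `python -m <package>.modeling_challenger`.
    config = ChallengerConfig(vocab_size=256, n_layer=2, n_head=2, n_embd=64)
    model = ChallengerForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    out = model(input_ids=input_ids, labels=input_ids)
    print(out.logits.shape, out.loss.item())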