Update modeling_tinygpt.py

modeling_tinygpt.py  +42 -98  CHANGED

@@ -1,119 +1,63 @@
Before (removed lines are marked with -):

-
 import torch
 import torch.nn as nn
-
-from transformers import PreTrainedModel
 from transformers.modeling_outputs import CausalLMOutput

-# -------------------------
-# TinyGPTConfig (Required)
-# -------------------------
-class TinyGPTConfig:
-    model_type = "tinygpt"
-
-    def __init__(self,
-                 vocab_size=30522,
-                 d_model=256,
-                 n_heads=4,
-                 n_layers=4,
-                 d_ff=1024,
-                 max_seq_len=256,
-                 **kwargs):
-        self.vocab_size = vocab_size
-        self.d_model = d_model
-        self.n_heads = n_heads
-        self.n_layers = n_layers
-        self.d_ff = d_ff
-        self.max_seq_len = max_seq_len
-
-        # store additional HF keys
-        for k, v in kwargs.items():
-            setattr(self, k, v)
-
-
-# -------------------------
-# Your Original TinyGPT Core
-# -------------------------
-class TinyGPT(nn.Module):
-    def __init__(self, vocab_size=30522, d_model=256, n_heads=4,
-                 n_layers=4, d_ff=1024, max_seq_len=256):
-        super().__init__()
-        self.tok_emb = nn.Embedding(vocab_size, d_model)
-        self.pos_emb = nn.Embedding(max_seq_len, d_model)
-
-        self.layers = nn.ModuleList([
-            TransformerBlock(d_model, n_heads, d_ff)
-            for _ in range(n_layers)
-        ])
-
-        self.ln_f = nn.LayerNorm(d_model)
-        self.head = nn.Linear(d_model, vocab_size, bias=False)
-        self.max_seq_len = max_seq_len
-
-    def forward(self, input_ids):
-        b, t = input_ids.size()
-        pos = torch.arange(0, t, device=input_ids.device).unsqueeze(0)
-        x = self.tok_emb(input_ids) + self.pos_emb(pos)
-
-        for layer in self.layers:
-            x = layer(x)
-
-        x = self.ln_f(x)
-        return self.head(x)
-
-
-class TransformerBlock(nn.Module):
-    def __init__(self, d_model, n_heads, d_ff):
-        super().__init__()
-        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
-        self.ln1 = nn.LayerNorm(d_model)
-        self.ff = nn.Sequential(
-            nn.Linear(d_model, d_ff),
-            nn.GELU(),
-            nn.Linear(d_ff, d_model)
-        )
-        self.ln2 = nn.LayerNorm(d_model)
-
-    def forward(self, x):
-        attn_out, _ = self.attn(x, x, x)
-        x = self.ln1(x + attn_out)
-        ff_out = self.ff(x)
-        x = self.ln2(x + ff_out)
-        return x
-
-
-# -------------------------
-# HF Wrapper: TinyGPTForCausalLM
-# -------------------------
 class TinyGPTForCausalLM(PreTrainedModel):
     config_class = TinyGPTConfig

     def __init__(self, config):
         super().__init__(config)

-        ...

         self.post_init()

     def forward(self, input_ids, labels=None):
-        ...

         loss = None
         if labels is not None:
-            ...

         return CausalLMOutput(
-            ...
         )
After (added lines are marked with +):

 import torch
 import torch.nn as nn
+from torch.nn import CrossEntropyLoss
 from transformers.modeling_outputs import CausalLMOutput
+from transformers.modeling_utils import PreTrainedModel
+from configuration_tinygpt import TinyGPTConfig  # Changed from relative to absolute import
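TinyGPTConfig is now imported from a separate configuration_tinygpt.py instead of being defined in this file. That file is not part of this diff; a minimal sketch of what it would contain, assuming it mirrors the config that was removed above and follows the usual PretrainedConfig pattern, is:

# configuration_tinygpt.py -- hypothetical sketch, not part of this commit
from transformers import PretrainedConfig

class TinyGPTConfig(PretrainedConfig):
    model_type = "tinygpt"

    def __init__(self, vocab_size=30522, d_model=256, n_heads=4,
                 n_layers=4, d_ff=1024, max_seq_len=256, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.d_ff = d_ff
        self.max_seq_len = max_seq_len
        super().__init__(**kwargs)  # lets save_pretrained / from_pretrained round-trip extra keys

Subclassing PretrainedConfig (rather than the plain class that was removed) is what allows PreTrainedModel.__init__ and post_init() below to work and config.json to be saved and loaded.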
 class TinyGPTForCausalLM(PreTrainedModel):
     config_class = TinyGPTConfig

     def __init__(self, config):
         super().__init__(config)

+        self.embed = nn.Embedding(config.vocab_size, config.d_model)
+        self.pos_embed = nn.Embedding(config.max_seq_len, config.d_model)
+
+        self.blocks = nn.ModuleList([
+            nn.TransformerEncoderLayer(
+                d_model=config.d_model,
+                nhead=config.n_heads,
+                dim_feedforward=config.d_ff,
+                batch_first=True
+            )
+            for _ in range(config.n_layers)
+        ])
+
+        self.norm = nn.LayerNorm(config.d_model)
+        self.lm_head = nn.Linear(config.d_model, config.vocab_size)

         self.post_init()
     def forward(self, input_ids, labels=None):
+        B, T = input_ids.shape
+        positions = torch.arange(T, device=input_ids.device).unsqueeze(0)
+
+        x = self.embed(input_ids) + self.pos_embed(positions)
+
+        for blk in self.blocks:
+            x = blk(x)
+
+        x = self.norm(x)
+        logits = self.lm_head(x)

         loss = None
         if labels is not None:
+            shift_logits = logits[:, :-1, :].contiguous()
+            shift_labels = labels[:, 1:].contiguous()
+            loss_fct = CrossEntropyLoss()
+            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
+                            shift_labels.view(-1))

         return CausalLMOutput(
+            loss=loss,
+            logits=logits
         )
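Note that nn.TransformerEncoderLayer attends over the whole sequence in both directions when no attention mask is supplied, so the forward pass above is not strictly causal. If left-to-right attention is intended for language modeling, a causal mask can be passed to each block; a minimal sketch of that variant (hypothetical, reusing the names from forward above):

causal_mask = nn.Transformer.generate_square_subsequent_mask(T).to(input_ids.device)  # (T, T), -inf above the diagonal
for blk in self.blocks:
    x = blk(x, src_mask=causal_mask)  # position i attends only to positions <= i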
+    @torch.no_grad()
+    def generate(self, input_ids, max_new_tokens=50):
+        for _ in range(max_new_tokens):
+            logits = self.forward(input_ids).logits
+            next_token = torch.argmax(logits[:, -1, :], dim=-1)
+            input_ids = torch.cat([input_ids, next_token[:, None]], dim=1)
+        return input_ids
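
A quick smoke test of the updated module, assuming configuration_tinygpt.py (as sketched above) sits next to modeling_tinygpt.py in the repository:

import torch
from configuration_tinygpt import TinyGPTConfig
from modeling_tinygpt import TinyGPTForCausalLM

config = TinyGPTConfig()
model = TinyGPTForCausalLM(config)

input_ids = torch.randint(0, config.vocab_size, (1, 16))
out = model(input_ids, labels=input_ids)
print(out.loss)              # scalar cross-entropy loss
print(out.logits.shape)      # torch.Size([1, 16, 30522])

generated = model.generate(input_ids, max_new_tokens=8)
print(generated.shape)       # torch.Size([1, 24])

generate decodes greedily (argmax) and keeps appending to input_ids, so prompt length plus max_new_tokens must stay within config.max_seq_len (256 by default) or the position-embedding lookup will fail.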
|