armenjeddi commited on
Commit
07ebdbd
·
verified ·
1 Parent(s): d0e1057

Add base gpt2 model with 3 layers

Browse files
config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "BaseGPTForCausalLM"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "modeling_base.GPTConfig",
7
+ "AutoModelForCausalLM": "modeling_base.BaseGPTForCausalLM"
8
+ },
9
+ "dtype": "bfloat16",
10
+ "model_type": "base",
11
+ "transformers_version": "4.57.0"
12
+ }
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.57.0"
4
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d1b6c888cb33f3003acb892723a322d4d7aabec376ac61ab924d9746e0f7baa
3
+ size 436762904
modeling_base.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from dataclasses import dataclass
3
+ import torch
4
+ import torch.nn as nn
5
+ from torch.nn import functional as F
6
+ from transformers import PreTrainedModel, PretrainedConfig
7
+ from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
8
+
9
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with one fused QKV projection.

    Uses torch.nn.functional.scaled_dot_product_attention (Flash Attention)
    when available (PyTorch >= 2.0); otherwise falls back to a manual
    masked-softmax implementation driven by a precomputed causal mask.
    """

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, but in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # regularization
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
            # causal mask to ensure that attention is only applied to the left in the input sequence
            # NOTE(review): registered only on the slow path (as in nanoGPT); the diff's
            # indentation here is ambiguous — confirm against the raw file. The buffer is
            # named "bias" for historical GPT-2 checkpoint compatibility, not an additive bias.
            self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
                                        .view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        """Apply causal self-attention to x of shape (B, T, C); returns same shape."""
        B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        if self.flash:
            # efficient attention using Flash Attention CUDA kernels
            y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
        else:
            # manual implementation of attention
            att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
            att = F.softmax(att, dim=-1)
            att = self.attn_dropout(att)
            y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side

        # output projection
        y = self.resid_dropout(self.c_proj(y))
        return y
57
+
58
class MLP(nn.Module):
    """Position-wise feed-forward sublayer: Linear -> GELU -> Linear -> Dropout.

    Expands from n_embd to intermediate_dim, applies GELU, projects back
    to n_embd, then applies dropout. Submodule names (c_fc, gelu, c_proj,
    dropout) are part of the state-dict interface and are kept unchanged.
    """

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, config.intermediate_dim, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(config.intermediate_dim, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        # expand -> nonlinearity -> project back -> regularize, as one pipeline
        return self.dropout(self.c_proj(self.gelu(self.c_fc(x))))
73
+
74
class Block(nn.Module):
    """Pre-norm transformer block.

    Computes x + attn(norm_1(x)) followed by x + mlp(norm_2(x)), i.e. a
    residual connection around each RMS-normalized sublayer.
    """

    def __init__(self, config):
        super().__init__()
        self.norm_1 = nn.RMSNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.norm_2 = nn.RMSNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # residual add around the attention sublayer, then around the MLP sublayer
        h = x + self.attn(self.norm_1(x))
        return h + self.mlp(self.norm_2(h))
87
+
88
class GPTConfig(PretrainedConfig):
    """Hugging Face-compatible configuration for the base GPT model.

    Bug fixed vs. the original: the hyperparameters were dataclass-style
    *class* attributes and the explicit ``__init__(**kwargs)`` never set
    them on the instance. ``PretrainedConfig.to_dict()`` / ``save_pretrained``
    serialize only instance ``__dict__``, so every hyperparameter was
    silently dropped from the saved config.json (as can be seen in this
    commit's config.json, which contains none of them). They are now real
    keyword arguments with the same defaults, assigned to ``self``, so
    configs round-trip through save/load. Passing them as kwargs remains
    backward compatible. The inert ``@dataclass`` decorator was removed
    (it never generated ``__init__`` because one was defined explicitly).
    """

    model_type = 'base'

    def __init__(
        self,
        block_size: int = 1024,
        vocab_size: int = 50304,  # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
        n_layer: int = 3,
        n_head: int = 32,
        n_embd: int = 2048,
        dropout: float = 0.0,
        bias: bool = False,  # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
        intermediate_dim: int = 5120,
        **kwargs,
    ):
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.dropout = dropout
        self.bias = bias
        self.intermediate_dim = intermediate_dim
        # remaining kwargs (e.g. architectures, auto_map, dtype) are handled by HF
        super().__init__(**kwargs)
102
+
103
class GPT(nn.Module):
    """Minimal GPT: learned token + positional embeddings, a stack of
    pre-norm transformer Blocks, a final RMSNorm, and a weight-tied LM head.
    """

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),   # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),   # learned absolute position embeddings
            drop = nn.Dropout(config.dropout),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            norm_f = nn.RMSNorm(config.n_embd),                      # final norm before the LM head
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # with weight tying when using torch.compile() some warnings get generated:
        # "UserWarning: functional_call was passed multiple values for tied weights.
        # This behavior is deprecated and will be an error in future versions"
        # not 100% sure what this is, so far seems to be harmless. TODO investigate
        self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying

        # init all weights
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        # (must run AFTER the generic init above so it overrides it)
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))

        # report number of parameters
        print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def _init_weights(self, module):
        # GPT-2 style init: N(0, 0.02) for Linear/Embedding weights, zeros for biases
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None, **kwargs):
        """Run the model on token ids.

        Args:
            idx: LongTensor of token ids, shape (b, t), with t <= block_size.
            targets: optional LongTensor of labels, same shape as idx; when
                given, cross-entropy loss is computed over all positions,
                ignoring labels equal to -1.
            **kwargs: extra keyword arguments (e.g. attention_mask forwarded
                by the HF wrapper) are accepted but IGNORED — this model has
                no padding-aware attention.

        Returns:
            (logits, loss): logits of shape (b, t, vocab_size); loss is a
            scalar tensor or None when targets is None.
        """
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)

        # forward the GPT model itself
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
        x = self.transformer.drop(tok_emb + pos_emb)
        for block in self.transformer.h:
            x = block(x)
        x = self.transformer.norm_f(x)

        logits = self.lm_head(x)

        loss = None
        if targets is not None:
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)),
                targets.view(-1),
                ignore_index=-1,
            )

        return logits, loss
180
+
181
# ---- HF wrapper -------------------------------------------------------------

from transformers.generation.utils import GenerationMixin

class BaseGPTForCausalLM(PreTrainedModel, GenerationMixin):
    """Hugging Face wrapper exposing the plain-PyTorch GPT as a causal LM.

    Registered via auto_map in config.json so AutoModelForCausalLM can load
    it with trust_remote_code. Delegates all computation to self.gpt.
    """

    config_class = GPTConfig
    main_input_name = "input_ids"
    # token embedding and LM head share one tensor (tied in GPT.__init__)
    _tied_weights_keys = ["gpt.transformer.wte.weight", "gpt.lm_head.weight"]

    def __init__(self, config: GPTConfig, **kwargs):
        super().__init__(config)
        self.gpt = GPT(config)
        self.post_init()

    # expose embeddings/heads for HF utilities
    def get_input_embeddings(self):
        return self.gpt.transformer.wte

    def set_input_embeddings(self, new_emb):
        self.gpt.transformer.wte = new_emb
        self.gpt.lm_head.weight = new_emb.weight # keep tied

    def get_output_embeddings(self):
        return self.gpt.lm_head

    def set_output_embeddings(self, new_out):
        self.gpt.lm_head = new_out

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        # no KV cache: the full input_ids sequence is re-fed each step
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            # no labels during generation
        }

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        # NOTE(review): attention_mask is forwarded here, but GPT.forward's
        # signature is (idx, targets=None, **kwargs) — the mask lands in
        # **kwargs and is never used, so padded positions ARE attended to.
        # Confirm whether padding-aware attention is needed for batched use.
        logits, loss = self.gpt(
            input_ids, targets=labels, attention_mask=attention_mask
        )
        return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits)
222
+
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": false,
15
+ "eos_token": "<|endoftext|>",
16
+ "extra_special_tokens": {},
17
+ "model_max_length": 1024,
18
+ "pad_token": "<|endoftext|>",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff