armenjeddi commited on
Commit
04e086e
·
verified ·
1 Parent(s): 194f1ce

Add base-loop model with 3 layers - max 8 iterations

Browse files
config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Base_Loop_GPTForCausalLM"
4
+ ],
5
+ "auto_map": {
6
+ "AutoConfig": "modeling_base_loop.GPTConfig",
7
+ "AutoModelForCausalLM": "modeling_base_loop.Base_Loop_GPTForCausalLM"
8
+ },
9
+ "dtype": "bfloat16",
10
+ "model_type": "base_loop",
11
+ "transformers_version": "4.57.0"
12
+ }
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "transformers_version": "4.57.0"
4
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b395d8ced9bffcc9908bed57f2685543ab1f5b3fc0802219efb90099ca0ed232
3
+ size 436763032
modeling_base_loop.py ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from dataclasses import dataclass
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from transformers import PreTrainedModel, PretrainedConfig
7
+ from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
8
+
9
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with a fused QKV projection (GPT-2 style)."""

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # one matmul produces query, key and value for every head at once
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # projection back into the residual stream
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        # dropout on the attention weights and on the residual output
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # PyTorch >= 2.0 ships a fused scaled-dot-product attention kernel
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
        if not self.flash:
            print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
        # lower-triangular mask for the slow path (registered unconditionally,
        # so it also appears in checkpoints saved from flash-capable machines)
        causal_mask = torch.tril(torch.ones(config.block_size, config.block_size))
        self.register_buffer("bias", causal_mask.view(1, 1, config.block_size, config.block_size))

    def forward(self, x):
        bsz, seq_len, dim = x.size()  # batch, sequence length, embedding dim

        # project once, split into q/k/v, then break the embedding into heads
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        head_dim = dim // self.n_head
        q = q.view(bsz, seq_len, self.n_head, head_dim).transpose(1, 2)  # (B, nh, T, hs)
        k = k.view(bsz, seq_len, self.n_head, head_dim).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(bsz, seq_len, self.n_head, head_dim).transpose(1, 2)  # (B, nh, T, hs)

        if self.flash:
            # fused kernel applies the causal mask and dropout internally
            drop_p = self.dropout if self.training else 0
            y = torch.nn.functional.scaled_dot_product_attention(
                q, k, v, attn_mask=None, dropout_p=drop_p, is_causal=True)
        else:
            # explicit path: scores -> causal mask -> softmax -> dropout -> mix values
            scores = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
            scores = scores.masked_fill(self.bias[:, :, :seq_len, :seq_len] == 0, float('-inf'))
            probs = self.attn_dropout(F.softmax(scores, dim=-1))
            y = probs @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)

        # merge the heads back side by side and project out
        y = y.transpose(1, 2).contiguous().view(bsz, seq_len, dim)
        return self.resid_dropout(self.c_proj(y))
57
+
58
class MLP(nn.Module):
    """Position-wise feed-forward network: expand, GELU, contract, dropout."""

    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, config.intermediate_dim, bias=config.bias)
        self.gelu = nn.GELU()
        self.c_proj = nn.Linear(config.intermediate_dim, config.n_embd, bias=config.bias)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        # up-project -> nonlinearity -> down-project -> dropout
        hidden = self.gelu(self.c_fc(x))
        return self.dropout(self.c_proj(hidden))
73
+
74
class Block(nn.Module):
    """Pre-norm transformer block: RMSNorm'd attention and MLP, each residual."""

    def __init__(self, config):
        super().__init__()
        self.norm_1 = nn.RMSNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.norm_2 = nn.RMSNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # residual connection around attention, then around the feed-forward net
        h = x + self.attn(self.norm_1(x))
        return h + self.mlp(self.norm_2(h))
87
+
88
class SharedBlock(nn.Module):
    """A fixed stack of `depth` Blocks applied in order.

    The whole stack is re-applied multiple times by the outer loop in
    GPT.forward, which is what makes the model "looped"/weight-shared.
    """

    def __init__(self, depth, config):
        super().__init__()
        self.blocks = nn.ModuleList([Block(config) for _ in range(depth)])

    def forward(self, x):
        for layer in self.blocks:
            x = layer(x)
        return x
99
+
100
class GPTConfig(PretrainedConfig):
    """Configuration for the base-loop GPT.

    NOTE(review): the original declared this as a ``@dataclass`` with
    class-level defaults. That had two problems: (1) the dataclass decorator
    injects ``__repr__``/``__eq__`` that shadow the ones PretrainedConfig
    relies on, and (2) class-level attributes never land in the instance
    ``__dict__``, so PretrainedConfig serialization omitted every
    hyperparameter — the saved config.json in this repo carries none of them
    and loading silently falls back to class defaults. Assigning the
    attributes in ``__init__`` fixes both while keeping the same attribute
    names and default values.
    """

    model_type = 'base_loop'

    def __init__(
        self,
        block_size=1024,
        vocab_size=50304,  # GPT-2 vocab of 50257, padded to a multiple of 64 for efficiency
        n_layer=3,
        n_head=32,
        n_embd=2048,
        dropout=0.0,
        bias=False,  # True: bias in Linears, like GPT-2. False: a bit better and faster
        intermediate_dim=5120,
        **kwargs,
    ):
        self.block_size = block_size
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.dropout = dropout
        self.bias = bias
        self.intermediate_dim = intermediate_dim
        super().__init__(**kwargs)
114
+
115
class GPT(nn.Module):
    """Weight-shared ("looped") GPT backbone.

    A single stack of ``n_layer`` Blocks (``transformer.h``) is applied
    ``steps`` times in :meth:`forward`, giving an effective depth of
    ``steps * n_layer`` with the parameter count of only ``n_layer`` layers.
    Token/position embeddings follow GPT-2; the token embedding matrix is
    weight-tied to the LM head.
    """

    def __init__(self, config):
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),  # token embeddings
            wpe = nn.Embedding(config.block_size, config.n_embd),  # learned absolute position embeddings
            drop = nn.Dropout(config.dropout),
            h = SharedBlock(config.n_layer, config),               # the shared block stack
            norm_f = nn.RMSNorm(config.n_embd),                    # final norm before the LM head
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # with weight tying when using torch.compile() some warnings get generated:
        # "UserWarning: functional_call was passed multiple values for tied weights.
        # This behavior is deprecated and will be an error in future versions"
        # not 100% sure what this is, so far seems to be harmless. TODO investigate
        self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying

        # init all weights; tying above means wte/lm_head share one tensor and
        # are therefore initialized together
        self.apply(self._init_weights)
        # apply special scaled init to the residual projections, per GPT-2 paper
        for pn, p in self.named_parameters():
            if pn.endswith('c_proj.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))

        # report number of parameters
        print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))

    def get_num_params(self, non_embedding=True):
        """
        Return the number of parameters in the model.
        For non-embedding count (default), the position embeddings get subtracted.
        The token embeddings would too, except due to the parameter sharing these
        params are actually used as weights in the final layer, so we include them.
        """
        n_params = sum(p.numel() for p in self.parameters())
        if non_embedding:
            n_params -= self.transformer.wpe.weight.numel()
        return n_params

    def _init_weights(self, module):
        # GPT-2 style init: N(0, 0.02) for Linear/Embedding weights, zeros for biases.
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None, steps=8, **kwargs):
        """Run the backbone.

        Args:
            idx: (b, t) token ids; t must not exceed config.block_size.
            targets: optional (b, t) ids for the cross-entropy loss; positions
                equal to -1 are ignored. NOTE(review): no shifting happens
                here, so targets must already be offset by one relative to
                idx — confirm against the training caller.
            steps: number of times the shared block stack is re-applied
                (the commit message caps this at 8).
            **kwargs: silently ignored — in particular, attention_mask passed
                by the HF wrapper is dropped here.

        Returns:
            (logits, loss) with loss None when targets is None.
        """
        device = idx.device
        b, t = idx.size()
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
        pos = torch.arange(0, t, dtype=torch.long, device=device) # shape (t)

        # embed tokens + positions, then iterate the shared stack `steps` times
        tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
        pos_emb = self.transformer.wpe(pos) # position embeddings of shape (t, n_embd)
        x = self.transformer.drop(tok_emb + pos_emb)
        for _ in range(steps):
            x = self.transformer.h(x)

        x = self.transformer.norm_f(x)

        logits = self.lm_head(x)

        loss = None
        if targets is not None:
            loss = F.cross_entropy(
                logits.view(-1, logits.size(-1)),
                targets.view(-1),
                ignore_index=-1,
            )

        return logits, loss
193
+
194
+ # ---- HF wrapper -------------------------------------------------------------
195
+
196
+ from transformers.generation.utils import GenerationMixin
197
+
198
class Base_Loop_GPTForCausalLM(PreTrainedModel, GenerationMixin):
    """Hugging Face wrapper exposing the looped GPT backbone as a causal LM.

    Provides the embedding/head accessors HF utilities expect and adapts the
    backbone's (logits, loss) tuple to a CausalLMOutputWithCrossAttentions.
    """

    config_class = GPTConfig
    main_input_name = "input_ids"
    _tied_weights_keys = ["gpt.transformer.wte.weight", "gpt.lm_head.weight"]

    def __init__(self, config: GPTConfig, **kwargs):
        super().__init__(config)
        self.gpt = GPT(config)
        self.post_init()

    # ---- embeddings/heads exposed for HF utilities ------------------------

    def get_input_embeddings(self):
        return self.gpt.transformer.wte

    def set_input_embeddings(self, new_emb):
        self.gpt.transformer.wte = new_emb
        self.gpt.lm_head.weight = new_emb.weight  # keep the weight tie intact

    def get_output_embeddings(self):
        return self.gpt.lm_head

    def set_output_embeddings(self, new_out):
        self.gpt.lm_head = new_out

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        # no KV cache in the backbone: the full prefix is re-run every step
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            # no labels during generation
        }

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        """HF-style forward.

        Args:
            input_ids: (b, t) token ids.
            attention_mask: accepted for API compatibility but NOT used — the
                backbone's forward drops it via **kwargs, so padded batches
                are not masked correctly.
            labels: optional (b, t) ids, HF convention (typically a copy of
                input_ids); shifted internally so position t predicts t+1.

        Returns:
            CausalLMOutputWithCrossAttentions with logits and optional loss.
        """
        logits, _ = self.gpt(input_ids)
        loss = None
        if labels is not None:
            # BUGFIX(review): the original passed labels to the backbone
            # unshifted, which with the HF convention (labels == input_ids)
            # turns the loss into a trivial copy task. Shift here per the
            # standard causal-LM recipe; -100 is HF's standard ignore index.
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = labels[:, 1:].contiguous()
            loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
        return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits)
235
+
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": false,
15
+ "eos_token": "<|endoftext|>",
16
+ "extra_special_tokens": {},
17
+ "model_max_length": 1024,
18
+ "pad_token": "<|endoftext|>",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff