kgrabko committed
Commit 2355ccd · verified · 1 Parent(s): 0bb2772

Upload gpt_pytorch.py

JiRack GPT-2 model class for small models

Files changed (1): gpt_pytorch.py (+228, -0)

gpt_pytorch.py ADDED
@@ -0,0 +1,228 @@
# Copyright (c) 2025 CMS Manhattan
# All rights reserved.
# Author: Konstantin Vladimirovich Grabko
# Email: grabko@cmsmanhattan.com
# Phone: +1(516)777-0945
#
# This file is part of a project authored by CMS Manhattan. You may use, distribute, and modify
# this code under the terms of the GNU GENERAL PUBLIC LICENSE, Version 3, 29 June 2007.
# Please read <http://www.gnu.org/licenses/>.

# JiRackPyTorch GPT-2 class — final clean version, December 2025 (translated comments)

import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional

VOCAB_SIZE = 50257        # GPT-2 BPE vocabulary (<|endoftext|> = 50256)
MODEL_DIM = 768           # embedding / hidden width
NUM_HEADS = 12            # attention heads per layer
NUM_LAYERS = 6            # transformer blocks
MAX_SEQ_LEN = 8192        # length of the learned positional table
FFN_HIDDEN_DIM = 4 * MODEL_DIM
HEAD_DIM = MODEL_DIM // NUM_HEADS

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
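
# Back-of-envelope parameter count for this configuration (editorial note;
# assumes the lm_head/token_emb weight tying done in GPTPyTorch below):
#   token embedding: 50257 * 768                   ~ 38.6M (shared with lm_head)
#   positional table: 8192 * 768                   ~  6.3M
#   per block: 4 * 768^2 attn + 2 * 768 * 3072 FFN ~  7.1M, x6 blocks ~ 42.5M
#   total                                          ~ 87M (cf. the count printed in __main__)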


class LearnedPositionalEmbedding(nn.Module):
    def __init__(self, max_seq_len: int, embed_dim: int):
        super().__init__()
        self.pos_emb = nn.Parameter(torch.zeros(max_seq_len, embed_dim))

    def forward(self, x: torch.Tensor, pos_offset: int = 0) -> torch.Tensor:
        seq_len = x.size(1)
        pos = self.pos_emb[pos_offset : pos_offset + seq_len]
        return x + pos.unsqueeze(0)


class MultiHeadAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.k_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.v_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.out_proj = nn.Linear(MODEL_DIM, MODEL_DIM, bias=False)
        self.scale = HEAD_DIM ** -0.5

    def forward(self, x: torch.Tensor, past_kv=None):
        B, T, _ = x.shape
        q = self.q_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        k = self.k_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)
        v = self.v_proj(x).view(B, T, NUM_HEADS, HEAD_DIM).transpose(1, 2)

        # Append cached keys/values from previous decode steps along the time axis.
        if past_kv is not None and past_kv[0] is not None:
            past_k, past_v = past_kv
            k = torch.cat([past_k, k], dim=2)
            v = torch.cat([past_v, v], dim=2)

        seqlen = k.size(2)

        attn = torch.matmul(q, k.transpose(-2, -1)) * self.scale

        # The causal mask is only needed during prefill (T == seqlen). During
        # cached decoding T == 1 and the single query may attend to all past keys.
        if T == seqlen:
            mask = torch.tril(torch.ones(T, seqlen, device=x.device, dtype=torch.bool))
            mask = mask.view(1, 1, T, seqlen)
            attn = attn.masked_fill(~mask, float('-inf'))

        attn = F.softmax(attn, dim=-1)
        out = torch.matmul(attn, v)
        out = out.transpose(1, 2).contiguous().view(B, T, MODEL_DIM)
        out = self.out_proj(out)

        return out, (k, v)
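
# Shape sketch for the cache above (editorial, illustrative values): with B = 1
# and a 10-token prompt, prefill returns k and v of shape (1, 12, 10, 64);
# after one cached decode step (T = 1) they grow to (1, 12, 11, 64).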


class FeedForward(nn.Module):
    def __init__(self):
        super().__init__()
        self.c_fc = nn.Linear(MODEL_DIM, FFN_HIDDEN_DIM, bias=False)
        self.c_proj = nn.Linear(FFN_HIDDEN_DIM, MODEL_DIM, bias=False)

    def forward(self, x):
        # GPT-2 uses the tanh approximation of GELU.
        return self.c_proj(F.gelu(self.c_fc(x), approximate='tanh'))


class TransformerBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn = MultiHeadAttention()
        self.ffn = FeedForward()
        self.norm1 = nn.LayerNorm(MODEL_DIM)
        self.norm2 = nn.LayerNorm(MODEL_DIM)

    def forward(self, x, past_kv=None):
        # Pre-LayerNorm residual layout, as in GPT-2.
        attn_out, new_kv = self.attn(self.norm1(x), past_kv)
        x = x + attn_out
        x = x + self.ffn(self.norm2(x))
        return x, new_kv


class GPTPyTorch(nn.Module):
    def __init__(self):
        super().__init__()
        self.token_emb = nn.Embedding(VOCAB_SIZE, MODEL_DIM)
        self.pos_emb = LearnedPositionalEmbedding(MAX_SEQ_LEN, MODEL_DIM)
        self.blocks = nn.ModuleList([TransformerBlock() for _ in range(NUM_LAYERS)])
        self.ln_f = nn.LayerNorm(MODEL_DIM)
        self.lm_head = nn.Linear(MODEL_DIM, VOCAB_SIZE, bias=False)

        signature = "Konstantin V Gbabko . original author © 2025"
        bytes_tensor = torch.tensor([ord(c) for c in signature], dtype=torch.uint8)
        self.register_buffer("konstantin_gbabko_proof_of_authorship", bytes_tensor)
        self.register_buffer("konstantin_gbabko_birth_date", torch.tensor([20251126], dtype=torch.int64))

        # Initialize first, then tie, so the shared matrix keeps the embedding's
        # N(0, 0.02) init instead of being overwritten by the Linear init.
        self.apply(self._init_weights)
        self.lm_head.weight = self.token_emb.weight

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.xavier_uniform_(module.weight)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
        elif isinstance(module, nn.LayerNorm):
            nn.init.zeros_(module.bias)
            nn.init.ones_(module.weight)

    def forward(self, input_ids, past_kv: Optional[list] = None):
        B, T = input_ids.shape
        x = self.token_emb(input_ids)

        # Robust None checking for offset computation
        if past_kv is not None and past_kv[0] is not None:
            pos_offset = past_kv[0][0].size(2)
        else:
            pos_offset = 0
        x = self.pos_emb(x, pos_offset=pos_offset)

        new_kv_cache = [] if past_kv is not None else None

        for i, block in enumerate(self.blocks):
            layer_past = past_kv[i] if (past_kv is not None and past_kv[i] is not None) else None
            x, layer_kv = block(x, layer_past)
            if new_kv_cache is not None:
                new_kv_cache.append(layer_kv)

        x = self.ln_f(x)
        logits = self.lm_head(x)
        return logits, new_kv_cache
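
    # Call-pattern sketch for the KV cache (editorial, illustrative):
    #   logits, cache = model(prompt_ids, past_kv=[None] * NUM_LAYERS)  # prefill
    #   logits, cache = model(next_id, past_kv=cache)                   # one decode step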

    @torch.no_grad()
    def generate(
        self,
        input_ids: torch.Tensor,
        max_new_tokens: int = 100,
        temperature: float = 0.8,
        top_p: float = 0.95,
        repetition_penalty: float = 1.0,
        do_sample: bool = True,
        eos_token_id: int = 50256
    ) -> torch.Tensor:
        kv_cache = [None] * NUM_LAYERS
        current_ids = input_ids.clone()

        for step in range(max_new_tokens):
            # Prefill on the full prompt, then feed only the newest token.
            if step == 0:
                input_for_model = current_ids
            else:
                input_for_model = current_ids[:, -1].unsqueeze(-1)

            logits, kv_cache = self(input_for_model, kv_cache)
            next_token_logits = logits[:, -1, :]

            if temperature > 0:
                next_token_logits = next_token_logits / temperature

            # Penalize tokens already generated (CTRL-style repetition penalty).
            if repetition_penalty != 1.0:
                for i in range(current_ids.shape[0]):
                    unique_tokens = torch.unique(current_ids[i]).tolist()
                    for token_id in unique_tokens:
                        score = next_token_logits[i, token_id]
                        if score < 0:
                            next_token_logits[i, token_id] = score * repetition_penalty
                        else:
                            next_token_logits[i, token_id] = score / repetition_penalty

            # Nucleus (top-p) filtering: keep the smallest prefix of tokens whose
            # cumulative probability exceeds top_p, always keeping the best token.
            if do_sample and top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(next_token_logits, descending=True)
                cumulative_probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
                sorted_indices_to_remove[:, 0] = False
                indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
                next_token_logits = next_token_logits.masked_fill(indices_to_remove, float('-inf'))

            if do_sample and temperature > 0:
                probs = torch.softmax(next_token_logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                next_token = torch.argmax(next_token_logits, dim=-1, keepdim=True)

            # Stop (without appending EOS) once every sequence in the batch hits it;
            # the original .item() check would fail for batch sizes above 1.
            if (next_token == eos_token_id).all():
                break

            current_ids = torch.cat([current_ids, next_token], dim=1)

        return current_ids
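
    # Worked top-p example (editorial): with sorted probs [0.5, 0.3, 0.15, 0.05]
    # and top_p = 0.9, cumulative sums are [0.5, 0.8, 0.95, 1.0]; the shift-by-one
    # keeps the token that first crosses the threshold, so sampling draws from
    # the top three tokens only.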


if __name__ == "__main__":
    os.makedirs("models", exist_ok=True)

    model = GPTPyTorch().to(device)
    model.eval()

    print(f"Device: {device}")
    print(f"Total parameters: {sum(p.numel() for p in model.parameters()) / 1e6:.2f}M")

    input_ids = torch.randint(0, VOCAB_SIZE, (1, 50), device=device)
    logits, _ = model(input_ids)
    print("logits shape:", logits.shape)

    generated = model.generate(input_ids, max_new_tokens=100, temperature=0.8, top_p=0.9)
    print("Generated sequence length:", generated.shape[1])

    save_path = "models/JiRack_H12_L6_V50257_D768_MSL8192_FF768x4.pt"
    torch.save(model.state_dict(), save_path)
    print(f"Model successfully saved to {save_path}")