FlameF0X committed
Commit a2898d9 · verified · 1 Parent(s): 5dcf306

Create model_classes.py

Files changed (1)
  1. model_classes.py +208 -0
model_classes.py ADDED
@@ -0,0 +1,208 @@
# model_classes.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import json


# ========================= RWKV-Mamba Hybrid =========================
class RWKVMambaHybrid(nn.Module):
    """Combines RWKV time-mixing with Mamba state-space dynamics"""
    def __init__(self, d_model, d_state=64):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        self.w_mix = nn.Parameter(torch.ones(d_model) * 0.5)
        self.A = nn.Parameter(torch.randn(d_state, d_state) * 0.01)
        self.B = nn.Parameter(torch.randn(d_state, d_model) * 0.01)
        self.C = nn.Parameter(torch.randn(d_model, d_state) * 0.01)
        self.D = nn.Parameter(torch.ones(d_model) * 0.1)

    def forward(self, x):
        B, T, C = x.shape
        h = torch.zeros(B, C, device=x.device)
        s = torch.zeros(B, self.d_state, device=x.device)
        outputs = []

        for t in range(T):
            x_t = x[:, t, :]
            h = self.w_mix * h + (1 - self.w_mix) * x_t
            s = s @ self.A.T + x_t @ self.B.T
            y_t = s @ self.C.T + h * self.D
            outputs.append(y_t)

        return torch.stack(outputs, dim=1)

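# The per-timestep recurrence computed by RWKVMambaHybrid.forward, for input x_t
# of shape (batch, d_model):
#   h_t = w_mix * h_{t-1} + (1 - w_mix) * x_t    # RWKV-style elementwise exponential time mixing
#   s_t = s_{t-1} @ A.T + x_t @ B.T              # time-invariant linear state-space update
#   y_t = s_t @ C.T + D * h_t                    # readout combines the state and the mixed stream
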
# ========================= Full Attention =========================
class FullAttention(nn.Module):
    """Standard Multi-Head Attention"""
    def __init__(self, d_model, n_heads=16):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        assert d_model % n_heads == 0, "d_model must be divisible by n_heads"

        self.qkv = nn.Linear(d_model, d_model * 3)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x, mask=None):
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)

        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)

        attn = (q @ k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        if mask is not None:
            mask = mask.expand(B, self.n_heads, T, T).bool()
            attn = attn.masked_fill(mask == 0, float('-inf'))

        attn = F.softmax(attn, dim=-1)
        out = attn @ v
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)


# ========================= i3 Hybrid Block =========================
class i3HybridBlock(nn.Module):
    """Single hybrid block with RWKV-Mamba + FFN"""
    def __init__(self, d_model, d_state=64, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.hybrid = RWKVMambaHybrid(d_model, d_state)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model)
        )

    def forward(self, x, mask=None):
        x = x + self.hybrid(self.ln1(x))
        x = x + self.ffn(self.ln2(x))
        return x


# ========================= i3 Attention Block =========================
class i3AttentionBlock(nn.Module):
    """Single attention block with MHA + FFN"""
    def __init__(self, d_model, n_heads=16, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.attn = FullAttention(d_model, n_heads)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model)
        )

    def forward(self, x, mask=None):
        x = x + self.attn(self.ln1(x), mask)
        x = x + self.ffn(self.ln2(x))
        return x

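# Both block types use pre-LayerNorm residual connections (x + sublayer(LN(x))).
# i3HybridBlock accepts a mask argument only so it shares a call signature with
# i3AttentionBlock; the recurrent mixer ignores it, since its recurrence reads
# only past timesteps and is therefore causal by construction.
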
# ========================= i3 Model =========================
class i3Model(nn.Module):
    """Full hybrid LLM: 10 Hybrid + 6 Attention blocks"""
    def __init__(self, vocab_size, d_model=512, n_heads=16,
                 max_seq_len=256, d_state=32):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_seq_len = max_seq_len

        self.embed = nn.Embedding(vocab_size, d_model)
        self.pos_embed = nn.Embedding(max_seq_len, d_model)

        hybrid_layers = [i3HybridBlock(d_model, d_state=d_state) for _ in range(10)]
        attention_layers = [i3AttentionBlock(d_model, n_heads=n_heads) for _ in range(6)]
        self.layers = nn.ModuleList(hybrid_layers + attention_layers)

        self.ln_f = nn.LayerNorm(d_model)
        self.head = nn.Linear(d_model, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, idx, targets=None):
        B, T = idx.shape
        assert T <= self.max_seq_len
        pos = torch.arange(0, T, device=idx.device).unsqueeze(0)
        x = self.embed(idx) + self.pos_embed(pos)
        mask = torch.tril(torch.ones(T, T, device=idx.device)).view(1, 1, T, T)

        for layer in self.layers:
            x = layer(x, mask)

        x = self.ln_f(x)
        logits = self.head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            idx_cond = idx if idx.size(1) <= self.max_seq_len else idx[:, -self.max_seq_len:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature

            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')

            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx


# ========================= ChunkTokenizer =========================
class ChunkTokenizer:
    """Memory-efficient 2-3 character chunk tokenizer"""
    def __init__(self):
        self.chunk_to_idx = {}
        self.idx_to_chunk = {}
        self.vocab_size = 0
        self.unk_token = '<UNK>'
        self.unk_idx = 0

    def load(self, path):
        with open(path, 'r') as f:
            data = json.load(f)
        self.chunk_to_idx = data['chunk_to_idx']
        self.idx_to_chunk = {int(k): v for k, v in data['idx_to_chunk'].items()}
        self.vocab_size = data['vocab_size']
        self.unk_token = data.get('unk_token', '<UNK>')
        self.unk_idx = data.get('unk_idx', 0)

    def encode(self, text):
        text = text.lower()
        pos = 0
        indices = []
        while pos < len(text):
            for chunk_len in [3, 2, 1]:
                chunk = text[pos:pos+chunk_len]
                if chunk in self.chunk_to_idx:
                    indices.append(self.chunk_to_idx[chunk])
                    pos += chunk_len
                    break
            else:
                indices.append(self.unk_idx)
                pos += 1
        return indices

    def decode(self, indices):
        return ''.join([self.idx_to_chunk.get(int(i), self.unk_token) for i in indices])
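
For orientation, a minimal usage sketch (not part of the committed file): it assumes a chunk vocabulary with the fields ChunkTokenizer.load expects has already been built and saved, the path tokenizer.json is a placeholder, and the freshly initialized weights are untrained, so the sampled text is meaningless.

import torch
from model_classes import ChunkTokenizer, i3Model

# Load a previously built chunk vocabulary (the path is a placeholder).
tokenizer = ChunkTokenizer()
tokenizer.load("tokenizer.json")

# Build the model around the tokenizer's vocabulary size; other sizes keep the defaults above.
model = i3Model(vocab_size=tokenizer.vocab_size)
model.eval()

# Encode a prompt, sample a few new tokens, and decode back to text.
prompt = torch.tensor([tokenizer.encode("hello world")], dtype=torch.long)
generated = model.generate(prompt, max_new_tokens=20, temperature=0.8, top_k=40)
print(tokenizer.decode(generated[0].tolist()))

For training, forward(idx, targets) returns (logits, loss), where the loss is token-level cross-entropy over the flattened sequence.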