# model_classes.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import json


# ========================= RWKV-Mamba Hybrid =========================
class RWKVMambaHybrid(nn.Module):
    """Combines RWKV time-mixing with Mamba state-space dynamics"""

    def __init__(self, d_model, d_state=64):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        self.w_mix = nn.Parameter(torch.ones(d_model) * 0.5)
        self.A = nn.Parameter(torch.randn(d_state, d_state) * 0.01)
        self.B = nn.Parameter(torch.randn(d_state, d_model) * 0.01)
        self.C = nn.Parameter(torch.randn(d_model, d_state) * 0.01)
        self.D = nn.Parameter(torch.ones(d_model) * 0.1)

    def forward(self, x):
        B, T, C = x.shape
        h = torch.zeros(B, C, device=x.device)             # RWKV-style mixed hidden state
        s = torch.zeros(B, self.d_state, device=x.device)  # state-space memory
        outputs = []
        for t in range(T):
            x_t = x[:, t, :]
            # RWKV time-mixing: exponential moving average of the input
            h = self.w_mix * h + (1 - self.w_mix) * x_t
            # Mamba-style linear state-space recurrence
            s = s @ self.A.T + x_t @ self.B.T
            y_t = s @ self.C.T + h * self.D
            outputs.append(y_t)
        return torch.stack(outputs, dim=1)
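
# Usage sketch (illustrative, not part of the original module): the hybrid layer is
# shape-preserving, mapping (batch, seq_len, d_model) -> (batch, seq_len, d_model),
# so it can stand in wherever an attention sub-layer would go. Example sizes are assumed.
#
#   layer = RWKVMambaHybrid(d_model=512, d_state=64)
#   y = layer(torch.randn(2, 16, 512))   # y.shape == (2, 16, 512)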

# ========================= Full Attention =========================
class FullAttention(nn.Module):
    """Standard Multi-Head Attention"""

    def __init__(self, d_model, n_heads=16):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        assert d_model % n_heads == 0, "d_model must be divisible by n_heads"
        self.qkv = nn.Linear(d_model, d_model * 3)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x, mask=None):
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)
        # Reshape to (B, n_heads, T, head_dim)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        # Scaled dot-product attention scores: (B, n_heads, T, T)
        attn = (q @ k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        if mask is not None:
            # Zero entries in the mask are blocked (e.g. future positions under a causal mask)
            mask = mask.expand(B, self.n_heads, T, T).bool()
            attn = attn.masked_fill(mask == 0, float('-inf'))
        attn = F.softmax(attn, dim=-1)
        out = attn @ v
        # Merge heads back to (B, T, d_model)
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)
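
# Usage sketch (illustrative): the mask is expected to broadcast to (B, n_heads, T, T);
# i3Model below passes a lower-triangular causal mask shaped (1, 1, T, T).
# Example sizes are assumed.
#
#   attn = FullAttention(d_model=512, n_heads=16)
#   x = torch.randn(2, 16, 512)
#   causal = torch.tril(torch.ones(16, 16)).view(1, 1, 16, 16)
#   y = attn(x, causal)   # y.shape == (2, 16, 512)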

# ========================= i3 Hybrid Block =========================
class i3HybridBlock(nn.Module):
    """Single hybrid block with RWKV-Mamba + FFN"""

    def __init__(self, d_model, d_state=64, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.hybrid = RWKVMambaHybrid(d_model, d_state)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model)
        )

    def forward(self, x, mask=None):
        x = x + self.hybrid(self.ln1(x))
        x = x + self.ffn(self.ln2(x))
        return x


# ========================= i3 Attention Block =========================
class i3AttentionBlock(nn.Module):
    """Single attention block with MHA + FFN"""

    def __init__(self, d_model, n_heads=16, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.attn = FullAttention(d_model, n_heads)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model)
        )

    def forward(self, x, mask=None):
        x = x + self.attn(self.ln1(x), mask)
        x = x + self.ffn(self.ln2(x))
        return x

# ========================= i3 Model =========================
class i3Model(nn.Module):
    """Full hybrid LLM: 10 Hybrid + 6 Attention blocks"""

    def __init__(self, vocab_size, d_model=512, n_heads=16,
                 max_seq_len=256, d_state=32):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_seq_len = max_seq_len
        self.embed = nn.Embedding(vocab_size, d_model)
        self.pos_embed = nn.Embedding(max_seq_len, d_model)
        # 10 recurrent hybrid blocks followed by 6 full-attention blocks
        hybrid_layers = [i3HybridBlock(d_model, d_state=d_state) for _ in range(10)]
        attention_layers = [i3AttentionBlock(d_model, n_heads=n_heads) for _ in range(6)]
        self.layers = nn.ModuleList(hybrid_layers + attention_layers)
        self.ln_f = nn.LayerNorm(d_model)
        self.head = nn.Linear(d_model, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, idx, targets=None):
        B, T = idx.shape
        assert T <= self.max_seq_len
        pos = torch.arange(0, T, device=idx.device).unsqueeze(0)
        x = self.embed(idx) + self.pos_embed(pos)
        # Causal mask: lower-triangular (1, 1, T, T), broadcast inside the attention blocks
        mask = torch.tril(torch.ones(T, T, device=idx.device)).view(1, 1, T, T)
        for layer in self.layers:
            x = layer(x, mask)
        x = self.ln_f(x)
        logits = self.head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss

    def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """Standard generation returning the full sequence at the end"""
        for _ in range(max_new_tokens):
            # Crop the context window if it exceeds max_seq_len
            idx_cond = idx if idx.size(1) <= self.max_seq_len else idx[:, -self.max_seq_len:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature
            if top_k is not None:
                # Keep only the top_k logits; push the rest to -inf before softmax
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

    def generate_stream(self, idx, max_new_tokens, temperature=1.0, top_k=None):
        """Generator that yields the sequence state at every step"""
        for _ in range(max_new_tokens):
            # Crop context if needed
            idx_cond = idx if idx.size(1) <= self.max_seq_len else idx[:, -self.max_seq_len:]
            # Forward pass
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature
            # Sampling
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, num_samples=1)
            # Append and yield
            idx = torch.cat((idx, idx_next), dim=1)
            yield idx
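
# Usage sketch (illustrative): generate() returns the finished sequence, while
# generate_stream() yields the growing sequence after each sampled token, which
# suits incremental UIs. Sizes and prompt below are assumed.
#
#   model = i3Model(vocab_size=1000, d_model=128, n_heads=4, max_seq_len=64, d_state=16)
#   prompt = torch.zeros(1, 1, dtype=torch.long)
#   for seq in model.generate_stream(prompt, max_new_tokens=5, temperature=0.8, top_k=40):
#       print(seq.shape)   # grows by one token per step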

# ========================= ChunkTokenizer =========================
class ChunkTokenizer:
    """Memory-efficient 2-3 character chunk tokenizer"""

    def __init__(self):
        self.chunk_to_idx = {}
        self.idx_to_chunk = {}
        self.vocab_size = 0
        self.unk_token = '<UNK>'
        self.unk_idx = 0

    def load(self, path):
        with open(path, 'r') as f:
            data = json.load(f)
        self.chunk_to_idx = data['chunk_to_idx']
        # JSON keys are strings, so restore integer indices for decoding
        self.idx_to_chunk = {int(k): v for k, v in data['idx_to_chunk'].items()}
        self.vocab_size = data['vocab_size']
        self.unk_token = data.get('unk_token', '<UNK>')
        self.unk_idx = data.get('unk_idx', 0)

    def encode(self, text):
        text = text.lower()
        pos = 0
        indices = []
        while pos < len(text):
            # Greedy longest-match: try a 3-char chunk first, then 2, then 1
            for chunk_len in [3, 2, 1]:
                chunk = text[pos:pos+chunk_len]
                if chunk in self.chunk_to_idx:
                    indices.append(self.chunk_to_idx[chunk])
                    pos += chunk_len
                    break
            else:
                # No chunk matched (for-else): emit the unknown index and advance one character
                indices.append(self.unk_idx)
                pos += 1
        return indices

    def decode(self, indices):
        return ''.join([self.idx_to_chunk.get(int(i), self.unk_token) for i in indices])
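
# Illustrative smoke test (not part of the original module): builds a toy vocab by
# hand instead of calling ChunkTokenizer.load(), since the real vocab JSON is not
# shown here, then runs a short untrained generation. All sizes are assumed.
if __name__ == "__main__":
    tok = ChunkTokenizer()
    chunks = ['<UNK>', 'the', ' ', 'he', 'llo', 'o', 'h', 'e', 'l', 't']
    tok.chunk_to_idx = {c: i for i, c in enumerate(chunks)}
    tok.idx_to_chunk = {i: c for i, c in enumerate(chunks)}
    tok.vocab_size = len(chunks)

    ids = tok.encode("hello the")
    print(ids, "->", repr(tok.decode(ids)))

    model = i3Model(vocab_size=tok.vocab_size, d_model=64, n_heads=4,
                    max_seq_len=32, d_state=16)
    prompt = torch.tensor([ids], dtype=torch.long)
    out = model.generate(prompt, max_new_tokens=8, temperature=1.0, top_k=5)
    print("generated ids:", out[0].tolist())
    print("decoded:", repr(tok.decode(out[0].tolist())))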