import torch
import torch.nn as nn
import torch.nn.functional as F
import json
import numpy as np

# ============================================================================
class ChunkTokenizer:
    """Greedy longest-match tokenizer over a fixed vocabulary of 1-3 char chunks."""

    def __init__(self):
        self.chunk_to_idx = {}
        self.idx_to_chunk = {}
        self.vocab_size = 0

    def load(self, path):
        with open(path, 'r') as f:
            vocab_data = json.load(f)
        self.chunk_to_idx = vocab_data['chunk_to_idx']
        self.idx_to_chunk = {int(k): v for k, v in vocab_data['idx_to_chunk'].items()}
        self.vocab_size = vocab_data['vocab_size']
        print(f"Loaded tokenizer ({self.vocab_size} tokens)")

    def encode(self, text):
        text = text.lower()
        pos, indices = 0, []
        while pos < len(text):
            # Try the longest chunk first; skip characters not in the vocab.
            for size in (3, 2, 1):
                chunk = text[pos:pos + size]
                if chunk in self.chunk_to_idx:
                    indices.append(self.chunk_to_idx[chunk])
                    pos += size
                    break
            else:
                pos += 1
        return indices

    def decode(self, indices):
        return ''.join([self.idx_to_chunk.get(int(i), '') for i in indices])

# ============================================================================
class LoRPtLinear(nn.Module):
    """Low-rank linear layer: the weight is factored as A (out x r) @ B (r x in)."""

    def __init__(self, in_features, out_features, rank=64):
        super().__init__()
        self.lora_A = nn.Parameter(torch.randn(out_features, rank) * 0.02)
        self.lora_B = nn.Parameter(torch.randn(rank, in_features) * 0.02)
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        return F.linear(x, self.lora_A @ self.lora_B, self.bias)


class RWKVMambaHybrid(nn.Module):
    """Recurrent token mixer: an RWKV-style per-channel EMA over tokens
    combined with a Mamba-style linear state-space update."""

    def __init__(self, d_model, d_state=32):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        self.w_mix = nn.Parameter(torch.ones(d_model) * 0.5)
        self.A = nn.Parameter(torch.randn(d_state, d_state) * 0.01)
        self.B = nn.Parameter(torch.randn(d_state, d_model) * 0.01)
        self.C = nn.Parameter(torch.randn(d_model, d_state) * 0.01)
        self.D = nn.Parameter(torch.ones(d_model) * 0.1)

    def forward(self, x):
        B, T, C = x.shape
        h = torch.zeros(B, C, device=x.device)             # EMA of the inputs
        s = torch.zeros(B, self.d_state, device=x.device)  # SSM state
        outputs = []
        for t in range(T):
            x_t = x[:, t, :]
            h = self.w_mix * h + (1 - self.w_mix) * x_t
            s = s @ self.A.T + x_t @ self.B.T
            y_t = s @ self.C.T + h * self.D
            outputs.append(y_t)
        return torch.stack(outputs, dim=1)


class KQVAttention(nn.Module):
    """Multi-head attention with low-rank (down/up) Q, K, V projections."""

    def __init__(self, d_model, n_heads=16, rank=64):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        self.q_down = nn.Linear(d_model, rank)
        self.q_up = nn.Linear(rank, d_model)
        self.k_down = nn.Linear(d_model, rank)
        self.k_up = nn.Linear(rank, d_model)
        self.v_down = nn.Linear(d_model, rank)
        self.v_up = nn.Linear(rank, d_model)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x, mask=None):
        B, T, C = x.shape
        q = self.q_up(self.q_down(x))
        k = self.k_up(self.k_down(x))
        v = self.v_up(self.v_down(x))
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        attn = (q @ k.transpose(-2, -1)) / np.sqrt(self.head_dim)
        if mask is not None:
            attn = attn.masked_fill(mask == 0, float('-inf'))
        attn = F.softmax(attn, dim=-1)
        out = attn @ v
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)
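
# A quick shape check for the blocks above (an illustrative sketch; the
# tensor sizes and the helper name are arbitrary, chosen only for this demo).
def _shape_check():
    x = torch.randn(2, 8, 64)  # (batch, time, d_model)
    assert RWKVMambaHybrid(64, d_state=16)(x).shape == x.shape
    assert KQVAttention(64, n_heads=4, rank=16)(x).shape == x.shape
    assert LoRPtLinear(64, 128, rank=16)(x).shape == (2, 8, 128)

# _shape_check()
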
class i3Block(nn.Module):
    """Pre-norm residual block: recurrent mixer -> attention -> low-rank FFN."""

    def __init__(self, d_model, n_heads=16, d_state=32, rank=64, ffn_mult=4):
        super().__init__()
        self.hybrid = RWKVMambaHybrid(d_model, d_state)
        self.ln1 = nn.LayerNorm(d_model)
        self.attn = KQVAttention(d_model, n_heads, rank)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            LoRPtLinear(d_model, d_ff, rank),
            nn.GELU(),
            LoRPtLinear(d_ff, d_model, rank),
        )
        self.ln3 = nn.LayerNorm(d_model)

    def forward(self, x, mask=None):
        x = x + self.hybrid(self.ln1(x))
        x = x + self.attn(self.ln2(x), mask)
        x = x + self.ffn(self.ln3(x))
        return x


class i3Model(nn.Module):
    def __init__(self, vocab_size, d_model=512, n_layers=24, n_heads=16,
                 max_seq_len=256, rank=64, d_state=32):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_seq_len = max_seq_len
        self.embed = nn.Embedding(vocab_size, d_model)
        self.pos_embed = nn.Embedding(max_seq_len, d_model)
        self.layers = nn.ModuleList([
            i3Block(d_model, n_heads, d_state, rank) for _ in range(n_layers)
        ])
        self.ln_f = nn.LayerNorm(d_model)
        self.head = LoRPtLinear(d_model, vocab_size, rank)

    def forward(self, idx):
        B, T = idx.shape
        pos = torch.arange(0, T, device=idx.device).unsqueeze(0)
        x = self.embed(idx) + self.pos_embed(pos)
        # Causal mask: position t may only attend to positions <= t.
        mask = torch.tril(torch.ones(T, T, device=idx.device)).view(1, 1, T, T)
        for layer in self.layers:
            x = layer(x, mask)
        x = self.ln_f(x)
        return self.head(x)

    @torch.no_grad()
    def generate(self, idx, max_new_tokens=100, temperature=0.8, top_k=40):
        for _ in range(max_new_tokens):
            # Crop the context to the positional-embedding limit.
            idx_cond = idx[:, -self.max_seq_len:]
            logits = self(idx_cond)[:, -1, :] / temperature
            # Top-k filtering (k clamped so it never exceeds the vocab size).
            k = min(top_k, logits.size(-1))
            v, _ = torch.topk(logits, k)
            logits[logits < v[:, [-1]]] = -float("inf")
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, 1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

# ============================================================================
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = ChunkTokenizer()
tokenizer.load("tokenizer.json")

model = i3Model(
    vocab_size=tokenizer.vocab_size,
    d_model=512,
    n_layers=24,
    n_heads=16,
    max_seq_len=256,
    rank=64,
    d_state=32,
).to(device)

state_dict = torch.load("pytorch_model.bin", map_location=device)
model.load_state_dict(state_dict)
model.eval()
print("✓ Model loaded successfully")

# ============================================================================
@torch.no_grad()
def infer(prompt, max_new_tokens=100, temperature=0.8, top_k=40):
    input_ids = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long).to(device)
    output = model.generate(input_ids, max_new_tokens=max_new_tokens,
                            temperature=temperature, top_k=top_k)
    return tokenizer.decode(output[0].cpu().tolist())


def chat_loop():
    print("=== i3 Interactive Chat ([INST] format) ===")
    history = ""
    while True:
        user_input = input("[You] ")
        if user_input.strip().lower() in {"quit", "exit"}:
            break
        prompt = f"{history}[INST] {user_input.strip()} [/INST]"
        reply = infer(prompt, max_new_tokens=120)
        # The tokenizer lowercases, so the echoed prompt comes back lowercased.
        reply_clean = reply.replace(prompt.lower(), "").strip()
        print("[i3]:", reply_clean)
        history += f"[INST] {user_input.strip()} [/INST] {reply_clean} "

# ============================================================================
print("\nExample:")
prompt = "[INST] What can we do to make people happier [/INST]"
print("Prompt:", prompt)
print("Generated:", infer(prompt))

# Optionally start a chat loop:
# chat_loop()
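
# ============================================================================
# Optional extras (an illustrative sketch): report the parameter count and
# check that the tokenizer round-trips a string. The helper name and the
# sample text are arbitrary choices for this demo.
def report_stats():
    n_params = sum(p.numel() for p in model.parameters())
    print(f"Parameters: {n_params / 1e6:.1f}M")
    sample = "what can we do to make people happier"
    # Encoding is lossy only for characters missing from the chunk vocabulary.
    print("Round-trip:", tokenizer.decode(tokenizer.encode(sample)))

# report_stats()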