import os
import json

import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
import gradio as gr

|
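# Recurrent token mixer that combines an RWKV-style per-channel moving average
# (w_mix interpolates between the previous state and the current input) with a
# Mamba-style linear state-space update (A, B, C, D). The recurrence is an
# explicit Python loop over the T time steps.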
class RWKVMambaHybrid(nn.Module):
    def __init__(self, d_model, d_state=64):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        # Per-channel mixing weight between the previous hidden state and the
        # current input (RWKV-style token shift / EMA).
        self.w_mix = nn.Parameter(torch.ones(d_model) * 0.5)
        # Linear state-space parameters (Mamba-style recurrence).
        self.A = nn.Parameter(torch.randn(d_state, d_state) * 0.01)
        self.B = nn.Parameter(torch.randn(d_state, d_model) * 0.01)
        self.C = nn.Parameter(torch.randn(d_model, d_state) * 0.01)
        self.D = nn.Parameter(torch.ones(d_model) * 0.1)

    def forward(self, x):
        B, T, C = x.shape
        h = torch.zeros(B, C, device=x.device)             # RWKV mixing state
        s = torch.zeros(B, self.d_state, device=x.device)  # SSM state
        outputs = []
        for t in range(T):
            x_t = x[:, t, :]
            h = self.w_mix * h + (1 - self.w_mix) * x_t
            s = s @ self.A.T + x_t @ self.B.T
            y_t = s @ self.C.T + h * self.D
            outputs.append(y_t)
        return torch.stack(outputs, dim=1)

|
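# Standard multi-head self-attention; positions where the (optional) mask is 0
# are set to -inf before the softmax.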
class FullAttention(nn.Module):
    def __init__(self, d_model, n_heads=16):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        self.qkv = nn.Linear(d_model, d_model * 3)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x, mask=None):
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        attn = (q @ k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        if mask is not None:
            mask = mask.expand(B, self.n_heads, T, T).bool()
            attn = attn.masked_fill(mask == 0, float('-inf'))
        attn = F.softmax(attn, dim=-1)
        out = attn @ v
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)

|
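# Pre-norm residual block: RWKV/Mamba hybrid mixer followed by a GELU FFN.
# The mask argument exists only for interface parity with the attention block;
# the recurrent mixer is causal by construction and ignores it.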
class i3HybridBlock(nn.Module):
    def __init__(self, d_model, d_state=64, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.hybrid = RWKVMambaHybrid(d_model, d_state)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model),
        )

    def forward(self, x, mask=None):
        x = x + self.hybrid(self.ln1(x))
        x = x + self.ffn(self.ln2(x))
        return x

|
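# Same pre-norm residual layout, but with full self-attention as the mixer;
# the causal mask is passed through to it.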
class i3AttentionBlock(nn.Module):
    def __init__(self, d_model, n_heads=16, ffn_mult=4):
        super().__init__()
        self.ln1 = nn.LayerNorm(d_model)
        self.attn = FullAttention(d_model, n_heads)
        self.ln2 = nn.LayerNorm(d_model)
        d_ff = d_model * ffn_mult
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.GELU(),
            nn.Linear(d_ff, d_model),
        )

    def forward(self, x, mask=None):
        x = x + self.attn(self.ln1(x), mask)
        x = x + self.ffn(self.ln2(x))
        return x

|
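# Full language model: token + learned positional embeddings feed a stack of
# 10 hybrid (RWKV/Mamba) blocks followed by 6 full-attention blocks, then a
# final LayerNorm and linear LM head. forward() builds a causal mask that only
# the attention blocks consume; generate() does simple autoregressive sampling
# with temperature and optional top-k.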
class i3Model(nn.Module):
    def __init__(self, vocab_size, d_model=512, n_heads=16, max_seq_len=256, d_state=32):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.max_seq_len = max_seq_len
        self.embed = nn.Embedding(vocab_size, d_model)
        self.pos_embed = nn.Embedding(max_seq_len, d_model)
        hybrid_layers = [i3HybridBlock(d_model, d_state) for _ in range(10)]
        attention_layers = [i3AttentionBlock(d_model, n_heads) for _ in range(6)]
        self.layers = nn.ModuleList(hybrid_layers + attention_layers)
        self.ln_f = nn.LayerNorm(d_model)
        self.head = nn.Linear(d_model, vocab_size)
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(0, 0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()

    def forward(self, idx, targets=None):
        B, T = idx.shape
        pos = torch.arange(0, T, device=idx.device).unsqueeze(0)
        x = self.embed(idx) + self.pos_embed(pos)
        # Lower-triangular causal mask; only the attention blocks use it.
        mask = torch.tril(torch.ones(T, T, device=idx.device)).view(1, 1, T, T)
        for layer in self.layers:
            x = layer(x, mask)
        x = self.ln_f(x)
        logits = self.head(x)
        loss = None
        if targets is not None:
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens=100, temperature=1.0, top_k=None):
        for _ in range(max_new_tokens):
            # Crop the context to the positional-embedding window.
            idx_cond = idx if idx.size(1) <= self.max_seq_len else idx[:, -self.max_seq_len:]
            logits, _ = self(idx_cond)
            logits = logits[:, -1, :] / temperature
            if top_k is not None:
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float('Inf')
            probs = F.softmax(logits, dim=-1)
            idx_next = torch.multinomial(probs, 1)
            idx = torch.cat((idx, idx_next), dim=1)
        return idx

|
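# Greedy chunk-level tokenizer: looks up chunks of up to 3 characters in a
# JSON vocabulary. Without the vocab file it falls back to a tiny
# character-level vocab (a-z plus space); note that this fallback cannot match
# a checkpoint trained with the full chunk vocab, so load_state_dict would
# fail on the embedding/head shapes in that case.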
class ChunkTokenizer:
    def __init__(self, vocab_path=None):
        self.chunk_to_idx = {}
        self.idx_to_chunk = {}
        self.unk_token = '<UNK>'
        self.unk_idx = 0
        if vocab_path and os.path.exists(vocab_path):
            with open(vocab_path, 'r') as f:
                data = json.load(f)
            self.chunk_to_idx = data['chunk_to_idx']
            self.idx_to_chunk = {int(k): v for k, v in data['idx_to_chunk'].items()}
            self.vocab_size = data['vocab_size']
        else:
            # Fallback character-level vocabulary: <UNK>, a-z, and space.
            self.chunk_to_idx = {'<UNK>': 0}
            for i, ch in enumerate('abcdefghijklmnopqrstuvwxyz '):
                self.chunk_to_idx[ch] = i + 1
            self.idx_to_chunk = {v: k for k, v in self.chunk_to_idx.items()}
            self.vocab_size = len(self.chunk_to_idx)

    def encode(self, text):
        text = text.lower()
        idxs = []
        pos = 0
        while pos < len(text):
            # Greedy longest match: try a 3-character chunk, then shorter ones,
            # before falling back to <UNK> for a single character.
            for size in (3, 2, 1):
                chunk = text[pos:pos + size]
                if chunk in self.chunk_to_idx:
                    idxs.append(self.chunk_to_idx[chunk])
                    pos += len(chunk)
                    break
            else:
                idxs.append(self.unk_idx)
                pos += 1
        return idxs

    def decode(self, indices):
        return ''.join([self.idx_to_chunk.get(int(i), self.unk_token) for i in indices])

|
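# Load the tokenizer and model weights. A local checkpoint is preferred
# (model.safetensors, then pytorch_model.bin); otherwise pytorch_model.bin is
# downloaded from the Hugging Face repo named by MODEL_NAME, which is left as
# a placeholder below. The `safetensors` package is only needed for the
# .safetensors branch, where it is imported lazily.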
MODEL_NAME = "your-hf-username/i3-80m"
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

vocab_file = "chunk_vocab_combined.json"
tokenizer = ChunkTokenizer(vocab_file)
vocab_size = tokenizer.vocab_size

model = i3Model(vocab_size=vocab_size)

if os.path.exists("model.safetensors"):
    from safetensors.torch import load_file
    state_dict = load_file("model.safetensors")
    model.load_state_dict(state_dict)
elif os.path.exists("pytorch_model.bin"):
    state_dict = torch.load("pytorch_model.bin", map_location=DEVICE)
    model.load_state_dict(state_dict)
else:
    # No local checkpoint: download pytorch_model.bin from the Hub.
    url_bin = f"https://huggingface.co/{MODEL_NAME}/resolve/main/pytorch_model.bin"
    r = requests.get(url_bin)
    # Fail loudly on a bad download instead of writing an error page to disk.
    r.raise_for_status()
    with open("pytorch_model.bin", 'wb') as f:
        f.write(r.content)
    state_dict = torch.load("pytorch_model.bin", map_location=DEVICE)
    model.load_state_dict(state_dict)

model.to(DEVICE)
model.eval()

|
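# Gradio demo: a prompt box plus a small dev panel of sampling controls
# (max tokens, temperature, top-k) wired to generate_text below.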
def generate_text(prompt, max_tokens, temperature, top_k):
    ids = tokenizer.encode(prompt)
    if not ids:
        # Guard against an empty prompt: seed generation with a single <UNK>.
        ids = [tokenizer.unk_idx]
    idx = torch.tensor([ids], dtype=torch.long).to(DEVICE)
    # Slider values may arrive as floats, so cast the integer-valued controls.
    out_idx = model.generate(idx, max_new_tokens=int(max_tokens),
                             temperature=temperature, top_k=int(top_k))
    text = tokenizer.decode(out_idx[0].cpu())
    return text

|
with gr.Blocks() as demo:
    gr.Markdown("### i3-80M Model Demo")
    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(label="Prompt", lines=3)
            generate_btn = gr.Button("Generate")
            output = gr.Textbox(label="Generated Text", lines=10)
        with gr.Column(scale=1):
            gr.Markdown("#### Dev Panel")
            max_tokens = gr.Slider(10, 512, value=100, step=1, label="Max Tokens")
            temperature = gr.Slider(0.1, 2.0, value=0.8, step=0.05, label="Temperature")
            top_k = gr.Slider(1, 100, value=40, step=1, label="Top-k")
    generate_btn.click(generate_text,
                       inputs=[prompt, max_tokens, temperature, top_k],
                       outputs=output)

demo.launch()