FlameF0X committed on
Commit 5dcf306 · verified · 1 Parent(s): 2401a0f

Update app.py

Files changed (1):
  1. app.py +85 -220

app.py CHANGED
@@ -1,230 +1,95 @@
-import os
 import torch
-import torch.nn as nn
-import torch.nn.functional as F
 import gradio as gr
 import json
-import requests
 
-# ============================================================
-# ==================== MODEL + TOKENIZER =====================
-# ============================================================
-
-class RWKVMambaHybrid(nn.Module):
-    def __init__(self, d_model, d_state=64):
-        super().__init__()
-        self.d_model = d_model
-        self.d_state = d_state
-        self.w_mix = nn.Parameter(torch.ones(d_model) * 0.5)
-        self.A = nn.Parameter(torch.randn(d_state, d_state) * 0.01)
-        self.B = nn.Parameter(torch.randn(d_state, d_model) * 0.01)
-        self.C = nn.Parameter(torch.randn(d_model, d_state) * 0.01)
-        self.D = nn.Parameter(torch.ones(d_model) * 0.1)
-
-    def forward(self, x):
-        B, T, C = x.shape
-        h = torch.zeros(B, C, device=x.device)
-        s = torch.zeros(B, self.d_state, device=x.device)
-        outputs = []
-        for t in range(T):
-            x_t = x[:, t, :]
-            h = self.w_mix * h + (1 - self.w_mix) * x_t
-            s = s @ self.A.T + x_t @ self.B.T
-            y_t = s @ self.C.T + h * self.D
-            outputs.append(y_t)
-        return torch.stack(outputs, dim=1)
-
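Note on the removed block: `RWKVMambaHybrid` pairs an RWKV-style per-channel moving average, h_t = w ⊙ h_{t-1} + (1 - w) ⊙ x_t, with a Mamba-style linear state-space update, s_t = s_{t-1} Aᵀ + x_t Bᵀ, reading out y_t = s_t Cᵀ + D ⊙ h_t one timestep at a time (O(T) sequential steps, no attention cache). A quick shape check, assuming the class as defined above:

```python
layer = RWKVMambaHybrid(d_model=512, d_state=64)
x = torch.randn(2, 16, 512)   # (batch, time, channels)
y = layer(x)                  # (2, 16, 512): one output per timestep, computed sequentially
```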
-class FullAttention(nn.Module):
-    def __init__(self, d_model, n_heads=16):
-        super().__init__()
-        self.d_model = d_model
-        self.n_heads = n_heads
-        self.head_dim = d_model // n_heads
-        self.qkv = nn.Linear(d_model, d_model*3)
-        self.out_proj = nn.Linear(d_model, d_model)
-
-    def forward(self, x, mask=None):
-        B, T, C = x.shape
-        qkv = self.qkv(x)
-        q, k, v = qkv.chunk(3, dim=-1)
-        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1,2)
-        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1,2)
-        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1,2)
-        attn = (q @ k.transpose(-2,-1)) / (self.head_dim**0.5)
-        if mask is not None:
-            mask = mask.expand(B, self.n_heads, T, T).bool()
-            attn = attn.masked_fill(mask==0, float('-inf'))
-        attn = F.softmax(attn, dim=-1)
-        out = attn @ v
-        out = out.transpose(1,2).contiguous().view(B,T,C)
-        return self.out_proj(out)
-
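This is standard multi-head scaled dot-product attention, softmax(QKᵀ/√d_head)V, made causal by the lower-triangular mask built in `i3Model.forward`. For reference, on PyTorch ≥ 2.0 the same core computation is available as a fused kernel; a sketch, assuming causal masking and no dropout:

```python
import torch.nn.functional as F

# q, k, v shaped as in forward(): (B, n_heads, T, head_dim)
out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
```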
-class i3HybridBlock(nn.Module):
-    def __init__(self, d_model, d_state=64, ffn_mult=4):
-        super().__init__()
-        self.ln1 = nn.LayerNorm(d_model)
-        self.hybrid = RWKVMambaHybrid(d_model, d_state)
-        self.ln2 = nn.LayerNorm(d_model)
-        d_ff = d_model * ffn_mult
-        self.ffn = nn.Sequential(nn.Linear(d_model,d_ff), nn.GELU(), nn.Linear(d_ff,d_model))
-
-    def forward(self, x, mask=None):
-        x = x + self.hybrid(self.ln1(x))
-        x = x + self.ffn(self.ln2(x))
-        return x
-
-class i3AttentionBlock(nn.Module):
-    def __init__(self, d_model, n_heads=16, ffn_mult=4):
-        super().__init__()
-        self.ln1 = nn.LayerNorm(d_model)
-        self.attn = FullAttention(d_model,n_heads)
-        self.ln2 = nn.LayerNorm(d_model)
-        d_ff = d_model * ffn_mult
-        self.ffn = nn.Sequential(nn.Linear(d_model,d_ff), nn.GELU(), nn.Linear(d_ff,d_model))
-
-    def forward(self, x, mask=None):
-        x = x + self.attn(self.ln1(x), mask)
-        x = x + self.ffn(self.ln2(x))
-        return x
-
-class i3Model(nn.Module):
-    def __init__(self, vocab_size, d_model=512, n_heads=16, max_seq_len=256, d_state=32):
-        super().__init__()
-        self.vocab_size = vocab_size
-        self.d_model = d_model
-        self.max_seq_len = max_seq_len
-        self.embed = nn.Embedding(vocab_size,d_model)
-        self.pos_embed = nn.Embedding(max_seq_len,d_model)
-        hybrid_layers = [i3HybridBlock(d_model,d_state) for _ in range(10)]
-        attention_layers = [i3AttentionBlock(d_model,n_heads) for _ in range(6)]
-        self.layers = nn.ModuleList(hybrid_layers + attention_layers)
-        self.ln_f = nn.LayerNorm(d_model)
-        self.head = nn.Linear(d_model,vocab_size)
-        self.apply(self._init_weights)
-
-    def _init_weights(self,module):
-        if isinstance(module,(nn.Linear,nn.Embedding)):
-            module.weight.data.normal_(0,0.02)
-        if isinstance(module,nn.Linear) and module.bias is not None:
-            module.bias.data.zero_()
-
-    def forward(self, idx, targets=None):
-        B,T = idx.shape
-        pos = torch.arange(0,T,device=idx.device).unsqueeze(0)
-        x = self.embed(idx)+self.pos_embed(pos)
-        mask = torch.tril(torch.ones(T,T,device=idx.device)).view(1,1,T,T)
-        for layer in self.layers:
-            x = layer(x,mask)
-        x = self.ln_f(x)
-        logits = self.head(x)
-        loss=None
-        if targets is not None:
-            loss = F.cross_entropy(logits.view(-1,logits.size(-1)), targets.view(-1))
-        return logits, loss
-
-    @torch.no_grad()
-    def generate(self, idx, max_new_tokens=100, temperature=1.0, top_k=None):
-        for _ in range(max_new_tokens):
-            idx_cond = idx if idx.size(1)<=self.max_seq_len else idx[:,-self.max_seq_len:]
-            logits,_ = self(idx_cond)
-            logits = logits[:,-1,:]/temperature
-            if top_k is not None:
-                v,_ = torch.topk(logits,min(top_k,logits.size(-1)))
-                logits[logits<v[:,[-1]]]=-float('Inf')
-            probs = F.softmax(logits,dim=-1)
-            idx_next = torch.multinomial(probs,1)
-            idx = torch.cat((idx,idx_next),dim=1)
-        return idx
-
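`generate` does temperature-scaled top-k sampling: it divides the last-position logits by the temperature, keeps only the k largest, pushes everything else to -inf, and samples from the resulting softmax. A worked example of the filtering step, with k = 2:

```python
import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
v, _ = torch.topk(logits, 2)                 # v = [[2.0, 1.0]]; v[:, [-1]] = 1.0 is the cutoff
logits[logits < v[:, [-1]]] = -float('Inf')  # [[2.0, -inf, 1.0, -inf]]
probs = F.softmax(logits, dim=-1)            # ~[[0.73, 0.00, 0.27, 0.00]]
next_id = torch.multinomial(probs, 1)        # sample only among the surviving tokens
```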
-class ChunkTokenizer:
-    def __init__(self, vocab_path=None):
-        self.chunk_to_idx={}
-        self.idx_to_chunk={}
-        self.unk_token='<UNK>'
-        self.unk_idx=0
-        if vocab_path and os.path.exists(vocab_path):
-            with open(vocab_path,'r') as f:
-                data=json.load(f)
-            self.chunk_to_idx=data['chunk_to_idx']
-            self.idx_to_chunk={int(k):v for k,v in data['idx_to_chunk'].items()}
-            self.vocab_size=data['vocab_size']
-        else:
-            # minimal fallback vocab
-            self.chunk_to_idx={'<UNK>':0,'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26,' ':27}
-            self.idx_to_chunk={v:k for k,v in self.chunk_to_idx.items()}
-            self.vocab_size=len(self.chunk_to_idx)
-
-    def encode(self,text):
-        text=text.lower()
-        idxs=[]
-        pos=0
-        while pos<len(text):
-            chunk=text[pos:pos+3] if pos+3<=len(text) else text[pos:]
-            if chunk in self.chunk_to_idx:
-                idxs.append(self.chunk_to_idx[chunk])
-                pos+=len(chunk)
-            else:
-                idxs.append(self.unk_idx)
-                pos+=1
-        return idxs
-
-    def decode(self,indices):
-        return ''.join([self.idx_to_chunk.get(int(i),self.unk_token) for i in indices])
-
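`encode` is greedy over fixed 3-character windows: at each position it tests only `text[pos:pos+3]` (or the shorter tail), emitting `<UNK>` and advancing one character when that exact chunk is missing; it never backs off to 2- or 1-character chunks mid-string. With the fallback single-character vocab this matters, as the trace below shows:

```python
tok = ChunkTokenizer()         # no vocab file -> single-character fallback vocab
print(tok.encode("cat"))       # [0, 0, 20]: "cat" and "at" miss; only the 1-char tail "t" matches
print(tok.decode([0, 0, 20]))  # "<UNK><UNK>t"
```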
-# ============================================================
-# ===================== LOAD MODEL ===========================
-# ============================================================
-
-MODEL_NAME = "your-hf-username/i3-80m" # Replace with HF repo ID
-DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-vocab_file = "chunk_vocab_combined.json"
-tokenizer = ChunkTokenizer(vocab_file)
-vocab_size = tokenizer.vocab_size
-
-model = i3Model(vocab_size=vocab_size)
-# load local safetensors or pytorch_model.bin if exists
-if os.path.exists("model.safetensors"):
-    from safetensors.torch import load_file
-    state_dict = load_file("model.safetensors")
-    model.load_state_dict(state_dict)
-elif os.path.exists("pytorch_model.bin"):
-    state_dict = torch.load("pytorch_model.bin", map_location=DEVICE)
-    model.load_state_dict(state_dict)
-else:
-    # download from HF
-    url_bin = f"https://huggingface.co/{MODEL_NAME}/resolve/main/pytorch_model.bin"
-    r = requests.get(url_bin)
-    with open("pytorch_model.bin",'wb') as f:
-        f.write(r.content)
-    state_dict = torch.load("pytorch_model.bin", map_location=DEVICE)
-    model.load_state_dict(state_dict)
-
-model.to(DEVICE)
 model.eval()
 
-# ============================================================
-# ===================== GRADIO UI ============================
-# ============================================================
-
-def generate_text(prompt, max_tokens, temperature, top_k):
-    idx = torch.tensor([tokenizer.encode(prompt)],dtype=torch.long).to(DEVICE)
     out_idx = model.generate(idx, max_new_tokens=max_tokens, temperature=temperature, top_k=top_k)
-    text = tokenizer.decode(out_idx[0].cpu())
-    return text
 
 with gr.Blocks() as demo:
-    gr.Markdown("### i3-80M Model Demo")
     with gr.Row():
-        with gr.Column(scale=3):
-            prompt = gr.Textbox(label="Prompt", lines=3)
-            generate_btn = gr.Button("Generate")
-            output = gr.Textbox(label="Generated Text", lines=10)
-        with gr.Column(scale=1):
-            gr.Markdown("#### Dev Panel")
-            max_tokens = gr.Slider(10,512,value=100,step=1,label="Max Tokens")
-            temperature = gr.Slider(0.1,2.0,value=0.8,step=0.05,label="Temperature")
-            top_k = gr.Slider(1,100,value=40,step=1,label="Top-k")
-    generate_btn.click(generate_text, inputs=[prompt,max_tokens,temperature,top_k], outputs=output)
-
-demo.launch()
 import torch
+from pathlib import Path
 import gradio as gr
 import json
+from huggingface_hub import hf_hub_download
+
+# -------------------- DEVICE --------------------
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# -------------------- MODEL CONFIG --------------------
+MODEL_NAME = "FlameF0X/i3-80m"  # HuggingFace repo name
+LOCAL_SAFETENSORS = Path("model.safetensors")
+LOCAL_BIN = Path("pytorch_model.bin")
+VOCAB_JSON = Path("chunk_vocab_combined.json")
+
+# -------------------- LOAD VOCAB --------------------
+with open(VOCAB_JSON, 'r') as f:
+    vocab_data = json.load(f)
+VOCAB_SIZE = vocab_data["vocab_size"]
+
+# -------------------- IMPORT YOUR MODEL CLASS --------------------
+# Make sure i3Model is in the same folder or installed as a package
+from app_classes import i3Model, ChunkTokenizer
+
+tokenizer = ChunkTokenizer()
+tokenizer.load(VOCAB_JSON)
+
+model = i3Model(
+    vocab_size=VOCAB_SIZE,
+    d_model=512,
+    n_heads=16,
+    max_seq_len=256,
+    d_state=32
+).to(DEVICE)
+
+# -------------------- LOAD WEIGHTS --------------------
+try:
+    if LOCAL_SAFETENSORS.exists():
+        from safetensors.torch import load_file
+        state_dict = load_file(LOCAL_SAFETENSORS)
+        model.load_state_dict(state_dict)
+        print("✅ Loaded weights from local safetensors")
+    elif LOCAL_BIN.exists():
+        state_dict = torch.load(LOCAL_BIN, map_location=DEVICE, weights_only=False)
+        model.load_state_dict(state_dict)
+        print("✅ Loaded weights from local .bin")
+    else:
+        # HuggingFace fallback
+        print("⚡ Downloading model from HuggingFace...")
+        bin_file = hf_hub_download(repo_id=MODEL_NAME, filename="pytorch_model.bin")
+        state_dict = torch.load(bin_file, map_location=DEVICE, weights_only=False)
+        model.load_state_dict(state_dict)
+        print("✅ Loaded weights from HuggingFace")
+except Exception as e:
+    raise RuntimeError(f"Failed to load model weights: {e}")
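A possible follow-up, not part of this commit: after the Hub download succeeds, the `.bin` could be converted once so later restarts hit the faster `LOCAL_SAFETENSORS` branch. A sketch, assuming the state dict holds plain, non-shared tensors as `save_file` requires:

```python
from safetensors.torch import save_file

# One-time conversion: write the loaded weights next to app.py so the
# safetensors branch is taken on the next startup.
save_file(model.state_dict(), "model.safetensors")
```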
 model.eval()
 
+# -------------------- GENERATION FUNCTION --------------------
+def generate_text(prompt, max_tokens=100, temperature=0.8, top_k=40):
+    idx = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long).to(DEVICE)
     out_idx = model.generate(idx, max_new_tokens=max_tokens, temperature=temperature, top_k=top_k)
+    return tokenizer.decode(out_idx[0].cpu())
 
+# -------------------- GRADIO UI --------------------
 with gr.Blocks() as demo:
+    gr.Markdown("### i3-80M Text Generation")
+
     with gr.Row():
+        prompt_input = gr.Textbox(label="Prompt", placeholder="Type something...")
+        max_tokens_input = gr.Slider(10, 500, value=100, step=10, label="Max Tokens")
+        temp_input = gr.Slider(0.1, 2.0, value=0.8, step=0.05, label="Temperature")
+        topk_input = gr.Slider(1, 100, value=40, step=1, label="Top-k Sampling")
+
+    output_text = gr.Textbox(label="Generated Text")
+
+    generate_btn = gr.Button("Generate")
+
+    # Connect UI
+    generate_btn.click(
+        generate_text,
+        inputs=[prompt_input, max_tokens_input, temp_input, topk_input],
+        outputs=[output_text]
+    )
+
+    # Developer Panel (shows model info)
+    with gr.Accordion("Dev Panel: Model Info", open=False):
+        gr.Markdown(f"**Device:** {DEVICE}")
+        gr.Markdown(f"**Vocab size:** {VOCAB_SIZE}")
+        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+        gr.Markdown(f"**Total Parameters:** {total_params:,} ({total_params/1e6:.2f}M)")
+
+# -------------------- RUN --------------------
+if __name__ == "__main__":
+    demo.launch()
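One caveat worth flagging, as an assumption about Gradio's behavior: `gr.Slider` typically delivers its value as a `float`, while `i3Model.generate` calls `range(max_new_tokens)` and `torch.topk(..., top_k)`, both of which reject floats. A hardened variant of the new `generate_text` (hypothetical, not in this commit) would cast explicitly:

```python
def generate_text(prompt, max_tokens=100, temperature=0.8, top_k=40):
    # Cast slider values: range() and torch.topk() require ints.
    idx = torch.tensor([tokenizer.encode(prompt)], dtype=torch.long).to(DEVICE)
    out_idx = model.generate(
        idx,
        max_new_tokens=int(max_tokens),
        temperature=float(temperature),
        top_k=int(top_k),
    )
    return tokenizer.decode(out_idx[0].cpu())
```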