Alic-Li committed
Commit 4445e27 · verified · 1 Parent(s): ec0c4f0

Upload 6 files
MiniMind2_tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|im_start|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
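
A quick sanity check on the map above; a minimal sketch, assuming the tokenizer folder is checked out locally as ./MiniMind2_tokenizer:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./MiniMind2_tokenizer")
# Expected from the JSON above: <|im_start|> <|im_end|> <|endoftext|> <|endoftext|>
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
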
MiniMind2_tokenizer/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
MiniMind2_tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% else %}{{ '<|im_start|>system\\nYou are a helpful assistant<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "PreTrainedTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
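
The chat_template above is a ChatML-style Jinja template: it injects a default system prompt when none is given, then wraps each turn in <|im_start|>/<|im_end|> markers and opens an assistant turn after each user message. A minimal rendering sketch, assuming a transformers version with chat-template support:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./MiniMind2_tokenizer")
messages = [{"role": "user", "content": "Hello"}]
print(tok.apply_chat_template(messages, tokenize=False))
# <|im_start|>system
# You are a helpful assistant<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant
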
app.py ADDED
@@ -0,0 +1,402 @@
+ import types, torch, copy
+ from typing import List
+ torch._C._jit_set_autocast_mode(False)
+ import torch.nn as nn
+ from torch.nn import functional as F
+ from transformers import AutoTokenizer
+ import gradio as gr
+ MyModule = torch.jit.ScriptModule
+ MyFunction = torch.jit.script_method
+ MyStatic = torch.jit.script
+
+ ########################################################################################################
+
+ args = types.SimpleNamespace()
+ args.MODEL_NAME = "./sft-2048.pth"
+ args.n_layer = 8
+ args.n_embd = 512
+ args.vocab_size = 6400
+ args.head_size = 64
+ GEN_TEMP = 1.0
+ GEN_TOP_P = 0.3
+ GEN_alpha_presence = 0.5
+ GEN_alpha_frequency = 0.5
+ GEN_penalty_decay = 0.996
+ CHUNK_LEN = 16
+ DTYPE = torch.float32
+ HEAD_SIZE = args.head_size
+ STATE_NAME = None
+
+ ########################################################################################################
+
+ class RWKV_x070(MyModule):
+     def __init__(self, args):
+         super().__init__()
+         self.args = args
+         self.n_embd = args.n_embd
+         self.n_layer = args.n_layer
+         self.eval()
+
+         self.z = torch.load(args.MODEL_NAME, map_location='cuda')
+         z = self.z
+         self.n_head, self.head_size = z['blocks.0.att.r_k'].shape
+
+         keys = list(z.keys())
+         for k in keys:
+             if 'key.weight' in k or 'value.weight' in k or 'receptance.weight' in k or 'output.weight' in k or 'head.weight' in k:
+                 z[k] = z[k].t()
+             z[k] = z[k].squeeze().to(dtype=DTYPE)
+             if k.endswith('att.r_k'): z[k] = z[k].flatten()
+         assert self.head_size == args.head_size
+
+         z['emb.weight'] = F.layer_norm(z['emb.weight'], (args.n_embd,), weight=z['blocks.0.ln0.weight'], bias=z['blocks.0.ln0.bias'])
+
+         for i in range(self.n_layer): # !!! merge emb residual !!!
+             z[f'blocks.{i}.ffn.s_emb.weight'] = z[f'blocks.{i}.ffn.s_emb.weight'] + z['emb.weight'] @ z[f'blocks.{i}.ffn.s_emb_x.weight'].t()
+
+         z['blocks.0.att.v0'] = z['blocks.0.att.a0'] # actually ignored
+         z['blocks.0.att.v1'] = z['blocks.0.att.a1'] # actually ignored
+         z['blocks.0.att.v2'] = z['blocks.0.att.a2'] # actually ignored
+
+     def forward(self, idx, state, full_output=False):
+         if state is None:
+             state = [None for _ in range(args.n_layer * 3)]
+             for i in range(args.n_layer): # state: 0=att_x_prev 1=att_kv 2=ffn_x_prev
+                 state[i*3+0] = torch.zeros(args.n_embd, dtype=DTYPE, requires_grad=False, device="cuda")
+                 state[i*3+1] = torch.zeros((args.n_embd // args.head_size, args.head_size, args.head_size), dtype=torch.float, requires_grad=False, device="cuda")
+                 state[i*3+2] = torch.zeros(args.n_embd, dtype=DTYPE, requires_grad=False, device="cuda")
+
+         if type(idx) is list:
+             if len(idx) > 1:
+                 return self.forward_seq(idx, state, full_output)
+             else:
+                 return self.forward_one(idx[0], state)
+         else:
+             return self.forward_one(idx, state)
+
+     @MyFunction
+     def forward_one(self, idx:int, state:List[torch.Tensor]):
+         with torch.no_grad():
+             z = self.z
+             x = z['emb.weight'][idx]
+
+             v_first = torch.empty_like(x)
+             for i in range(self.n_layer):
+                 bbb = f'blocks.{i}.'
+                 att = f'blocks.{i}.att.'
+                 ffn = f'blocks.{i}.ffn.'
+
+                 xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln1.weight'], bias=z[bbb+'ln1.bias'])
+
+                 xx, state[i*3+0], state[i*3+1], v_first = RWKV_x070_TMix_one(i, self.n_head, self.head_size, xx, state[i*3+0], v_first, state[i*3+1],
+                     z[att+'x_r'], z[att+'x_w'], z[att+'x_k'], z[att+'x_v'], z[att+'x_a'], z[att+'x_g'],
+                     z[att+'w0'], z[att+'w1'], z[att+'w2'], z[att+'a0'], z[att+'a1'], z[att+'a2'], z[att+'v0'], z[att+'v1'], z[att+'v2'],
+                     z[att+'g1'], z[att+'g2'], z[att+'k_k'], z[att+'k_a'], z[att+'r_k'],
+                     z[att+'receptance.weight'], z[att+'key.weight'], z[att+'value.weight'], z[att+'output.weight'],
+                     z[att+'ln_x.weight'], z[att+'ln_x.bias'])
+                 x = x + xx
+
+                 xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln2.weight'], bias=z[bbb+'ln2.bias'])
+
+                 xx, state[i*3+2] = RWKV_x070_CMix_one(xx, state[i*3+2], z[ffn+'x_k'], z[ffn+'key.weight'], z[ffn+'value.weight'], z[ffn+'s_emb.weight'][idx], z[ffn+'s1'], z[ffn+'s2'], z[ffn+'s0'])
+                 x = x + xx
+
+             x = F.layer_norm(x, (self.n_embd,), weight=z['ln_out.weight'], bias=z['ln_out.bias'])
+             x = x @ z['head.weight']
+             return x, state
+
+     @MyFunction
+     def forward_seq(self, idx:List[int], state:List[torch.Tensor], full_output:bool=False):
+         with torch.no_grad():
+             z = self.z
+             x = z['emb.weight'][idx]
+
+             v_first = torch.empty_like(x)
+             for i in range(self.n_layer):
+                 bbb = f'blocks.{i}.'
+                 att = f'blocks.{i}.att.'
+                 ffn = f'blocks.{i}.ffn.'
+
+                 xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln1.weight'], bias=z[bbb+'ln1.bias'])
+
+                 xx, state[i*3+0], state[i*3+1], v_first = RWKV_x070_TMix_seq(i, self.n_head, self.head_size, xx, state[i*3+0], v_first, state[i*3+1],
+                     z[att+'x_r'], z[att+'x_w'], z[att+'x_k'], z[att+'x_v'], z[att+'x_a'], z[att+'x_g'],
+                     z[att+'w0'], z[att+'w1'], z[att+'w2'], z[att+'a0'], z[att+'a1'], z[att+'a2'], z[att+'v0'], z[att+'v1'], z[att+'v2'],
+                     z[att+'g1'], z[att+'g2'], z[att+'k_k'], z[att+'k_a'], z[att+'r_k'],
+                     z[att+'receptance.weight'], z[att+'key.weight'], z[att+'value.weight'], z[att+'output.weight'],
+                     z[att+'ln_x.weight'], z[att+'ln_x.bias'])
+                 x = x + xx
+
+                 xx = F.layer_norm(x, (self.n_embd,), weight=z[bbb+'ln2.weight'], bias=z[bbb+'ln2.bias'])
+
+                 xx, state[i*3+2] = RWKV_x070_CMix_seq(xx, state[i*3+2], z[ffn+'x_k'], z[ffn+'key.weight'], z[ffn+'value.weight'], z[ffn+'s_emb.weight'][idx], z[ffn+'s1'], z[ffn+'s2'], z[ffn+'s0'])
+                 x = x + xx
+
+             if not full_output: x = x[-1,:]
+             x = F.layer_norm(x, (self.n_embd,), weight=z['ln_out.weight'], bias=z['ln_out.bias'])
+             x = x @ z['head.weight']
+             return x, state
+
+ ########################################################################################################
+
+ @MyStatic
+ def RWKV_x070_TMix_one(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
+     xx = x_prev - x
+     xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g
+
+     r = xr @ R_
+     w = torch.tanh(xw @ w1) @ w2
+     k = xk @ K_
+     v = xv @ V_
+     a = torch.sigmoid(a0 + (xa @ a1) @ a2)
+     g = torch.sigmoid(xg @ g1) @ g2
+
+     kk = torch.nn.functional.normalize((k * k_k).view(H,N), dim=-1, p=2.0).view(H*N)
+     k = k * (1 + (a-1) * k_a)
+     if layer_id == 0: v_first = v
+     else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)
+     w = torch.exp(-0.606531 * torch.sigmoid((w0 + w).float())) # 0.606531 = exp(-0.5)
+
+     vk = v.view(H,N,1) @ k.view(H,1,N)
+     ab = (-kk).view(H,N,1) @ (kk*a).view(H,1,N)
+     state = state * w.view(H,1,N) + state @ ab.float() + vk.float()
+     xx = (state.to(dtype=x.dtype) @ r.view(H,N,1))
+
+     xx = torch.nn.functional.group_norm(xx.view(1,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(H*N)
+     xx = xx + ((r * k * r_k).view(H,N).sum(dim=-1, keepdim=True) * v.view(H,N)).view(H*N)
+     return (xx * g) @ O_, x, state, v_first
+
+ @MyStatic
+ def RWKV_x070_TMix_seq(layer_id: int, H:int, N:int, x, x_prev, v_first, state, x_r, x_w, x_k, x_v, x_a, x_g, w0, w1, w2, a0, a1, a2, v0, v1, v2, g1, g2, k_k, k_a, r_k, R_, K_, V_, O_, ln_w, ln_b):
+     T = x.shape[0]
+     xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
+     xr, xw, xk, xv, xa, xg = x+xx*x_r, x+xx*x_w, x+xx*x_k, x+xx*x_v, x+xx*x_a, x+xx*x_g
+
+     r = xr @ R_
+     w = torch.tanh(xw @ w1) @ w2
+     k = xk @ K_
+     v = xv @ V_
+     a = torch.sigmoid(a0 + (xa @ a1) @ a2)
+     g = torch.sigmoid(xg @ g1) @ g2
+
+     kk = torch.nn.functional.normalize((k * k_k).view(T,H,N), dim=-1, p=2.0).view(T,H*N)
+     k = k * (1 + (a-1) * k_a)
+     if layer_id == 0: v_first = v
+     else: v = v + (v_first - v) * torch.sigmoid(v0 + (xv @ v1) @ v2)
+
+     ######## cuda-free method
+     w = torch.exp(-0.606531 * torch.sigmoid((w0 + w).float())) # 0.606531 = exp(-0.5)
+     for t in range(T):
+         r_, w_, k_, v_, kk_, a_ = r[t], w[t], k[t], v[t], kk[t], a[t]
+         vk = v_.view(H,N,1) @ k_.view(H,1,N)
+         ab = (-kk_).view(H,N,1) @ (kk_*a_).view(H,1,N)
+         state = state * w_.view(H,1,N) + state @ ab.float() + vk.float()
+         xx[t] = (state.to(dtype=x.dtype) @ r_.view(H,N,1)).view(H*N)
+
+     # w = -torch.nn.functional.softplus(-(w0 + w)) - 0.5
+     # xx = RWKV7_OP(state, r, w, k, v, -kk, kk*a)
+
+     xx = torch.nn.functional.group_norm(xx.view(T,H*N), num_groups=H, weight=ln_w, bias=ln_b, eps = 64e-5).view(T,H*N)
+     xx = xx + ((r * k * r_k).view(T,H,N).sum(dim=-1, keepdim=True) * v.view(T,H,N)).view(T,H*N)
+     return (xx * g) @ O_, x[-1,:], state, v_first
+
+ ########################################################################################################
+
+ @MyStatic
+ def RWKV_x070_CMix_one(x, x_prev, x_k, K_, V_, semb_, s1_, s2_, s0_):
+     xx = x_prev - x
+     k = x + xx * x_k
+     k = torch.relu(k @ K_) ** 2
+     ss = (x @ s1_) @ semb_.view(32,32)
+     k = k * ((ss @ s2_) + s0_)
+     return k @ V_, x
+
+ @MyStatic
+ def RWKV_x070_CMix_seq(x, x_prev, x_k, K_, V_, semb_, s1_, s2_, s0_):
+     T,C = x.shape
+     xx = torch.cat((x_prev.unsqueeze(0), x[:-1,:])) - x
+     k = x + xx * x_k
+     k = torch.relu(k @ K_) ** 2
+     ss = (x @ s1_).view(T,1,32) @ semb_.view(T,32,32)
+     k = k * ((ss.view(T,32) @ s2_) + s0_)
+     return k @ V_, x[-1,:]
+
+ @MyStatic
+ def sample_logits(logits, temperature:float=1.0, top_p:float=1.0, top_k:int=0):
+     probs = F.softmax(logits.float(), dim=-1)
+     sorted_probs, sorted_ids = torch.sort(probs, descending=True)
+
+     if top_k > 0:
+         probs[sorted_ids[top_k:]] = 0
+
+     if top_p < 1:
+         cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
+         cutoff_index = torch.searchsorted(cumulative_probs, top_p)
+         cutoff = sorted_probs[cutoff_index]
+         probs[probs < cutoff] = 0
+
+         if top_p > 0:
+             idx = torch.where(probs == cutoff)[0]
+             if len(idx) > 0:
+                 probs[idx] = cutoff + (top_p - torch.sum(probs).item()) / len(idx)
+                 # assert abs(torch.sum(probs).item() - top_p) < 1e-6
+
+     if temperature != 1.0:
+         probs = probs ** (1.0 / temperature)
+
+     return torch.multinomial(probs, num_samples=1).item()
+
+ tokenizer = AutoTokenizer.from_pretrained("./MiniMind2_tokenizer")
+
+ model_tokens = []
+ model_state = None
+ model = RWKV_x070(args)
+
+ # if STATE_NAME is not None:
+ #     GEN_TOP_P = 0.2
+ #     GEN_alpha_presence = 0.3
+ #     GEN_alpha_frequency = 0.3
+
+ #     args = model.args
+ #     state_raw = torch.load(STATE_NAME + '.pth')
+ #     state_init = [None for i in range(args.n_layer * 3)]
+ #     for i in range(args.n_layer):
+ #         dd = model.strategy[i]
+ #         dev = dd.device
+ #         atype = dd.atype
+ #         state_init[i*3+0] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+ #         state_init[i*3+1] = state_raw[f'blocks.{i}.att.time_state'].transpose(1,2).to(dtype=torch.float, device=dev).requires_grad_(False).contiguous()
+ #         state_init[i*3+2] = torch.zeros(args.n_embd, dtype=atype, requires_grad=False, device=dev).contiguous()
+ #     model_state = copy.deepcopy(state_init)
+
+ def run_rnn(ctx, state):
+     ctx = ctx.replace("\r\n", "\n")
+     tokens = tokenizer.encode(ctx)
+     tokens = [int(x) for x in tokens]
+
+     current_state = copy.deepcopy(state) if state is not None else None
+
+     while len(tokens) > 0:
+         out, current_state = model.forward(tokens[:CHUNK_LEN], current_state)
+         tokens = tokens[CHUNK_LEN:]
+
+     return out, current_state
+
+ def generate_response(message, history, temperature=1.0, top_p=0.3):
+     global model_tokens, model_state
+     model_state = None
+
+     ctx = ""
+     for human, assistant in history:
+         ctx += f"<|im_start|>user\n{human}<|im_end|>\n<|im_start|>assistant\n{assistant}<!--eos--><|im_end|>\n"
+
+     ctx += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
+
+     out, model_state = run_rnn(ctx, model_state)
+
+     occurrence = {}
+     out_tokens = []
+     out_last = 0
+     response = ""
+
+     eos_token_id = tokenizer.eos_token_id
+     im_end_id = tokenizer.encode("<|im_end|>")[0]
+     for i in range(99999):
+         logits = out.clone()
+         for n in occurrence:
+             logits[n] -= GEN_alpha_presence + occurrence[n] * GEN_alpha_frequency
+
+         logits[0] -= 1e10
+
+         token = sample_logits(logits, temperature=temperature, top_p=top_p)
+
+         if token == im_end_id:
+             break
+
+         out, model_state = model.forward([token], model_state)
+
+         out_tokens += [token]
+         for xxx in occurrence:
+             occurrence[xxx] *= GEN_penalty_decay
+         occurrence[token] = 1 + (occurrence[token] if token in occurrence else 0)
+
+         tmp = tokenizer.decode(out_tokens[out_last:])
+         if "\ufffd" not in tmp:
+             response += tmp
+             cleaned_response = response.replace("<|im_end|>", "")
+             yield cleaned_response
+             out_last = i + 1
+
+         if token == eos_token_id:
+             break
+
+ def chat_with_bot(message, history, temperature, top_p):
+     response = ""
+     for partial_response in generate_response(message, history, temperature, top_p):
+         response = partial_response
+         yield response
+
+ with gr.Blocks(title="MiniRWKV_7 DE 34.2M 🪿 2vGPU Space") as demo:
+     gr.Markdown("# MiniRWKV_7 DE 34.2M 🪿 ")
+     gr.Markdown("### Only 34.2M Params!!! Use 2V CPU Backend to run this model. ")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             chatbot = gr.Chatbot(
+                 label="对话记录",
+                 height=1000,
+             )
+
+         with gr.Column(scale=1):
+             msg = gr.Textbox(
+                 label="输入消息",
+                 placeholder="请输入您的问题...",
+                 lines=3
+             )
+
+             with gr.Row():
+                 send_btn = gr.Button("发送", variant="primary")
+                 clear_btn = gr.Button("清除历史")
+
+             gr.Markdown("### 参数调节")
+             temperature_slider = gr.Slider(
+                 minimum=0.1,
+                 maximum=2.0,
+                 value=GEN_TEMP,
+                 step=0.1,
+                 label="Temperature"
+             )
+             top_p_slider = gr.Slider(
+                 minimum=0.0,
+                 maximum=2.0,
+                 value=GEN_TOP_P,
+                 step=0.05,
+                 label="Top-P"
+             )
+
+
+     def respond(message, chat_history, temperature, top_p):
+         if not message:
+             return "", chat_history
+
+         chat_history.append((message, ""))
+
+         response = ""
+         for partial_response in chat_with_bot(message, chat_history[:-1], temperature, top_p):
+             response = partial_response
+             cleaned_response = response.replace("<|im_end|>", "")
+             chat_history[-1] = (message, cleaned_response)
+             yield "", chat_history
+
+     def clear_history():
+         global model_tokens, model_state
+         model_tokens = []
+         model_state = None
+         return []
+
+     msg.submit(respond, [msg, chatbot, temperature_slider, top_p_slider], [msg, chatbot])
+     send_btn.click(respond, [msg, chatbot, temperature_slider, top_p_slider], [msg, chatbot])
+     clear_btn.click(clear_history, None, chatbot)
+
+ if __name__ == "__main__":
+     demo.launch()
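
A hedged smoke test for the pipeline above, run in the same process as app.py (it assumes sft-2048.pth has been pulled via git-lfs and a CUDA device is present, since the weights are loaded with map_location='cuda'):

# Drain the streaming generator once, outside Gradio.
last = ""
for partial in generate_response("Hello", history=[], temperature=GEN_TEMP, top_p=GEN_TOP_P):
    last = partial
print(last)
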
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ torch --index-url https://download.pytorch.org/whl/cpu
+ transformers
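
Note: app.py also imports gradio, which is not pinned here; on a Gradio Space the SDK presumably supplies it, but a local run needs it installed alongside these two packages.
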
sft-2048.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c1e27de7156cb3c965a88409932805950e8839c998251183c7380390dd302fb
+ size 182949245
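
The three lines above are a git-lfs pointer to the ~183 MB checkpoint, not the weights themselves. A small inspection sketch after running git lfs pull, with key names taken from app.py and the peek kept on CPU:

import torch

z = torch.load("./sft-2048.pth", map_location="cpu")
print(len(z), "tensors")
print(z["emb.weight"].shape)         # expected [6400, 512] per args in app.py
print(z["blocks.0.att.r_k"].shape)   # n_head x head_size, i.e. [8, 64]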