Files changed (6) hide show
  1. .gitattributes +35 -1
  2. README.md +6 -6
  3. app.py +628 -116
  4. config.py +230 -0
  5. gitattributes +0 -36
  6. requirements.txt +2 -10
.gitattributes CHANGED
@@ -1 +1,35 @@
1
- assets/qwen.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: Close-SFG
3
- emoji: ๐Ÿ“Š
4
- colorFrom: red
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.27.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Close SFG
3
+ emoji: ๐Ÿƒ
4
+ colorFrom: green
5
+ colorTo: red
6
  sdk: gradio
7
+ sdk_version: 5.39.0
8
  app_file: app.py
9
  pinned: false
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,124 +1,636 @@
1
- # server.py
2
- import torch
3
- import threading
4
  import time
5
- import numpy as np
6
- import re
7
  import json
8
- from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
9
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- # === ะœะพะดะตะปัŒ ===
12
- model_name = "Qwen/Qwen2.5-0.5B-Instruct"
13
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
14
-
15
- model = AutoModelForCausalLM.from_pretrained(
16
- model_name,
17
- device_map="cpu",
18
- torch_dtype=torch.float16,
19
- load_in_4bit=True,
20
- bnb_4bit_quant_type="nf4",
21
- bnb_4bit_compute_dtype=torch.float16,
22
- )
23
-
24
- try:
25
- model = torch.compile(model, mode="reduce-overhead", fullgraph=True)
26
- print("โœ… torch.compile ะฐะบั‚ะธะฒะธั€ะพะฒะฐะฝ")
27
- except:
28
- pass
29
-
30
- # === Tools ===
31
- tools = [{
32
- "name": "get_weather",
33
- "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]}
34
- }]
35
-
36
- def execute_tool_call(call):
37
- city = call.get("arguments", {}).get("city", "ะฝะตะธะทะฒะตัั‚ะตะฝ")
38
- return f"๐ŸŒค๏ธ ะŸะพะณะพะดะฐ ะฒ {city}: 22ยฐC, ัะพะปะฝะตั‡ะฝะพ. (ัะธะผัƒะปัั†ะธั)"
39
-
40
- # === NumPy-ะฟะฐั€ัะตั€ ===
41
- def find_and_replace_tool_calls_numpy(buffer):
42
- chars = np.array(list(buffer), dtype='U1')
43
- indices = np.where(chars == '<tool_call>')[0]
44
- if len(indices) < 2 or len(indices) % 2 != 0:
45
- return buffer, False
46
-
47
- new_buffer = buffer
48
- replaced = False
49
- for i in range(0, len(indices) - 1, 2):
50
- start, end = indices[i], indices[i + 1] + 2
51
- if end > len(buffer): continue
52
- block = buffer[start:end]
53
- content = buffer[start+2:end-2].strip()
54
  try:
55
- json_match = re.search(r'\{.*\}', content, re.DOTALL)
56
- if json_match:
57
- data = json.loads(json_match.group())
58
- result = execute_tool_call(data)
59
- new_buffer = new_buffer.replace(block, f"\n\nโœ… {result}\n\n")
60
- replaced = True
61
- except:
62
- pass
63
- return new_buffer, replaced
64
-
65
- # === ะ“ะตะฝะตั€ะฐั†ะธั ั "GPU-ัั„ั„ะตะบั‚ะพะผ" ===
66
- def generate_stream(prompt, max_new_tokens=128, temperature=0.7, top_p=0.9):
67
- messages = [{"role": "user", "content": prompt}]
68
- inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to(model.device)
69
-
70
- streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
71
- thread = threading.Thread(target=model.generate, kwargs={
72
- "input_ids": inputs,
73
- "max_new_tokens": max_new_tokens,
74
- "temperature": temperature,
75
- "top_p": top_p,
76
- "do_sample": True,
77
- "pad_token_id": tokenizer.pad_token_id,
78
- "eos_token_id": tokenizer.eos_token_id,
79
- "streamer": streamer,
80
- "use_cache": True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  })
82
- thread.start()
83
-
84
- buffer = ""
85
- full_text = ""
86
- last_yield = time.time()
87
-
88
- for token in streamer:
89
- buffer += token
90
- full_text += token
91
-
92
- # NumPy ะพะฑั€ะฐะฑะพั‚ะบะฐ
93
- if "<tool_call>" in buffer:
94
- processed, changed = find_and_replace_tool_calls_numpy(full_text)
95
- if changed:
96
- full_text = processed
97
- buffer = ""
98
- yield full_text
99
- continue
100
-
101
- now = time.time()
102
- if (len(buffer) >= 30 or
103
- any(p in buffer for p in ".!?;\n") or
104
- now - last_yield > 0.7):
105
- yield full_text
106
- buffer = ""
107
- last_yield = now
108
-
109
- if full_text:
110
- yield full_text
111
-
112
- # === Gradio ===
113
- with gr.Blocks() as demo:
114
- prompt = gr.Textbox(label="ะ’ะฒะพะด", placeholder="ะกะฟั€ะพัะธ ั‡ั‚ะพ-ะฝะธะฑัƒะดัŒ...")
115
- max_t = gr.Slider(64, 256, 128, step=32, label="Max Tokens")
116
- temp = gr.Slider(0.1, 1.5, 0.7, step=0.1, label="Temperature")
117
- top_p = gr.Slider(0.5, 1.0, 0.9, step=0.05, label="Top-p")
118
- btn = gr.Button("๐Ÿš€ GPU-ั€ะตะถะธะผ")
119
- output = gr.Textbox(label="ะžั‚ะฒะตั‚")
120
-
121
- btn.click(generate_stream, [prompt, max_t, temp, top_p], output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
  if __name__ == "__main__":
124
- demo.launch()
 
 
1
+ import uuid
 
 
2
  import time
 
 
3
  import json
 
4
  import gradio as gr
5
+ import modelscope_studio.components.antd as antd
6
+ import modelscope_studio.components.antdx as antdx
7
+ import modelscope_studio.components.base as ms
8
+ import modelscope_studio.components.pro as pro
9
+ import dashscope
10
+ from config import DEFAULT_LOCALE, DEFAULT_SETTINGS, DEFAULT_THEME, DEFAULT_SUGGESTIONS, save_history, get_text, user_config, bot_config, welcome_config, api_key, MODEL_OPTIONS_MAP
11
+ from ui_components.logo import Logo
12
+ from ui_components.settings_header import SettingsHeader
13
+ from ui_components.thinking_button import ThinkingButton
14
+ from dashscope import Generation
15
+
16
+ dashscope.api_key = api_key
17
+
18
+
19
def format_history(history, sys_prompt):
    """Convert the chatbot UI history into dashscope-style messages.

    User turns are forwarded as-is; for each assistant turn only the first
    "text" segment is kept (thinking/"tool" segments are dropped).

    NOTE(review): ``sys_prompt`` is accepted but not forwarded — the system
    message block was deliberately disabled; confirm before re-enabling.
    """
    formatted = []
    for entry in history:
        role = entry["role"]
        if role == "user":
            formatted.append({"role": "user", "content": entry["content"]})
        elif role == "assistant":
            text_parts = [
                part["content"] for part in entry["content"]
                if part["type"] == "text"
            ]
            first_text = text_parts[0] if text_parts else ""
            formatted.append({"role": "assistant", "content": first_text})
    return formatted
40
+
41
+
42
class Gradio_Events:
    """Static event handlers for the chat demo.

    Per-session data lives in the ``state`` Gradio component; the
    module-level component references (``chatbot``, ``state``, ``input``,
    ``conversations``, ...) defined in the Blocks layout below are used as
    output keys in the returned update dicts.
    """

    @staticmethod
    def submit(state_value):
        """Stream a model response for the active conversation.

        Yields ``{component: gr.update(...)}`` dicts as chunks arrive,
        rendering a collapsible "thinking" segment first when the model
        emits reasoning content.
        """
        context = state_value["conversation_contexts"][
            state_value["conversation_id"]]
        history = context["history"]
        settings = context["settings"]
        enable_thinking = context["enable_thinking"]
        model = settings.get("model")
        messages = format_history(history,
                                  sys_prompt=settings.get("sys_prompt", ""))

        # Placeholder assistant turn that the stream below fills in.
        history.append({
            "role": "assistant",
            "content": [],
            "key": str(uuid.uuid4()),
            "header": MODEL_OPTIONS_MAP.get(model, {}).get("label", None),
            "loading": True,
            "status": "pending"
        })

        yield {
            chatbot: gr.update(value=history),
            state: gr.update(value=state_value),
        }
        try:
            response = Generation.call(
                model=model,
                messages=messages,
                stream=True,
                result_format='message',
                incremental_output=True,
                enable_thinking=enable_thinking,
                # The settings form stores the budget in K-tokens.
                thinking_budget=settings.get("thinking_budget", 1) * 1024)
            start_time = time.time()
            reasoning_content = ""
            answer_content = ""
            is_thinking = False
            is_answering = False
            # contents[0]: the "thinking" segment, contents[1]: the answer.
            contents = [None, None]
            for chunk in response:
                delta = chunk.output.choices[0].message
                # Skip keep-alive chunks carrying neither answer nor reasoning.
                if (not delta.get("content")
                        and not delta.get("reasoning_content")):
                    continue
                if hasattr(delta,
                           'reasoning_content') and delta.reasoning_content:
                    if not is_thinking:
                        contents[0] = {
                            "type": "tool",
                            "content": "",
                            "options": {
                                "title": get_text("Thinking...", "思考中..."),
                                "status": "pending"
                            },
                            "copyable": False,
                            "editable": False
                        }
                        is_thinking = True
                    reasoning_content += delta.reasoning_content
                if hasattr(delta, 'content') and delta.content:
                    if not is_answering:
                        # First answer token: close out the thinking segment.
                        thought_cost_time = "{:.2f}".format(time.time() -
                                                            start_time)
                        if contents[0]:
                            contents[0]["options"]["title"] = get_text(
                                f"End of Thought ({thought_cost_time}s)",
                                f"已深度思考 (用时{thought_cost_time}s)")
                            contents[0]["options"]["status"] = "done"
                        contents[1] = {
                            "type": "text",
                            "content": "",
                        }
                        is_answering = True
                    answer_content += delta.content

                if contents[0]:
                    contents[0]["content"] = reasoning_content
                if contents[1]:
                    contents[1]["content"] = answer_content
                history[-1]["content"] = [
                    content for content in contents if content
                ]
                history[-1]["loading"] = False
                yield {
                    chatbot: gr.update(value=history),
                    state: gr.update(value=state_value)
                }
            print("model: ", model, "-", "reasoning_content: ",
                  reasoning_content, "\n", "content: ", answer_content)
            history[-1]["status"] = "done"
            cost_time = "{:.2f}".format(time.time() - start_time)
            history[-1]["footer"] = get_text(f"{cost_time}s",
                                             f"用时{cost_time}s")
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value),
            }
        except Exception as e:
            # Surface the error inline, then re-raise so Gradio reports it.
            print("model: ", model, "-", "Error: ", e)
            history[-1]["loading"] = False
            history[-1]["status"] = "done"
            history[-1]["content"] += [{
                "type":
                "text",
                "content":
                f'<span style="color: var(--color-red-500)">{str(e)}</span>'
            }]
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value)
            }
            raise e

    @staticmethod
    def add_message(input_value, settings_form_value, thinking_btn_state_value,
                    state_value):
        """Append the user's message (creating a conversation on first use),
        then stream the model response."""
        if not state_value["conversation_id"]:
            # First message ever: create a conversation labeled by the input.
            random_id = str(uuid.uuid4())
            history = []
            state_value["conversation_id"] = random_id
            state_value["conversation_contexts"][random_id] = {
                "history": history
            }
            state_value["conversations"].append({
                "label": input_value,
                "key": random_id
            })

        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]

        # Snapshot the current form settings onto the conversation context.
        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "settings": settings_form_value,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }
        history.append({
            "role": "user",
            "content": input_value,
            "key": str(uuid.uuid4())
        })
        yield Gradio_Events.preprocess_submit(clear_input=True)(state_value)
        try:
            # Forward streamed updates; always restore the UI afterwards.
            yield from Gradio_Events.submit(state_value)
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def preprocess_submit(clear_input=True):
        """Return a handler that locks the UI while a response is streaming."""

        def preprocess_submit_handler(state_value):
            history = state_value["conversation_contexts"][
                state_value["conversation_id"]]["history"]
            return {
                # Clear the sender only when requested (fixed: the previous
                # inner ternary for the not-clear_input case was unreachable).
                **({
                    input: gr.update(value=None, loading=True),
                } if clear_input else {}),
                conversations:
                gr.update(active_key=state_value["conversation_id"],
                          items=[{
                              **item,
                              "disabled":
                              item["key"] != state_value["conversation_id"],
                          } for item in state_value["conversations"]]),
                add_conversation_btn:
                gr.update(disabled=True),
                clear_btn:
                gr.update(disabled=True),
                conversation_delete_menu_item:
                gr.update(disabled=True),
                chatbot:
                gr.update(value=history,
                          bot_config=bot_config(
                              disabled_actions=['edit', 'retry', 'delete']),
                          user_config=user_config(
                              disabled_actions=['edit', 'delete'])),
                state:
                gr.update(value=state_value),
            }

        return preprocess_submit_handler

    @staticmethod
    def postprocess_submit(state_value):
        """Re-enable the UI after streaming ends (success, error, or cancel)."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        return {
            input:
            gr.update(loading=False),
            conversation_delete_menu_item:
            gr.update(disabled=False),
            clear_btn:
            gr.update(disabled=False),
            conversations:
            gr.update(items=state_value["conversations"]),
            add_conversation_btn:
            gr.update(disabled=False),
            chatbot:
            gr.update(value=history,
                      bot_config=bot_config(),
                      user_config=user_config()),
            state:
            gr.update(value=state_value),
        }

    @staticmethod
    def cancel(state_value):
        """Mark the in-flight assistant turn as paused and unlock the UI."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[-1]["loading"] = False
        history[-1]["status"] = "done"
        history[-1]["footer"] = get_text("Chat completion paused", "对话已暂停")
        return Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def delete_message(state_value, e: gr.EventData):
        """Remove the message at the event's index from the active history."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index] + history[index + 1:]

        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = history

        return gr.update(value=state_value)

    @staticmethod
    def edit_message(state_value, chatbot_value, e: gr.EventData):
        """Copy the edited chatbot content back into the stored history."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[index]["content"] = chatbot_value[index]["content"]
        return gr.update(value=state_value)

    @staticmethod
    def regenerate_message(settings_form_value, thinking_btn_state_value,
                           state_value, e: gr.EventData):
        """Truncate history at the retried message and stream a new answer."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        # Retrying also drops every later message.
        history = history[:index]

        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "settings": settings_form_value,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }

        yield Gradio_Events.preprocess_submit()(state_value)
        try:
            yield from Gradio_Events.submit(state_value)
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def select_suggestion(input_value, e: gr.EventData):
        """Replace the trailing trigger character with the chosen suggestion."""
        input_value = input_value[:-1] + e._data["payload"][0]
        return gr.update(value=input_value)

    @staticmethod
    def apply_prompt(e: gr.EventData):
        """Fill the sender with a clicked welcome-screen prompt."""
        return gr.update(value=e._data["payload"][0]["value"]["description"])

    @staticmethod
    def new_chat(thinking_btn_state, state_value):
        """Reset the UI to an empty, not-yet-saved conversation."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_id"] = ""
        thinking_btn_state["enable_thinking"] = True
        return gr.update(active_key=state_value["conversation_id"]), gr.update(
            value=None), gr.update(value=DEFAULT_SETTINGS), gr.update(
                value=thinking_btn_state), gr.update(value=state_value)

    @staticmethod
    def select_conversation(thinking_btn_state_value, state_value,
                            e: gr.EventData):
        """Switch the UI to the conversation picked in the sidebar."""
        active_key = e._data["payload"][0]
        if state_value["conversation_id"] == active_key or (
                active_key not in state_value["conversation_contexts"]):
            return gr.skip()
        state_value["conversation_id"] = active_key
        thinking_btn_state_value["enable_thinking"] = state_value[
            "conversation_contexts"][active_key]["enable_thinking"]
        return gr.update(active_key=active_key), gr.update(
            value=state_value["conversation_contexts"][active_key]["history"]
        ), gr.update(value=state_value["conversation_contexts"][active_key]
                     ["settings"]), gr.update(
                         value=thinking_btn_state_value), gr.update(
                             value=state_value)

    @staticmethod
    def click_conversation_menu(state_value, e: gr.EventData):
        """Handle sidebar menu actions; only "delete" is implemented."""
        conversation_id = e._data["payload"][0]["key"]
        operation = e._data["payload"][1]["key"]
        if operation == "delete":
            del state_value["conversation_contexts"][conversation_id]

            state_value["conversations"] = [
                item for item in state_value["conversations"]
                if item["key"] != conversation_id
            ]

            if state_value["conversation_id"] == conversation_id:
                # Deleted the active conversation: also clear the chat view.
                state_value["conversation_id"] = ""
                return gr.update(
                    items=state_value["conversations"],
                    active_key=state_value["conversation_id"]), gr.update(
                        value=None), gr.update(value=state_value)
            else:
                return gr.update(
                    items=state_value["conversations"]), gr.skip(), gr.update(
                        value=state_value)
        return gr.skip()

    @staticmethod
    def toggle_settings_header(settings_header_state_value):
        """Flip the open/closed state of the settings panel."""
        settings_header_state_value[
            "open"] = not settings_header_state_value["open"]
        return gr.update(value=settings_header_state_value)

    @staticmethod
    def clear_conversation_history(state_value):
        """Empty the active conversation's history (no-op when none active)."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = []
        return gr.update(value=None), gr.update(value=state_value)

    @staticmethod
    def update_browser_state(state_value):
        """Mirror conversations into the persistent browser storage."""
        return gr.update(value=dict(
            conversations=state_value["conversations"],
            conversation_contexts=state_value["conversation_contexts"]))

    @staticmethod
    def apply_browser_state(browser_state_value, state_value):
        """Restore conversations from browser storage on page load."""
        state_value["conversations"] = browser_state_value["conversations"]
        state_value["conversation_contexts"] = browser_state_value[
            "conversation_contexts"]
        return gr.update(
            items=browser_state_value["conversations"]), gr.update(
                value=state_value)
+ value=state_value)
413
+
414
+
415
# Global stylesheet for the demo layout (full-height chat, sidebar styling).
css = """
.gradio-container {
  padding: 0 !important;
}
.gradio-container > main.fillable {
  padding: 0 !important;
}
#chatbot {
  height: calc(100vh - 21px - 16px);
  max-height: 1500px;
}
#chatbot .chatbot-conversations {
  height: 100vh;
  background-color: var(--ms-gr-ant-color-bg-layout);
  padding-left: 4px;
  padding-right: 4px;
}
#chatbot .chatbot-conversations .chatbot-conversations-list {
  padding-left: 0;
  padding-right: 0;
}
#chatbot .chatbot-chat {
  padding: 32px;
  padding-bottom: 0;
  height: 100%;
}
@media (max-width: 768px) {
  #chatbot .chatbot-chat {
    padding: 0;
  }
}
#chatbot .chatbot-chat .chatbot-chat-messages {
  flex: 1;
}
#chatbot .setting-form-thinking-budget .ms-gr-ant-form-item-control-input-content {
  display: flex;
  flex-wrap: wrap;
}
"""

# Expose the model map to the browser so client-side JS can read it.
model_options_map_json = json.dumps(MODEL_OPTIONS_MAP)
js = f"function init() {{ window.MODEL_OPTIONS_MAP={model_options_map_json}}}"
457
+
458
with gr.Blocks(css=css, js=js, fill_width=True) as demo:
    # Per-session application state.
    state = gr.State({
        "conversation_contexts": {},
        "conversations": [],
        "conversation_id": "",
    })

    with ms.Application(), antdx.XProvider(
            theme=DEFAULT_THEME, locale=DEFAULT_LOCALE), ms.AutoLoading():
        with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
            # --- Left column: logo, new-chat button, conversation list ---
            with antd.Col(md=dict(flex="0 0 260px", span=24, order=0),
                          span=0,
                          elem_style=dict(width=0),
                          order=1):
                with ms.Div(elem_classes="chatbot-conversations"):
                    with antd.Flex(vertical=True,
                                   gap="small",
                                   elem_style=dict(height="100%")):
                        Logo()

                        with antd.Button(value=None,
                                         color="primary",
                                         variant="filled",
                                         block=True) as add_conversation_btn:
                            ms.Text(get_text("New Conversation", "新建对话"))
                            with ms.Slot("icon"):
                                antd.Icon("PlusOutlined")

                        with antdx.Conversations(
                                elem_classes="chatbot-conversations-list",
                        ) as conversations:
                            with ms.Slot('menu.items'):
                                with antd.Menu.Item(
                                        label="Delete", key="delete",
                                        danger=True
                                ) as conversation_delete_menu_item:
                                    with ms.Slot("icon"):
                                        antd.Icon("DeleteOutlined")

            # --- Right column: chat messages + sender ---
            with antd.Col(flex=1, elem_style=dict(height="100%")):
                with antd.Flex(vertical=True,
                               gap="small",
                               elem_classes="chatbot-chat"):
                    chatbot = pro.Chatbot(elem_classes="chatbot-chat-messages",
                                          height=0,
                                          welcome_config=welcome_config(),
                                          user_config=user_config(),
                                          bot_config=bot_config())

                    with antdx.Suggestion(
                            items=DEFAULT_SUGGESTIONS,
                            # onKeyDown handler in JavaScript: "/" opens the
                            # suggestion list, arrow keys navigate it.
                            should_trigger="""(e, { onTrigger, onKeyDown }) => {
                              switch(e.key) {
                                case '/':
                                  onTrigger()
                                  break
                                case 'ArrowRight':
                                case 'ArrowLeft':
                                case 'ArrowUp':
                                case 'ArrowDown':
                                  break;
                                default:
                                  onTrigger(false)
                              }
                              onKeyDown(e)
                            }""") as suggestion:
                        with ms.Slot("children"):
                            with antdx.Sender(placeholder=get_text(
                                    "Enter \"/\" to get suggestions",
                                    "输入 \"/\" 获取提示"), ) as input:
                                with ms.Slot("header"):
                                    settings_header_state, settings_form = SettingsHeader(
                                    )
                                with ms.Slot("prefix"):
                                    with antd.Flex(
                                            gap=4,
                                            wrap=True,
                                            elem_style=dict(maxWidth='40vw')):
                                        with antd.Button(
                                                value=None,
                                                type="text") as setting_btn:
                                            with ms.Slot("icon"):
                                                antd.Icon("SettingOutlined")
                                        with antd.Button(
                                                value=None,
                                                type="text") as clear_btn:
                                            with ms.Slot("icon"):
                                                antd.Icon("ClearOutlined")
                                        thinking_btn_state = ThinkingButton()

    # --- Event wiring ---
    # Browser persistence (only when enabled in config).
    if save_history:
        browser_state = gr.BrowserState(
            {
                "conversation_contexts": {},
                "conversations": [],
            },
            storage_key="qwen3_chat_demo_storage")
        state.change(fn=Gradio_Events.update_browser_state,
                     inputs=[state],
                     outputs=[browser_state])

        demo.load(fn=Gradio_Events.apply_browser_state,
                  inputs=[browser_state, state],
                  outputs=[conversations, state])

    # Conversation sidebar.
    add_conversation_btn.click(fn=Gradio_Events.new_chat,
                               inputs=[thinking_btn_state, state],
                               outputs=[
                                   conversations, chatbot, settings_form,
                                   thinking_btn_state, state
                               ])
    conversations.active_change(fn=Gradio_Events.select_conversation,
                                inputs=[thinking_btn_state, state],
                                outputs=[
                                    conversations, chatbot, settings_form,
                                    thinking_btn_state, state
                                ])
    conversations.menu_click(fn=Gradio_Events.click_conversation_menu,
                             inputs=[state],
                             outputs=[conversations, chatbot, state])

    # Chatbot message actions.
    chatbot.welcome_prompt_select(fn=Gradio_Events.apply_prompt,
                                  outputs=[input])
    chatbot.delete(fn=Gradio_Events.delete_message,
                   inputs=[state],
                   outputs=[state])
    chatbot.edit(fn=Gradio_Events.edit_message,
                 inputs=[state, chatbot],
                 outputs=[state])
    regenerating_event = chatbot.retry(
        fn=Gradio_Events.regenerate_message,
        inputs=[settings_form, thinking_btn_state, state],
        outputs=[
            input, clear_btn, conversation_delete_menu_item,
            add_conversation_btn, conversations, chatbot, state
        ])

    # Sender (submit / cancel) and its action buttons.
    submit_event = input.submit(
        fn=Gradio_Events.add_message,
        inputs=[input, settings_form, thinking_btn_state, state],
        outputs=[
            input, clear_btn, conversation_delete_menu_item,
            add_conversation_btn, conversations, chatbot, state
        ])
    input.cancel(fn=Gradio_Events.cancel,
                 inputs=[state],
                 outputs=[
                     input, conversation_delete_menu_item, clear_btn,
                     conversations, add_conversation_btn, chatbot, state
                 ],
                 cancels=[submit_event, regenerating_event],
                 queue=False)
    setting_btn.click(fn=Gradio_Events.toggle_settings_header,
                      inputs=[settings_header_state],
                      outputs=[settings_header_state])
    clear_btn.click(fn=Gradio_Events.clear_conversation_history,
                    inputs=[state],
                    outputs=[chatbot, state])
    suggestion.select(fn=Gradio_Events.select_suggestion,
                      inputs=[input],
                      outputs=[input])
 
634
if __name__ == "__main__":
    # High concurrency limits: the handlers are I/O-bound streaming calls.
    demo.queue(default_concurrency_limit=100, max_size=100).launch(
        ssr_mode=False, max_threads=100)
config.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from modelscope_studio.components.pro.chatbot import ChatbotActionConfig, ChatbotBotConfig, ChatbotUserConfig, ChatbotWelcomeConfig
3
+
4
# Env
# is_cn: running inside the ModelScope studio (CN) environment.
is_cn = os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio'
api_key = os.getenv('API_KEY')


def get_text(text: str, cn_text: str):
    """Return ``cn_text`` in the CN (ModelScope studio) environment,
    otherwise ``text``."""
    return cn_text if is_cn else text


# Save history in browser
save_history = True
17
+
18
+
19
# Chatbot Config
def user_config(disabled_actions=None):
    """Build the user-message config: copy/edit actions plus a
    confirmation-guarded delete.

    ``disabled_actions`` greys out the given actions (used while a
    response is streaming).
    """
    delete_action = ChatbotActionConfig(
        action="delete",
        popconfirm=dict(title=get_text("Delete the message", "删除消息"),
                        description=get_text(
                            "Are you sure to delete this message?",
                            "确认删除该消息？"),
                        okButtonProps=dict(danger=True)))
    return ChatbotUserConfig(
        class_names=dict(content="user-message-content"),
        actions=["copy", "edit", delete_action],
        disabled_actions=disabled_actions)
34
+
35
+
36
def bot_config(disabled_actions=None):
    """Build the bot-message config: copy/edit plus confirmation-guarded
    retry and delete, with the Qwen avatar.

    ``disabled_actions`` greys out the given actions (used while a
    response is streaming).
    """
    retry_action = ChatbotActionConfig(
        action="retry",
        popconfirm=dict(
            title=get_text("Regenerate the message", "重新生成消息"),
            description=get_text(
                "Regenerate the message will also delete all subsequent messages.",
                "重新生成消息会删除所有后续消息。"),
            okButtonProps=dict(danger=True)))
    delete_action = ChatbotActionConfig(
        action="delete",
        popconfirm=dict(title=get_text("Delete the message", "删除消息"),
                        description=get_text(
                            "Are you sure to delete this message?",
                            "确认删除该消息？"),
                        okButtonProps=dict(danger=True)))
    return ChatbotBotConfig(
        actions=["copy", "edit", retry_action, delete_action],
        avatar="./assets/qwen.png",
        disabled_actions=disabled_actions)
57
+
58
+
59
def welcome_config():
    """Build the chatbot welcome panel: Qwen3 branding plus two groups of
    starter prompts ("Make a plan" / "Help me write")."""
    plan_prompts = [{
        "description":
        get_text("Help me with a plan to start a business", "帮助我制定一个创业计划")
    }, {
        "description":
        get_text("Help me with a plan to achieve my goals", "帮助我制定一个实现目标的计划")
    }, {
        "description":
        get_text("Help me with a plan for a successful interview",
                 "帮助我制定一个成功的面试计划")
    }]
    write_prompts = [{
        "description":
        get_text("Help me write a story with a twist ending",
                 "帮助我写一个带有意外结局的故事")
    }, {
        "description":
        get_text("Help me write a blog post on mental health",
                 "帮助我写一篇关于心理健康的博客文章")
    }, {
        "description":
        get_text("Help me write a letter to my future self", "帮助我写一封给未来自己的信")
    }]
    return ChatbotWelcomeConfig(
        variant="borderless",
        icon="./assets/qwen.png",
        title=get_text("Hello, I'm Qwen3", "你好，我是 Qwen3"),
        description=get_text("Select a model and enter text to get started.",
                             "选择模型并输入文本，开始对话吧。"),
        prompts=dict(
            title=get_text("How can I help you today?", "有什么我能帮助你的吗?"),
            styles={
                "list": {
                    "width": '100%',
                },
                "item": {
                    "flex": 1,
                },
            },
            items=[{
                "label": get_text("📅 Make a plan", "📅 制定计划"),
                "children": plan_prompts
            }, {
                "label": get_text("🖋 Help me write", "🖋 帮我写"),
                "children": write_prompts
            }]),
    )
110
+
111
+
112
# Sender "/" suggestions: two groups mirroring the welcome prompts.
DEFAULT_SUGGESTIONS = [{
    "label": get_text('Make a plan', '制定计划'),
    "value": get_text('Make a plan', '制定计划'),
    "children": [{
        "label": get_text("Start a business", "开始创业"),
        "value": get_text("Help me with a plan to start a business",
                          "帮助我制定一个创业计划")
    }, {
        "label": get_text("Achieve my goals", "实现我的目标"),
        "value": get_text("Help me with a plan to achieve my goals",
                          "帮助我制定一个实现目标的计划")
    }, {
        "label": get_text("Successful interview", "成功的面试"),
        "value": get_text("Help me with a plan for a successful interview",
                          "帮助我制定一个成功的面试计划")
    }]
}, {
    "label": get_text('Help me write', '帮我写'),
    "value": get_text("Help me write", '帮我写'),
    "children": [{
        "label": get_text("Story with a twist ending", "带有意外结局的故事"),
        "value": get_text("Help me write a story with a twist ending",
                          "帮助我写一个带有意外结局的故事")
    }, {
        "label": get_text("Blog post on mental health", "关于心理健康的博客文章"),
        "value": get_text("Help me write a blog post on mental health",
                          "帮助我写一篇关于心理健康的博客文章")
    }, {
        "label": get_text("Letter to my future self", "给未来自己的信"),
        "value": get_text("Help me write a letter to my future self",
                          "帮助我写一封给未来自己的信")
    }]
}]

DEFAULT_SYS_PROMPT = "You are a helpful and harmless assistant."

# Thinking budget bounds/default, in K tokens (multiplied by 1024 at call time).
MIN_THINKING_BUDGET = 1

MAX_THINKING_BUDGET = 38

DEFAULT_THINKING_BUDGET = 38

DEFAULT_MODEL = "qwen3-235b-a22b"

# Selectable models: UI label, hub model id, and dashscope model name.
MODEL_OPTIONS = [
    {
        "label": get_text("Qwen3-235B-A22B", "通义千问3-235B-A22B"),
        "modelId": "Qwen/Qwen3-235B-A22B",
        "value": "qwen3-235b-a22b"
    },
    {
        "label": get_text("Qwen3-32B", "通义千问3-32B"),
        "modelId": "Qwen/Qwen3-32B",
        "value": "qwen3-32b"
    },
    {
        "label": get_text("Qwen3-30B-A3B", "通义千问3-30B-A3B"),
        "modelId": "Qwen/Qwen3-30B-A3B",
        "value": "qwen3-30b-a3b"
    },
    {
        "label": get_text("Qwen3-14B", "通义千问3-14B"),
        "modelId": "Qwen/Qwen3-14B",
        "value": "qwen3-14b"
    },
    {
        "label": get_text("Qwen3-8B", "通义千问3-8B"),
        "modelId": "Qwen/Qwen3-8B",
        "value": "qwen3-8b"
    },
    {
        "label": get_text("Qwen3-4B", "通义千问3-4B"),
        "modelId": "Qwen/Qwen3-4B",
        "value": "qwen3-4b"
    },
    {
        "label": get_text("Qwen3-1.7B", "通义千问3-1.7B"),
        "modelId": "Qwen/Qwen3-1.7B",
        "value": "qwen3-1.7b"
    },
    {
        "label": get_text("Qwen3-0.6B", "通义千问3-0.6B"),
        "modelId": "Qwen/Qwen3-0.6B",
        "value": "qwen3-0.6b"
    },
]

# Link each model card to ModelScope in the CN environment, Hugging Face
# otherwise. (Replaced the fragile `cond and X or Y` idiom with a proper
# conditional expression.)
for model in MODEL_OPTIONS:
    model["link"] = (f"https://modelscope.cn/models/{model['modelId']}"
                     if is_cn else f"https://huggingface.co/{model['modelId']}")

# Lookup by dashscope model name, used for chat headers and the browser JS.
MODEL_OPTIONS_MAP = {model["value"]: model for model in MODEL_OPTIONS}

DEFAULT_LOCALE = 'zh_CN' if is_cn else 'en_US'

DEFAULT_THEME = {
    "token": {
        "colorPrimary": "#6A57FF",
    }
}

DEFAULT_SETTINGS = {
    "model": DEFAULT_MODEL,
    "sys_prompt": DEFAULT_SYS_PROMPT,
    "thinking_budget": DEFAULT_THINKING_BUDGET
}
gitattributes DELETED
@@ -1,36 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
36
- assets/qwen.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,11 +1,3 @@
1
- vllm
2
- accelerate
3
- bitsandbytes
4
- peft
5
- transformers
6
- sentencepiece
7
- torch
8
  gradio
9
- pillow
10
- matplotlib
11
- transformers-stream-generator
 
 
 
 
 
 
 
 
1
  gradio
2
+ modelscope_studio
3
+ dashscope