FrostIce committed on
Commit
5f507e8
·
verified ·
1 Parent(s): 5ec5ecc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -642
app.py CHANGED
@@ -1,646 +1,10 @@
1
- import uuid
2
- import time
3
- import json
4
  import gradio as gr
5
- import modelscope_studio.components.antd as antd
6
- import modelscope_studio.components.antdx as antdx
7
- import modelscope_studio.components.base as ms
8
- import modelscope_studio.components.pro as pro
9
- import dashscope
10
- from config import DEFAULT_LOCALE, DEFAULT_SETTINGS, DEFAULT_THEME, DEFAULT_SUGGESTIONS, save_history, get_text, user_config, bot_config, welcome_config, api_key, MODEL_OPTIONS_MAP
11
- from ui_components.logo import Logo
12
- from ui_components.settings_header import SettingsHeader
13
- from ui_components.thinking_button import ThinkingButton
14
- from dashscope import Generation
15
 
16
# Authenticate the DashScope SDK globally; `api_key` is loaded in config.py.
dashscope.api_key = api_key
17
 
 
 
 
18
 
19
def format_history(history, sys_prompt):
    """Convert chatbot UI history into DashScope-style chat messages.

    Parameters
    ----------
    history : list[dict]
        Chatbot items. User entries carry a plain string ``content``;
        assistant entries carry a list of content parts, each a dict with
        ``type`` and ``content`` keys.
    sys_prompt : str
        Currently unused: the system message is deliberately not injected
        (the original system-message block was disabled). The parameter is
        kept so existing callers remain compatible.

    Returns
    -------
    list[dict]
        ``{"role": ..., "content": ...}`` messages. Assistant content is
        flattened to the first ``"text"`` part, or ``""`` when there is
        none; any other role is skipped.
    """
    messages = []
    for item in history:
        role = item["role"]
        if role == "user":
            messages.append({"role": "user", "content": item["content"]})
        elif role == "assistant":
            # Keep only the text parts (thinking/tool parts are dropped);
            # DashScope expects a plain string for assistant content.
            texts = [
                part["content"] for part in item["content"]
                if part["type"] == "text"
            ]
            messages.append({
                "role": "assistant",
                "content": texts[0] if texts else ""
            })
    return messages
40
-
41
-
42
class Gradio_Events:
    """Static event handlers wired to the Gradio UI built at module level.

    All handlers operate on the shared ``state`` dict:
    ``{"conversation_contexts": {id: {"history", "settings",
    "enable_thinking"}}, "conversations": [...], "conversation_id": str}``.
    Handlers that yield dicts keyed by component objects (``chatbot``,
    ``state``, ``input``, ...) rely on those names being defined later at
    module scope inside the ``gr.Blocks`` context.
    """

    @staticmethod
    def submit(state_value):
        """Stream one assistant turn from DashScope into the chatbot.

        Generator: yields incremental ``{component: gr.update(...)}`` dicts
        as reasoning/answer tokens arrive. Mutates the active conversation's
        history in place (appends a pending assistant item, then fills it).
        """
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        settings = state_value["conversation_contexts"][
            state_value["conversation_id"]]["settings"]
        enable_thinking = state_value["conversation_contexts"][
            state_value["conversation_id"]]["enable_thinking"]
        model = settings.get("model")
        messages = format_history(history,
                                  sys_prompt=settings.get("sys_prompt", ""))

        # Placeholder assistant message shown as "loading" while streaming.
        history.append({
            "role": "assistant",
            "content": [],
            "key": str(uuid.uuid4()),
            "header": MODEL_OPTIONS_MAP.get(model, {}).get("label", None),
            "loading": True,
            "status": "pending"
        })

        yield {
            chatbot: gr.update(value=history),
            state: gr.update(value=state_value),
        }
        try:
            # thinking_budget setting is in "K tokens"; the API takes tokens.
            response = Generation.call(
                model=model,
                messages=messages,
                stream=True,
                result_format='message',
                incremental_output=True,
                enable_thinking=enable_thinking,
                thinking_budget=settings.get("thinking_budget", 1) * 1024)
            start_time = time.time()
            reasoning_content = ""
            answer_content = ""
            is_thinking = False
            is_answering = False
            # contents[0] = thinking ("tool") part, contents[1] = answer text.
            contents = [None, None]
            for chunk in response:
                # Skip keep-alive chunks that carry neither content field.
                # NOTE(review): assumes dashscope chunks expose
                # .output.choices[0].message — per the dashscope SDK.
                if (not chunk.output.choices[0].message.get("content")
                        and not chunk.output.choices[0].message.get(
                            "reasoning_content")):
                    pass
                else:
                    delta = chunk.output.choices[0].message
                    if hasattr(
                            delta,
                            'reasoning_content') and delta.reasoning_content:
                        if not is_thinking:
                            # First reasoning token: open the "Thinking" pane.
                            contents[0] = {
                                "type": "tool",
                                "content": "",
                                "options": {
                                    "title": get_text("Thinking...",
                                                      "思考中..."),
                                    "status": "pending"
                                },
                                "copyable": False,
                                "editable": False
                            }
                            is_thinking = True
                        reasoning_content += delta.reasoning_content
                    if hasattr(delta, 'content') and delta.content:
                        if not is_answering:
                            # First answer token: close the thinking pane
                            # (record elapsed time) and open the text part.
                            thought_cost_time = "{:.2f}".format(time.time() -
                                                                start_time)
                            if contents[0]:
                                contents[0]["options"]["title"] = get_text(
                                    f"End of Thought ({thought_cost_time}s)",
                                    f"已深度思考 (用时{thought_cost_time}s)")
                                contents[0]["options"]["status"] = "done"
                            contents[1] = {
                                "type": "text",
                                "content": "",
                            }

                            is_answering = True
                        answer_content += delta.content

                    # Re-render accumulated parts on every chunk.
                    if contents[0]:
                        contents[0]["content"] = reasoning_content
                    if contents[1]:
                        contents[1]["content"] = answer_content
                    history[-1]["content"] = [
                        content for content in contents if content
                    ]

                    history[-1]["loading"] = False
                    yield {
                        chatbot: gr.update(value=history),
                        state: gr.update(value=state_value)
                    }
            print("model: ", model, "-", "reasoning_content: ",
                  reasoning_content, "\n", "content: ", answer_content)
            history[-1]["status"] = "done"
            cost_time = "{:.2f}".format(time.time() - start_time)
            history[-1]["footer"] = get_text(f"{cost_time}s",
                                             f"用时{cost_time}s")
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value),
            }
        except Exception as e:
            # Surface the error inline in the chat, then re-raise so the
            # caller's finally-block can restore the UI.
            print("model: ", model, "-", "Error: ", e)
            history[-1]["loading"] = False
            history[-1]["status"] = "done"
            history[-1]["content"] += [{
                "type": "text",
                "content":
                f'<span style="color: var(--color-red-500)">{str(e)}</span>'
            }]
            yield {
                chatbot: gr.update(value=history),
                state: gr.update(value=state_value)
            }
            raise e

    @staticmethod
    def add_message(input_value, settings_form_value, thinking_btn_state_value,
                    state_value):
        """Append the user's message (creating a conversation if needed) and
        stream the assistant reply. Generator of component-update dicts."""
        if not state_value["conversation_id"]:
            # No active conversation: create one, labelled by the first input.
            random_id = str(uuid.uuid4())
            history = []
            state_value["conversation_id"] = random_id
            state_value["conversation_contexts"][
                state_value["conversation_id"]] = {
                    "history": history
                }
            state_value["conversations"].append({
                "label": input_value,
                "key": random_id
            })

        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]

        # Snapshot current settings/thinking toggle onto the conversation.
        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "settings": settings_form_value,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }
        history.append({
            "role": "user",
            "content": input_value,
            "key": str(uuid.uuid4())
        })
        yield Gradio_Events.preprocess_submit(clear_input=True)(state_value)

        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            # Always re-enable the UI, even on error/cancel.
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def preprocess_submit(clear_input=True):
        """Return a handler that locks the UI while a request is in flight
        (disables buttons/other conversations, optionally clears the input)."""

        def preprocess_submit_handler(state_value):
            history = state_value["conversation_contexts"][
                state_value["conversation_id"]]["history"]
            return {
                # NOTE(review): the inner `if clear_input else` branch is
                # unreachable — the outer condition already guards it.
                **({
                    input:
                    gr.update(value=None, loading=True) if clear_input else gr.update(loading=True),
                } if clear_input else {}),
                conversations:
                gr.update(active_key=state_value["conversation_id"],
                          items=list(
                              map(
                                  lambda item: {
                                      **item,
                                      "disabled":
                                      True if item["key"] != state_value[
                                          "conversation_id"] else False,
                                  }, state_value["conversations"]))),
                add_conversation_btn:
                gr.update(disabled=True),
                clear_btn:
                gr.update(disabled=True),
                conversation_delete_menu_item:
                gr.update(disabled=True),
                chatbot:
                gr.update(value=history,
                          bot_config=bot_config(
                              disabled_actions=['edit', 'retry', 'delete']),
                          user_config=user_config(
                              disabled_actions=['edit', 'delete'])),
                state:
                gr.update(value=state_value),
            }

        return preprocess_submit_handler

    @staticmethod
    def postprocess_submit(state_value):
        """Unlock the UI after a request finishes (mirror of preprocess)."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        return {
            input:
            gr.update(loading=False),
            conversation_delete_menu_item:
            gr.update(disabled=False),
            clear_btn:
            gr.update(disabled=False),
            conversations:
            gr.update(items=state_value["conversations"]),
            add_conversation_btn:
            gr.update(disabled=False),
            chatbot:
            gr.update(value=history,
                      bot_config=bot_config(),
                      user_config=user_config()),
            state:
            gr.update(value=state_value),
        }

    @staticmethod
    def cancel(state_value):
        """Mark the in-flight assistant message as paused and unlock the UI."""
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[-1]["loading"] = False
        history[-1]["status"] = "done"
        history[-1]["footer"] = get_text("Chat completion paused", "对话已暂停")
        return Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def delete_message(state_value, e: gr.EventData):
        """Remove the message at the event's index from the active history."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index] + history[index + 1:]

        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = history

        return gr.update(value=state_value)

    @staticmethod
    def edit_message(state_value, chatbot_value, e: gr.EventData):
        """Copy the edited content from the chatbot widget back into state."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history[index]["content"] = chatbot_value[index]["content"]
        return gr.update(value=state_value)

    @staticmethod
    def regenerate_message(settings_form_value, thinking_btn_state_value,
                           state_value, e: gr.EventData):
        """Truncate history at the retried message and re-run submit.
        Generator of component-update dicts."""
        index = e._data["payload"][0]["index"]
        history = state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"]
        history = history[:index]

        state_value["conversation_contexts"][
            state_value["conversation_id"]] = {
                "history": history,
                "settings": settings_form_value,
                "enable_thinking": thinking_btn_state_value["enable_thinking"]
            }

        yield Gradio_Events.preprocess_submit()(state_value)
        try:
            for chunk in Gradio_Events.submit(state_value):
                yield chunk
        except Exception as e:
            raise e
        finally:
            yield Gradio_Events.postprocess_submit(state_value)

    @staticmethod
    def select_suggestion(input_value, e: gr.EventData):
        """Replace the trailing trigger character ("/") with the suggestion."""
        input_value = input_value[:-1] + e._data["payload"][0]
        return gr.update(value=input_value)

    @staticmethod
    def apply_prompt(e: gr.EventData):
        """Fill the input with the selected welcome-prompt description."""
        return gr.update(value=e._data["payload"][0]["value"]["description"])

    @staticmethod
    def new_chat(thinking_btn_state, state_value):
        """Reset to an empty conversation with default settings."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_id"] = ""
        thinking_btn_state["enable_thinking"] = True
        return gr.update(active_key=state_value["conversation_id"]), gr.update(
            value=None), gr.update(value=DEFAULT_SETTINGS), gr.update(
                value=thinking_btn_state), gr.update(value=state_value)

    @staticmethod
    def select_conversation(thinking_btn_state_value, state_value,
                            e: gr.EventData):
        """Switch the active conversation; restore its history/settings."""
        active_key = e._data["payload"][0]
        if state_value["conversation_id"] == active_key or (
                active_key not in state_value["conversation_contexts"]):
            return gr.skip()
        state_value["conversation_id"] = active_key
        thinking_btn_state_value["enable_thinking"] = state_value[
            "conversation_contexts"][active_key]["enable_thinking"]
        return gr.update(active_key=active_key), gr.update(
            value=state_value["conversation_contexts"][active_key]["history"]
        ), gr.update(value=state_value["conversation_contexts"][active_key]
                     ["settings"]), gr.update(
                         value=thinking_btn_state_value), gr.update(
                             value=state_value)

    @staticmethod
    def click_conversation_menu(state_value, e: gr.EventData):
        """Handle the per-conversation context menu (only "delete" exists)."""
        conversation_id = e._data["payload"][0]["key"]
        operation = e._data["payload"][1]["key"]
        if operation == "delete":
            del state_value["conversation_contexts"][conversation_id]

            state_value["conversations"] = [
                item for item in state_value["conversations"]
                if item["key"] != conversation_id
            ]

            if state_value["conversation_id"] == conversation_id:
                # Deleted the active conversation: also clear the chat view.
                state_value["conversation_id"] = ""
                return gr.update(
                    items=state_value["conversations"],
                    active_key=state_value["conversation_id"]), gr.update(
                        value=None), gr.update(value=state_value)
            else:
                return gr.update(
                    items=state_value["conversations"]), gr.skip(), gr.update(
                        value=state_value)
        return gr.skip()

    @staticmethod
    def toggle_settings_header(settings_header_state_value):
        """Toggle visibility of the settings panel above the input."""
        settings_header_state_value[
            "open"] = not settings_header_state_value["open"]
        return gr.update(value=settings_header_state_value)

    @staticmethod
    def clear_conversation_history(state_value):
        """Empty the active conversation's history (no-op when none active)."""
        if not state_value["conversation_id"]:
            return gr.skip()
        state_value["conversation_contexts"][
            state_value["conversation_id"]]["history"] = []
        return gr.update(value=None), gr.update(value=state_value)

    @staticmethod
    def update_browser_state(state_value):
        """Persist conversations + contexts into gr.BrowserState."""
        return gr.update(value=dict(
            conversations=state_value["conversations"],
            conversation_contexts=state_value["conversation_contexts"]))

    @staticmethod
    def apply_browser_state(browser_state_value, state_value):
        """Restore persisted conversations from gr.BrowserState on load."""
        state_value["conversations"] = browser_state_value["conversations"]
        state_value["conversation_contexts"] = browser_state_value[
            "conversation_contexts"]
        return gr.update(
            items=browser_state_value["conversations"]), gr.update(
                value=state_value)
413
-
414
-
415
- css = """
416
- .gradio-container {
417
- padding: 0 !important;
418
- }
419
-
420
- .gradio-container > main.fillable {
421
- padding: 0 !important;
422
- }
423
-
424
- #chatbot {
425
- height: calc(100vh - 21px - 16px);
426
- max-height: 1500px;
427
- }
428
-
429
- #chatbot .chatbot-conversations {
430
- height: 100vh;
431
- background-color: var(--ms-gr-ant-color-bg-layout);
432
- padding-left: 4px;
433
- padding-right: 4px;
434
- }
435
-
436
-
437
- #chatbot .chatbot-conversations .chatbot-conversations-list {
438
- padding-left: 0;
439
- padding-right: 0;
440
- }
441
-
442
- #chatbot .chatbot-chat {
443
- padding: 32px;
444
- padding-bottom: 0;
445
- height: 100%;
446
- }
447
-
448
- @media (max-width: 768px) {
449
- #chatbot .chatbot-chat {
450
- padding: 0;
451
- }
452
- }
453
-
454
- #chatbot .chatbot-chat .chatbot-chat-messages {
455
- flex: 1;
456
- }
457
-
458
-
459
- #chatbot .setting-form-thinking-budget .ms-gr-ant-form-item-control-input-content {
460
- display: flex;
461
- flex-wrap: wrap;
462
- }
463
- """
464
-
465
- model_options_map_json = json.dumps(MODEL_OPTIONS_MAP)
466
- js = "function init() { window.MODEL_OPTIONS_MAP=" + model_options_map_json + "}"
467
-
468
- with gr.Blocks(css=css, js=js, fill_width=True) as demo:
469
- state = gr.State({
470
- "conversation_contexts": {},
471
- "conversations": [],
472
- "conversation_id": "",
473
- })
474
-
475
- with ms.Application(), antdx.XProvider(
476
- theme=DEFAULT_THEME, locale=DEFAULT_LOCALE), ms.AutoLoading():
477
- with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
478
- # Left Column
479
- with antd.Col(md=dict(flex="0 0 260px", span=24, order=0),
480
- span=0,
481
- elem_style=dict(width=0),
482
- order=1):
483
- with ms.Div(elem_classes="chatbot-conversations"):
484
- with antd.Flex(vertical=True,
485
- gap="small",
486
- elem_style=dict(height="100%")):
487
- # Logo
488
- Logo()
489
-
490
- # New Conversation Button
491
- with antd.Button(value=None,
492
- color="primary",
493
- variant="filled",
494
- block=True) as add_conversation_btn:
495
- ms.Text(get_text("New Conversation", "新建对话"))
496
- with ms.Slot("icon"):
497
- antd.Icon("PlusOutlined")
498
-
499
- # Conversations List
500
- with antdx.Conversations(
501
- elem_classes="chatbot-conversations-list",
502
- ) as conversations:
503
- with ms.Slot('menu.items'):
504
- with antd.Menu.Item(
505
- label="Delete", key="delete",
506
- danger=True
507
- ) as conversation_delete_menu_item:
508
- with ms.Slot("icon"):
509
- antd.Icon("DeleteOutlined")
510
- # Right Column
511
- with antd.Col(flex=1, elem_style=dict(height="100%")):
512
- with antd.Flex(vertical=True,
513
- gap="small",
514
- elem_classes="chatbot-chat"):
515
- # Chatbot
516
- chatbot = pro.Chatbot(elem_classes="chatbot-chat-messages",
517
- height=0,
518
- welcome_config=welcome_config(),
519
- user_config=user_config(),
520
- bot_config=bot_config())
521
-
522
- # Input
523
- with antdx.Suggestion(
524
- items=DEFAULT_SUGGESTIONS,
525
- # onKeyDown Handler in Javascript
526
- should_trigger="""(e, { onTrigger, onKeyDown }) => {
527
- switch(e.key) {
528
- case '/':
529
- onTrigger()
530
- break
531
- case 'ArrowRight':
532
- case 'ArrowLeft':
533
- case 'ArrowUp':
534
- case 'ArrowDown':
535
- break;
536
- default:
537
- onTrigger(false)
538
- }
539
- onKeyDown(e)
540
- }""") as suggestion:
541
- with ms.Slot("children"):
542
- with antdx.Sender(placeholder=get_text(
543
- "Enter \"/\" to get suggestions",
544
- "输入 \"/\" 获取提示"), ) as input:
545
- with ms.Slot("header"):
546
- settings_header_state, settings_form = SettingsHeader(
547
- )
548
- with ms.Slot("prefix"):
549
- with antd.Flex(
550
- gap=4,
551
- wrap=True,
552
- elem_style=dict(maxWidth='40vw')):
553
- with antd.Button(
554
- value=None,
555
- type="text") as setting_btn:
556
- with ms.Slot("icon"):
557
- antd.Icon("SettingOutlined")
558
- with antd.Button(
559
- value=None,
560
- type="text") as clear_btn:
561
- with ms.Slot("icon"):
562
- antd.Icon("ClearOutlined")
563
- thinking_btn_state = ThinkingButton()
564
-
565
- # Events Handler
566
- # Browser State Handler
567
- if save_history:
568
- browser_state = gr.BrowserState(
569
- {
570
- "conversation_contexts": {},
571
- "conversations": [],
572
- },
573
- storage_key="qwen3_chat_demo_storage")
574
- state.change(fn=Gradio_Events.update_browser_state,
575
- inputs=[state],
576
- outputs=[browser_state])
577
-
578
- demo.load(fn=Gradio_Events.apply_browser_state,
579
- inputs=[browser_state, state],
580
- outputs=[conversations, state])
581
-
582
- # Conversations Handler
583
- add_conversation_btn.click(fn=Gradio_Events.new_chat,
584
- inputs=[thinking_btn_state, state],
585
- outputs=[
586
- conversations, chatbot, settings_form,
587
- thinking_btn_state, state
588
- ])
589
- conversations.active_change(fn=Gradio_Events.select_conversation,
590
- inputs=[thinking_btn_state, state],
591
- outputs=[
592
- conversations, chatbot, settings_form,
593
- thinking_btn_state, state
594
- ])
595
- conversations.menu_click(fn=Gradio_Events.click_conversation_menu,
596
- inputs=[state],
597
- outputs=[conversations, chatbot, state])
598
- # Chatbot Handler
599
- chatbot.welcome_prompt_select(fn=Gradio_Events.apply_prompt,
600
- outputs=[input])
601
-
602
- chatbot.delete(fn=Gradio_Events.delete_message,
603
- inputs=[state],
604
- outputs=[state])
605
- chatbot.edit(fn=Gradio_Events.edit_message,
606
- inputs=[state, chatbot],
607
- outputs=[state])
608
-
609
- regenerating_event = chatbot.retry(
610
- fn=Gradio_Events.regenerate_message,
611
- inputs=[settings_form, thinking_btn_state, state],
612
- outputs=[
613
- input, clear_btn, conversation_delete_menu_item,
614
- add_conversation_btn, conversations, chatbot, state
615
- ])
616
-
617
- # Input Handler
618
- submit_event = input.submit(
619
- fn=Gradio_Events.add_message,
620
- inputs=[input, settings_form, thinking_btn_state, state],
621
- outputs=[
622
- input, clear_btn, conversation_delete_menu_item,
623
- add_conversation_btn, conversations, chatbot, state
624
- ])
625
- input.cancel(fn=Gradio_Events.cancel,
626
- inputs=[state],
627
- outputs=[
628
- input, conversation_delete_menu_item, clear_btn,
629
- conversations, add_conversation_btn, chatbot, state
630
- ],
631
- cancels=[submit_event, regenerating_event],
632
- queue=False)
633
- # Input Actions Handler
634
- setting_btn.click(fn=Gradio_Events.toggle_settings_header,
635
- inputs=[settings_header_state],
636
- outputs=[settings_header_state])
637
- clear_btn.click(fn=Gradio_Events.clear_conversation_history,
638
- inputs=[state],
639
- outputs=[chatbot, state])
640
- suggestion.select(fn=Gradio_Events.select_suggestion,
641
- inputs=[input],
642
- outputs=[input])
643
-
644
- if __name__ == "__main__":
645
- demo.queue(default_concurrency_limit=100,
646
- max_size=100).launch(ssr_mode=False, max_threads=100)
 
1
+ from llama_cpp import Llama
 
 
2
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
3
 
4
# Load the GGUF model once at module import; reused for every request.
# NOTE(review): model file must exist at this relative path — confirm.
llm = Llama(model_path="./qwen1_8b.gguf")
5
 
6
def chat_fn(prompt, max_tokens=200):
    """Generate a completion for *prompt* with the module-level llama model.

    Parameters
    ----------
    prompt : str
        Raw text passed directly to the model (no chat template applied).
    max_tokens : int, optional
        Generation cap; defaults to 200 to keep the previous behavior.

    Returns
    -------
    str
        The first completion's text, per llama-cpp-python's
        OpenAI-style response dict.
    """
    result = llm(prompt, max_tokens=max_tokens)
    return result['choices'][0]['text']
9
 
10
+ gr.Interface(fn=chat_fn, inputs="text", outputs="text", title="Qwen 1.8B на CPU").launch()