shuai bai committed on
Commit
eb7f245
·
verified ·
1 Parent(s): 0e17540

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +217 -69
app.py CHANGED
@@ -1,70 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
-
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
14
- """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
- """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- messages.extend(history)
22
-
23
- messages.append({"role": "user", "content": message})
24
-
25
- response = ""
26
-
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
- )
62
-
63
- with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
67
-
68
-
69
- if __name__ == "__main__":
70
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import os
6
+ import numpy as np
7
+ from urllib3.exceptions import HTTPError
8
+ os.system('pip install dashscope modelscope oss2 -U')
9
+
10
+ from argparse import ArgumentParser
11
+ from pathlib import Path
12
+
13
+ import copy
14
  import gradio as gr
15
+ import oss2
16
+ import os
17
+ import re
18
+ import secrets
19
+ import tempfile
20
+ import requests
21
+ from http import HTTPStatus
22
+ from dashscope import MultiModalConversation
23
+ import dashscope
24
+
25
# DashScope API key is read from the environment so the credential stays out
# of source control; this raises KeyError at import time if API_KEY is unset.
API_KEY = os.environ['API_KEY']
dashscope.api_key = API_KEY

# Revision tag; used as the default for the --revision CLI flag.
REVISION = 'v1.0.4'
# Regex capturing the payload of <box>...</box> grounding tags.
# NOTE(review): not referenced elsewhere in this file's visible code —
# _remove_image_special uses its own inline pattern; confirm before removing.
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
30
+ PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
31
+
32
+
33
def _get_args():
    """Build and parse the demo's command-line arguments.

    Returns:
        argparse.Namespace with revision, cpu_only, share, inbrowser,
        server_port and server_name attributes.
    """
    cli = ArgumentParser()
    cli.add_argument("--revision", type=str, default=REVISION)
    cli.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")

    cli.add_argument("--share", action="store_true", default=False,
                     help="Create a publicly shareable link for the interface.")
    cli.add_argument("--inbrowser", action="store_true", default=False,
                     help="Automatically launch the interface in a new tab on the default browser.")
    cli.add_argument("--server-port", type=int, default=7860,
                     help="Demo server port.")
    cli.add_argument("--server-name", type=str, default="127.0.0.1",
                     help="Demo server name.")

    return cli.parse_args()
49
+
50
+ def _parse_text(text):
51
+ lines = text.split("\n")
52
+ lines = [line for line in lines if line != ""]
53
+ count = 0
54
+ for i, line in enumerate(lines):
55
+ if "```" in line:
56
+ count += 1
57
+ items = line.split("`")
58
+ if count % 2 == 1:
59
+ lines[i] = f'<pre><code class="language-{items[-1]}">'
60
+ else:
61
+ lines[i] = f"<br></code></pre>"
62
+ else:
63
+ if i > 0:
64
+ if count % 2 == 1:
65
+ line = line.replace("`", r"\`")
66
+ line = line.replace("<", "&lt;")
67
+ line = line.replace(">", "&gt;")
68
+ line = line.replace(" ", "&nbsp;")
69
+ line = line.replace("*", "&ast;")
70
+ line = line.replace("_", "&lowbar;")
71
+ line = line.replace("-", "&#45;")
72
+ line = line.replace(".", "&#46;")
73
+ line = line.replace("!", "&#33;")
74
+ line = line.replace("(", "&#40;")
75
+ line = line.replace(")", "&#41;")
76
+ line = line.replace("$", "&#36;")
77
+ lines[i] = "<br>" + line
78
+ text = "".join(lines)
79
+ return text
80
+
81
+
82
+ def _remove_image_special(text):
83
+ text = text.replace('<ref>', '').replace('</ref>', '')
84
+ return re.sub(r'<box>.*?(</box>|$)', '', text)
85
+
86
+
87
def is_video_file(filename):
    """Return True when *filename* ends with a known video extension (case-insensitive)."""
    video_suffixes = ('.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv', '.webm', '.mpeg')
    return filename.lower().endswith(video_suffixes)
90
+
91
+
92
def _launch_demo(args):
    """Build the Gradio chat UI for the DashScope multimodal model and launch it.

    Args:
        args: argparse.Namespace from _get_args(); only args.share is used at
              launch time (port/name/inbrowser are commented out below).
    """
    # NOTE(review): computed but never referenced in this function — either
    # dead code or intended for gr.Files temp storage; confirm before removing.
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio"
    )

    def predict(_chatbot, task_history):
        """Stream a model reply for the last user turn, yielding chatbot updates."""
        chat_query = _chatbot[-1][0]
        query = task_history[-1][0]
        if len(chat_query) == 0:
            # Empty submission: undo the just-added turn and stop.
            # NOTE(review): plain `return` inside a generator ends iteration;
            # the returned value is not delivered to gradio.
            _chatbot.pop()
            task_history.pop()
            return _chatbot
        print("User: " + _parse_text(query))
        history_cp = copy.deepcopy(task_history)
        full_response = ""
        messages = []
        content = []
        # Fold history into DashScope message format: media turns accumulate
        # into `content` until a text turn closes the user message.
        for q, a in history_cp:
            if isinstance(q, (tuple, list)):
                # Uploaded file path; classify as video vs image by extension.
                if is_video_file(q[0]):
                    content.append({'video': f'file://{q[0]}'})
                else:
                    content.append({'image': f'file://{q[0]}'})
            else:
                content.append({'text': q})
                messages.append({'role': 'user', 'content': content})
                messages.append({'role': 'assistant', 'content': [{'text': a}]})
                content = []
        # Drop the trailing assistant placeholder (its answer is still None).
        messages.pop()
        responses = MultiModalConversation.call(
            model='qwen3-vl-235b-a22b-instruct', messages=messages, stream=True,
        )
        for response in responses:
            if not response.status_code == HTTPStatus.OK:
                raise HTTPError(f'response.code: {response.code}\nresponse.message: {response.message}')
            response = response.output.choices[0].message.content
            response_text = []
            for ele in response:
                if 'text' in ele:
                    response_text.append(ele['text'])
                elif 'box' in ele:
                    response_text.append(ele['box'])
            response_text = ''.join(response_text)
            full_response += response_text  # accumulate streamed text
            # NOTE(review): this assumes each chunk is an incremental delta;
            # if DashScope streams cumulative content (incremental_output
            # unset), this would duplicate text — confirm API semantics.
            _chatbot[-1] = (_parse_text(chat_query), _remove_image_special(full_response))
            yield _chatbot

        task_history[-1] = (query, full_response)
        print("Qwen3-VL-Chat: " + _parse_text(full_response))
        yield _chatbot


    def regenerate(_chatbot, task_history):
        """Discard the last answer and re-run predict for the same query."""
        if not task_history:
            return _chatbot
        item = task_history[-1]
        if item[1] is None:
            # Last turn has no answer yet; nothing to regenerate.
            return _chatbot
        task_history[-1] = (item[0], None)
        chatbot_item = _chatbot.pop(-1)
        if chatbot_item[0] is None:
            _chatbot[-1] = (_chatbot[-1][0], None)
        else:
            _chatbot.append((chatbot_item[0], None))
        _chatbot_gen = predict(_chatbot, task_history)
        for _chatbot in _chatbot_gen:
            yield _chatbot

    def add_text(history, task_history, text):
        """Append the typed text as a new (query, None) turn in both histories."""
        task_text = text
        history = history if history is not None else []
        task_history = task_history if task_history is not None else []
        history = history + [(_parse_text(text), None)]
        task_history = task_history + [(task_text, None)]
        # NOTE(review): returns 3 values but the submit_btn.click below lists
        # only 2 outputs; gradio discards the extra "" (the textbox is cleared
        # by reset_user_input instead) — confirm this is intentional.
        return history, task_history, ""

    def add_file(history, task_history, file):
        """Append an uploaded file as a ((path,), None) turn in both histories."""
        history = history if history is not None else []
        task_history = task_history if task_history is not None else []
        history = history + [((file.name,), None)]
        task_history = task_history + [((file.name,), None)]
        return history, task_history

    def reset_user_input():
        """Clear the input textbox."""
        return gr.update(value="")

    def reset_state(task_history):
        """Clear both the model-side history (in place) and the chatbot display."""
        task_history.clear()
        return []

    with gr.Blocks() as demo:
        gr.Markdown("""<center><font size=3> Qwen3-VL-235B-A22B-Instruct Demo </center>""")

        chatbot = gr.Chatbot(label='Qwen3-VL-235B-A22B-Instruct', elem_classes="control-height", height=500)
        query = gr.Textbox(lines=2, label='Input')
        # task_history mirrors the chatbot but keeps raw (un-HTML-escaped) text.
        task_history = gr.State([])

        with gr.Row():
            addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image", "video"])
            submit_btn = gr.Button("🚀 Submit (发送)")
            regen_btn = gr.Button("🤔️ Regenerate (重试)")
            empty_bin = gr.Button("🧹 Clear History (清除历史)")

        submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
            predict, [chatbot, task_history], [chatbot], show_progress=True
        )
        submit_btn.click(reset_user_input, [], [query])
        empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
        addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)


    demo.queue(default_concurrency_limit=40).launch(
        share=args.share,
        # inbrowser=args.inbrowser,
        # server_port=args.server_port,
        # server_name=args.server_name,
    )
210
+
211
+
212
def main():
    """CLI entry point: parse arguments and launch the Gradio demo."""
    _launch_demo(_get_args())
215
+
216
+
217
# Only start the demo when executed directly (not on import).
if __name__ == '__main__':
    main()