GiantPandas committed on
Commit
2fb3bcc
·
verified ·
1 Parent(s): ab93b0a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +228 -23
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
  import time
4
  import os
5
  import sys
@@ -10,9 +9,26 @@ import base64
10
  from datetime import datetime
11
  import subprocess
12
  import time
13
-
 
 
14
  import numpy as np
15
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
 
18
  # Function to encode the image
@@ -41,7 +57,7 @@ def array_to_image_path(image_array):
41
  from openai import OpenAI
42
  # 设置 OpenAI 的 API 密钥和 API 基础 URL 以使用 vLLM 的 API 服务器。
43
  openai_api_key = "EMPTY"
44
- openai_api_base = "http://47.117.17.202:12345/v1"
45
 
46
 
47
  client = OpenAI(
@@ -95,27 +111,216 @@ def run_example(image, text_input=None):
95
 
96
 
97
 
98
- css = """
99
- #output {
100
- height: 500px;
101
- overflow: auto;
102
- border: 1px solid #ccc;
103
- }
104
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
106
- with gr.Blocks(css=css) as demo:
107
- gr.Markdown("Test")
108
- with gr.Tab(label="Input"):
109
  with gr.Row():
110
- with gr.Column():
111
- input_img = gr.Image(label="Input Picture")
112
- text_input = gr.Textbox(label="Text Prompt")
113
- submit_btn = gr.Button(value="Submit")
114
- with gr.Column():
115
- output_text = gr.Textbox(label="Output Text")
116
-
117
- submit_btn.click(run_example, [input_img, text_input], [output_text])
118
- demo.queue()
119
- demo.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
 
 
 
 
1
  import gradio as gr
 
2
  import time
3
  import os
4
  import sys
 
9
  from datetime import datetime
10
  import subprocess
11
  import time
12
+ from http import HTTPStatus
13
+ from urllib3.exceptions import HTTPError
14
+ from pathlib import Path
15
  import numpy as np
16
  from PIL import Image
17
+ import os
18
+ import numpy as np
19
+ from urllib3.exceptions import HTTPError
20
+
21
+ from argparse import ArgumentParser
22
+ from pathlib import Path
23
+
24
+ import copy
25
+ import gradio as gr
26
+ import os
27
+ import re
28
+ import secrets
29
+ import tempfile
30
+ import requests
31
+ from http import HTTPStatus
32
 
33
 
34
  # Function to encode the image
 
57
  from openai import OpenAI
58
  # 设置 OpenAI 的 API 密钥和 API 基础 URL 以使用 vLLM 的 API 服务器。
59
  openai_api_key = "EMPTY"
60
+ openai_api_base = "http://47.117.17.202:9999/v1"
61
 
62
 
63
  client = OpenAI(
 
111
 
112
 
113
 
114
# Default model revision requested via --revision.
REVISION = 'v1.0.4'
# Regex for the <box>...</box> grounding spans the model can emit.
BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
# NOTE(review): this literal looks truncated by an encoding round-trip — the
# original fullwidth closing quote was converted to an ASCII '"', so the string
# now ends after "!?。" and the rest of the line parses as a comment. Restore
# the fullwidth punctuation list if PUNCTUATION is ever used — TODO confirm.
PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
117
+
118
+
119
def _get_args(argv=None):
    """Parse command-line options for the demo.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (argparse's own default). Exposed as a parameter so the parser can
            be exercised programmatically without patching ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = ArgumentParser()
    parser.add_argument("--revision", type=str, default=REVISION)
    parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")

    parser.add_argument("--share", action="store_true", default=False,
                        help="Create a publicly shareable link for the interface.")
    parser.add_argument("--inbrowser", action="store_true", default=False,
                        help="Automatically launch the interface in a new tab on the default browser.")
    parser.add_argument("--server-port", type=int, default=7860,
                        help="Demo server port.")
    parser.add_argument("--server-name", type=str, default="127.0.0.1",
                        help="Demo server name.")

    return parser.parse_args(argv)
135
+
136
+ def _parse_text(text):
137
+ lines = text.split("\n")
138
+ lines = [line for line in lines if line != ""]
139
+ count = 0
140
+ for i, line in enumerate(lines):
141
+ if "```" in line:
142
+ count += 1
143
+ items = line.split("`")
144
+ if count % 2 == 1:
145
+ lines[i] = f'<pre><code class="language-{items[-1]}">'
146
+ else:
147
+ lines[i] = f"<br></code></pre>"
148
+ else:
149
+ if i > 0:
150
+ if count % 2 == 1:
151
+ line = line.replace("`", r"\`")
152
+ line = line.replace("<", "&lt;")
153
+ line = line.replace(">", "&gt;")
154
+ line = line.replace(" ", "&nbsp;")
155
+ line = line.replace("*", "&ast;")
156
+ line = line.replace("_", "&lowbar;")
157
+ line = line.replace("-", "&#45;")
158
+ line = line.replace(".", "&#46;")
159
+ line = line.replace("!", "&#33;")
160
+ line = line.replace("(", "&#40;")
161
+ line = line.replace(")", "&#41;")
162
+ line = line.replace("$", "&#36;")
163
+ lines[i] = "<br>" + line
164
+ text = "".join(lines)
165
+ return text
166
+
167
+
168
+ def _remove_image_special(text):
169
+ text = text.replace('<ref>', '').replace('</ref>', '')
170
+ return re.sub(r'<box>.*?(</box>|$)', '', text)
171
+
172
+
173
def is_video_file(filename):
    """Return True if *filename* ends with a recognized video extension
    (case-insensitive)."""
    video_suffixes = ('.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv', '.webm', '.mpeg')
    return filename.lower().endswith(video_suffixes)
176
+
177
+
178
def _launch_demo(args):
    """Build and launch the Gradio chat UI for the Qwen2.5-VL demo.

    Args:
        args: Parsed CLI options from ``_get_args``. Only ``args.share`` is
            currently forwarded to ``demo.launch``.
    """
    # Where Gradio stages uploaded files.
    uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
        Path(tempfile.gettempdir()) / "gradio"
    )

    def predict(_chatbot, task_history):
        """Stream the model's reply for the latest user turn into the chatbot."""
        chat_query = _chatbot[-1][0]
        query = task_history[-1][0]
        if len(chat_query) == 0:
            # Empty submission: drop the placeholder turn and do nothing.
            _chatbot.pop()
            task_history.pop()
            return _chatbot
        print("User: " + _parse_text(query))
        history_cp = copy.deepcopy(task_history)

        # Convert (query, answer) history into chat messages; uploaded files
        # are referenced as file:// URIs. A text query closes the user turn.
        # NOTE(review): {'image'|'video'|'text': ...} parts are DashScope-style;
        # an OpenAI-compatible vLLM server normally expects
        # {'type': 'text'|'image_url', ...} content parts — confirm against the
        # server before relying on multimodal turns.
        messages = []
        content = []
        for q, a in history_cp:
            if isinstance(q, (tuple, list)):
                if is_video_file(q[0]):
                    content.append({'video': f'file://{q[0]}'})
                else:
                    content.append({'image': f'file://{q[0]}'})
            else:
                content.append({'text': q})
                messages.append({'role': 'user', 'content': content})
                messages.append({'role': 'assistant', 'content': [{'text': a}]})
                content = []
        messages.pop()  # drop the empty assistant slot — the model fills it

        responses = client.chat.completions.create(
            model="Qwen2_5VL",
            messages=messages,
            extra_body={},
            extra_headers={
                "apikey": "empty"
            },
            stream=True,
            temperature=0.7,
            top_p=1.0,
        )

        # Fix: the previous revision assigned the stream to `response` but then
        # iterated an undefined name `responses` (NameError), and consumed each
        # item with DashScope fields (status_code / output.choices /
        # result_image) that OpenAI streaming chunks do not have. Consume the
        # OpenAI delta stream instead, yielding a partial answer per chunk.
        full_response = ""
        for chunk in responses:
            delta = chunk.choices[0].delta.content if chunk.choices else None
            if delta:
                full_response += delta
                _chatbot[-1] = (_parse_text(chat_query), _remove_image_special(full_response))
                yield _chatbot

        task_history[-1] = (query, full_response)
        print("Qwen2.5-VL-Chat: " + _parse_text(full_response))
        yield _chatbot

    def regenerate(_chatbot, task_history):
        """Re-run the model for the last turn, replacing its previous answer."""
        if not task_history:
            return _chatbot
        item = task_history[-1]
        if item[1] is None:
            # Nothing was answered yet; nothing to regenerate.
            return _chatbot
        task_history[-1] = (item[0], None)
        chatbot_item = _chatbot.pop(-1)
        if chatbot_item[0] is None:
            # Last display row was a model-only row (e.g. an image); clear the
            # answer on the preceding row instead.
            _chatbot[-1] = (_chatbot[-1][0], None)
        else:
            _chatbot.append((chatbot_item[0], None))
        yield from predict(_chatbot, task_history)

    def add_text(history, task_history, text):
        """Append a user text message to both display and task histories.

        Returns the updated histories; the textbox is cleared by a separate
        ``reset_user_input`` event (the previous revision also returned "",
        which did not match the event's two declared outputs).
        """
        history = history if history is not None else []
        task_history = task_history if task_history is not None else []
        history = history + [(_parse_text(text), None)]
        task_history = task_history + [(text, None)]
        return history, task_history

    def add_file(history, task_history, file):
        """Append an uploaded image/video to both histories as a file tuple."""
        history = history if history is not None else []
        task_history = task_history if task_history is not None else []
        history = history + [((file.name,), None)]
        task_history = task_history + [((file.name,), None)]
        return history, task_history

    def reset_user_input():
        """Clear the input textbox."""
        return gr.update(value="")

    def reset_state(task_history):
        """Clear the task history and wipe the chatbot display."""
        task_history.clear()
        return []

    with gr.Blocks() as demo:
        gr.Markdown("""<center><font size=3> Qwen2.5-VL-32B-Instruct Demo </center>""")

        chatbot = gr.Chatbot(label='Qwen2.5-VL-32B-Instruct', elem_classes="control-height", height=500)
        query = gr.Textbox(lines=2, label='Input')
        task_history = gr.State([])

        with gr.Row():
            addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image", "video"])
            submit_btn = gr.Button("🚀 Submit (发送)")
            regen_btn = gr.Button("🤔️ Regenerate (重试)")
            empty_bin = gr.Button("🧹 Clear History (清除历史)")

        submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
            predict, [chatbot, task_history], [chatbot], show_progress=True
        )
        submit_btn.click(reset_user_input, [], [query])
        empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
        addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)

    demo.queue(default_concurrency_limit=40).launch(
        share=args.share,
        # inbrowser=args.inbrowser,
        # server_port=args.server_port,
        # server_name=args.server_name,
    )
318
+
319
+
320
def main():
    """CLI entry point: parse arguments, then build and launch the demo."""
    _launch_demo(_get_args())


if __name__ == '__main__':
    main()