GiantPandas committed on
Commit
ab93b0a
·
verified ·
1 Parent(s): ad49cc7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -234
app.py CHANGED
@@ -49,243 +49,73 @@ client = OpenAI(
49
  base_url=openai_api_base,
50
  )
51
 
52
- # Copyright (c) Alibaba Cloud.
53
- #
54
- # This source code is licensed under the license found in the
55
- # LICENSE file in the root directory of this source tree.
56
- import os
57
- import numpy as np
58
- from urllib3.exceptions import HTTPError
59
-
60
-
61
- from argparse import ArgumentParser
62
- from pathlib import Path
63
-
64
- import copy
65
- import gradio as gr
66
- import os
67
- import re
68
- import secrets
69
- import tempfile
70
- import requests
71
- from http import HTTPStatus
72
-
73
-
74
- REVISION = 'v1.0.4'
75
- BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
76
- PUNCTUATION = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
77
-
78
-
79
- def _get_args():
80
- parser = ArgumentParser()
81
- parser.add_argument("--revision", type=str, default=REVISION)
82
- parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
83
 
84
- parser.add_argument("--share", action="store_true", default=False,
85
- help="Create a publicly shareable link for the interface.")
86
- parser.add_argument("--inbrowser", action="store_true", default=False,
87
- help="Automatically launch the interface in a new tab on the default browser.")
88
- parser.add_argument("--server-port", type=int, default=7860,
89
- help="Demo server port.")
90
- parser.add_argument("--server-name", type=str, default="127.0.0.1",
91
- help="Demo server name.")
92
-
93
- args = parser.parse_args()
94
- return args
95
-
96
- def _parse_text(text):
97
- lines = text.split("\n")
98
- lines = [line for line in lines if line != ""]
99
- count = 0
100
- for i, line in enumerate(lines):
101
- if "```" in line:
102
- count += 1
103
- items = line.split("`")
104
- if count % 2 == 1:
105
- lines[i] = f'<pre><code class="language-{items[-1]}">'
106
- else:
107
- lines[i] = f"<br></code></pre>"
108
- else:
109
- if i > 0:
110
- if count % 2 == 1:
111
- line = line.replace("`", r"\`")
112
- line = line.replace("<", "&lt;")
113
- line = line.replace(">", "&gt;")
114
- line = line.replace(" ", "&nbsp;")
115
- line = line.replace("*", "&ast;")
116
- line = line.replace("_", "&lowbar;")
117
- line = line.replace("-", "&#45;")
118
- line = line.replace(".", "&#46;")
119
- line = line.replace("!", "&#33;")
120
- line = line.replace("(", "&#40;")
121
- line = line.replace(")", "&#41;")
122
- line = line.replace("$", "&#36;")
123
- lines[i] = "<br>" + line
124
- text = "".join(lines)
125
- return text
126
-
127
-
128
- def _remove_image_special(text):
129
- text = text.replace('<ref>', '').replace('</ref>', '')
130
- return re.sub(r'<box>.*?(</box>|$)', '', text)
131
-
132
-
133
def is_video_file(filename):
    """Return True when *filename* ends with a known video extension (case-insensitive)."""
    suffixes = ('.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv', '.webm', '.mpeg')
    return filename.lower().endswith(suffixes)
136
-
137
-
138
def _launch_demo(args):
    """Wire up and launch the Gradio chat UI backed by the OpenAI-compatible `client`.

    args: namespace from _get_args(); only args.share is consulted at launch time
    (the other server options are intentionally left commented out below).
    """

    def predict(_chatbot, task_history):
        """Stream the model's reply for the latest turn, yielding chatbot updates."""
        chat_query = _chatbot[-1][0]
        query = task_history[-1][0]
        if len(chat_query) == 0:
            # Empty submission: discard the placeholder turn and show nothing.
            _chatbot.pop()
            task_history.pop()
            return _chatbot
        print("User: " + _parse_text(query))
        history_cp = copy.deepcopy(task_history)

        # Convert (query, answer) history into API messages. File uploads are
        # stored as 1-tuples and accumulate into `content` until the next text
        # turn closes out a user message.
        messages = []
        content = []
        for q, a in history_cp:
            if isinstance(q, (tuple, list)):
                if is_video_file(q[0]):
                    content.append({'video': f'file://{q[0]}'})
                else:
                    content.append({'image': f'file://{q[0]}'})
            else:
                content.append({'text': q})
                messages.append({'role': 'user', 'content': content})
                messages.append({'role': 'assistant', 'content': [{'text': a}]})
                content = []
        # The final assistant entry is the still-empty pending reply; drop it.
        messages.pop()

        stream = client.chat.completions.create(
            model="Qwen2_5VL",
            messages=messages,
            extra_body={},
            extra_headers={
                "apikey": "empty"
            },
            stream=True,
            temperature=0.7,
            top_p=1.0,
        )

        # BUG FIX: the original code iterated an undefined name `responses` and
        # read DashScope-style fields (.status_code, .output.choices) that do
        # not exist on OpenAI SDK stream chunks. Consume the stream properly.
        full_response = ""
        for chunk in stream:
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta.content
            if delta:
                full_response += delta
                _chatbot[-1] = (_parse_text(chat_query),
                                _remove_image_special(full_response))
                yield _chatbot

        task_history[-1] = (query, full_response)
        print("Qwen2.5-VL-Chat: " + _parse_text(full_response))
        yield _chatbot

    def regenerate(_chatbot, task_history):
        """Retry the last exchange: blank the previous answer and re-run predict."""
        if not task_history:
            return _chatbot
        item = task_history[-1]
        if item[1] is None:
            # Nothing was answered yet; nothing to regenerate.
            return _chatbot
        task_history[-1] = (item[0], None)
        chatbot_item = _chatbot.pop(-1)
        if chatbot_item[0] is None:
            # Last chatbot row was a pure image/file reply; reset the row before it.
            _chatbot[-1] = (_chatbot[-1][0], None)
        else:
            _chatbot.append((chatbot_item[0], None))
        _chatbot_gen = predict(_chatbot, task_history)
        for _chatbot in _chatbot_gen:
            yield _chatbot

    def add_text(history, task_history, text):
        """Append a text turn to both the display and the task histories.

        BUG FIX: the original also returned a third value ("") while its click
        handler mapped only two outputs; the textbox is cleared by the separate
        reset_user_input handler, so return exactly two values here.
        """
        history = (history if history is not None else []) + [(_parse_text(text), None)]
        task_history = (task_history if task_history is not None else []) + [(text, None)]
        return history, task_history

    def add_file(history, task_history, file):
        """Record an uploaded image/video as a new (file,) turn in both histories."""
        history = (history if history is not None else []) + [((file.name,), None)]
        task_history = (task_history if task_history is not None else []) + [((file.name,), None)]
        return history, task_history

    def reset_user_input():
        """Clear the query textbox."""
        return gr.update(value="")

    def reset_state(task_history):
        """Clear the task history in place and empty the chatbot display."""
        task_history.clear()
        return []

    with gr.Blocks() as demo:
        gr.Markdown("""<center><font size=3> Qwen2.5-VL-32B-Instruct Demo </center>""")

        chatbot = gr.Chatbot(label='Qwen2.5-VL-32B-Instruct', elem_classes="control-height", height=500)
        query = gr.Textbox(lines=2, label='Input')
        task_history = gr.State([])

        with gr.Row():
            addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image", "video"])
            submit_btn = gr.Button("🚀 Submit (发送)")
            regen_btn = gr.Button("🤔️ Regenerate (重试)")
            empty_bin = gr.Button("🧹 Clear History (清除历史)")

        submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
            predict, [chatbot, task_history], [chatbot], show_progress=True
        )
        submit_btn.click(reset_user_input, [], [query])
        empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
        regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
        addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)

    demo.queue(default_concurrency_limit=40).launch(
        share=args.share,
        # inbrowser=args.inbrowser,
        # server_port=args.server_port,
        # server_name=args.server_name,
    )
283
-
284
-
285
def main():
    """CLI entry point: parse command-line options, then start the demo UI."""
    _launch_demo(_get_args())


if __name__ == '__main__':
    main()
 
49
  base_url=openai_api_base,
50
  )
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
 
54
def run_example(image, text_input=None):
    """Send one image (plus an optional text prompt) to the model, return its reply.

    image: array accepted by array_to_image_path (presumably a numpy frame from
    gr.Image — confirm against caller); text_input: optional prompt string,
    ignored when empty/whitespace.
    """
    saved_path = array_to_image_path(image)
    # The image travels inline as a base64-encoded JPEG data URL.
    parts = [
        {
            "type": "image_url",
            "image_url": {
                "url": f"data:image/jpeg;base64,{encode_image(saved_path)}"
            }
        }
    ]

    # Attach the text part only when the prompt is non-blank.
    if text_input is not None and text_input.strip():
        parts.append(
            {
                "type": "text",
                "text": text_input
            }
        )

    completion = client.chat.completions.create(
        model="Qwen2_5VL",
        messages=[
            {
                "role": "user",
                "content": parts
            }
        ],
        extra_body={},
        extra_headers={
            "apikey": "empty"
        },
        stream=False,
        temperature=0.7,
        top_p=1.0,
    )

    return completion.choices[0].message.content
95
+
96
+
97
+
98
# Constrain the output panel to a fixed, scrollable height.
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""

# Single-tab UI: image + optional prompt on the left, model reply on the right.
with gr.Blocks(css=css) as demo:
    gr.Markdown("Test")
    with gr.Tab(label="Input"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Picture")
                text_input = gr.Textbox(label="Text Prompt")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_text = gr.Textbox(label="Output Text")

    # One round-trip per click; run_example returns the reply string.
    submit_btn.click(run_example, [input_img, text_input], [output_text])
demo.queue()
# NOTE(review): share=True opens a public tunnel — confirm that is intended.
demo.launch(share=True)
120
 
121