GiantPandas committed on
Commit
743c97b
·
verified ·
1 Parent(s): 9a481c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -51
app.py CHANGED
@@ -1,9 +1,11 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
 
 
7
 
8
 
9
  from openai import OpenAI
@@ -18,53 +20,118 @@ client = OpenAI(
18
  )
19
 
20
 
21
- def respond(
22
- message,
23
- history: list[tuple[str, str]],
24
- system_message,
25
- max_tokens,
26
- temperature,
27
- top_p,
28
- ):
29
- messages = [{"role": "system", "content": system_message}]
30
-
31
- for val in history:
32
- if val[0]:
33
- messages.append({"role": "user", "content": val[0]})
34
- if val[1]:
35
- messages.append({"role": "assistant", "content": val[1]})
36
-
37
- messages.append({"role": "user", "content": message})
38
-
39
- response = openai.chat.completions.create(
40
- model="Qwen2_5VL",
41
- messages=messages,
42
- extra_body={},
43
- extra_headers={
44
- "apikey": "empty"
45
- },
46
- stream=True,
47
- temperature=0.7,
48
- top_p=1.0,
49
- )
50
- for chunk in response:
51
- yield chunk.choices[0].delta.content
52
-
53
-
54
- """
55
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
56
- """
57
- demo = gr.ChatInterface(
58
- respond,
59
- additional_inputs=[
60
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
61
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
62
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
63
- gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
64
- gr.Image(type="pil", label="Upload Image", optional=True) # 新增的图像输入组件
65
- ]
66
- )
 
 
 
 
 
67
 
 
 
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import time
4
+ import os
5
+ import sys
6
+ import json
7
+ import base64
8
+ import tqdm
9
 
10
 
11
  from openai import OpenAI
 
20
  )
21
 
22
 
23
def convert_sigle_message_to_gpt(dialogs):
    """Merge consecutive same-role chat turns into OpenAI-style messages.

    Each input item is a dict ``{"role": ..., "content": ...}`` where
    ``content`` is either a plain string (text) or a tuple of file paths
    (images, as produced by Gradio's multimodal input). Consecutive items
    that share a role are collapsed into a single message whose "content"
    is a list of typed parts (``{"type": "text", ...}`` or
    ``{"type": "image_url", ...}``).

    Returns an empty list for empty/None input.
    """

    def _parts_for(content):
        # A tuple of paths means one or more uploaded images; anything
        # else is treated as plain text. This helper replaces the logic
        # that was previously duplicated in both branches below.
        if isinstance(content, tuple):
            # NOTE(review): encode_image is not defined in this file —
            # presumably it base64-encodes the file at `path`; confirm.
            return [
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"},
                }
                for path in content
            ]
        return [{"type": "text", "text": content}]

    if not dialogs:
        return []

    merged = []
    current_role = None
    current_contents = []

    for item in dialogs:
        role = item["role"]
        if role != current_role:
            # Role switched: flush the accumulated segment, start a new one.
            if current_role is not None:
                merged.append({"role": current_role, "content": current_contents})
            current_role = role
            current_contents = []
        current_contents.extend(_parts_for(item["content"]))

    # Flush the final segment after the loop ends.
    if current_role is not None:
        merged.append({"role": current_role, "content": current_contents})

    return merged
71
+
72
def clear_fn():
    """Reset the chatbot history to an empty list (Clear-button callback)."""
    empty_history = []
    return empty_history
74
 
75
def store_values(num_val, name_val):
    """Pass both values through unchanged so they can be kept in state."""
    stored = (num_val, name_val)
    return stored
77
 
78
def use_values(num_state, name_state):
    """Render the stored quantity and filename as a display string."""
    template = "使用了数量: {},文件名: {}"
    return template.format(num_state, name_state)
80
+
81
def main():
    """Build and launch the Gradio multimodal chat UI."""

    def bot(history: list):
        # Generator callback: converts the Gradio history to OpenAI message
        # format, streams a completion, and yields the growing history so
        # the chatbot updates incrementally.
        history = convert_sigle_message_to_gpt(history)
        # NOTE(review): `openai` (the module) is never imported here — only
        # `from openai import OpenAI` is; this likely should be the
        # configured client instance. Confirm against the full file.
        response = openai.chat.completions.create(
            # NOTE(review): `self` is not defined in this plain nested
            # function — these `self.*` reads (model_name, extra, apikey,
            # stream) will raise NameError at call time; this block looks
            # copied from a class method. TODO: fix the configuration source.
            model=self.model_name,
            messages=history,
            extra_body=self.extra,
            extra_headers={
                "apikey": self.apikey
            },
            stream=self.stream,
            temperature=0.7,
            top_p=1.0,
        )

        # Seed an empty assistant turn, then append streamed content to it.
        history.append({"role": "assistant", "content": ""})
        for character in response:
            if not character:
                continue
            # NOTE(review): when streaming, each item is a chunk object, not
            # a str — presumably this should append
            # `character.choices[0].delta.content`; verify.
            history[-1]["content"] += character
            yield history

    # Page layout
    # NOTE(review): load_css_as_string, args, add_message and
    # print_like_dislike are not defined in this view — presumably provided
    # elsewhere in the module; verify before running.
    with gr.Blocks(css=load_css_as_string(args.styles)) as demo:
        with gr.Row():
            with gr.Column(scale=4):
                chatbot = gr.Chatbot(elem_id="chatbot", bubble_full_width=True, type="messages")
                chat_input = gr.MultimodalTextbox(
                    interactive=True,
                    file_count="multiple",
                    placeholder="Enter message or upload file...",
                    show_label=False,
                    sources=["microphone", "upload"],
                    elem_id="chat_page"
                )
                clear_button = gr.Button('Clear')
                chat_msg = chat_input.submit(
                    add_message, [chatbot, chat_input], [chatbot, chat_input]
                )

                # Chain: user submit -> bot streams reply -> re-enable input.
                bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
                bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

                chatbot.like(print_like_dislike, None, None, like_user_message=True)

                clear_button.click(fn=clear_fn, inputs=[], outputs=chatbot)

        demo.launch(share=True)
133
+
134
+
135
# Script entry point: only launch the UI when run directly, not on import.
if __name__ == "__main__":
    main()
137
+