Create app.py
app.py ADDED
@@ -0,0 +1,66 @@
import chatglm_cpp
import gradio as gr
from pathlib import Path

# Load the 4-bit quantized ChatGLM3 model through chatglm.cpp.
model_file_path = "chatglm3-ggml_q4_0.bin"
chatglm_llm = chatglm_cpp.Pipeline(Path(model_file_path))

def predict(message, history):
    # Earlier llama-cpp-python variant, kept for reference:
    '''
    messages = []
    for human_content, system_content in history:
        message_human = {
            "role": "user",
            "content": human_content + "\n",
        }
        message_system = {
            "role": "system",
            "content": system_content + "\n",
        }
        messages.append(message_human)
        messages.append(message_system)
    message_human = {
        "role": "user",
        "content": message + "\n",
    }
    messages.append(message_human)
    # Get the answer from Llama (streaming on)
    streamer = llama.create_chat_completion(messages, stream=True)
    '''
    # Flatten Gradio's [(user, assistant), ...] history into the
    # alternating list of strings that chatglm_cpp expects.
    flatten_history = []
    for user_text, assistant_text in history:
        flatten_history.append(user_text)
        flatten_history.append(assistant_text)

    streamer = chatglm_llm.chat(
        history=flatten_history + [message],
        do_sample=False,
        stream=True,
    )

    '''
    partial_message = ""
    for msg in streamer:
        message = msg['choices'][0]['delta']
        if 'content' in message:
            partial_message += message['content']
            yield partial_message
    '''
    # Accumulate streamed chunks, yielding the running text so the
    # chatbot pane updates incrementally.
    response = ""
    for new_text in streamer:
        response += new_text
        yield response

gr.ChatInterface(
    predict,
    chatbot=gr.Chatbot(height=300),
    # Placeholder: "Hello, AI assistant ChatGLM3, may I ask you some questions?"
    textbox=gr.Textbox(placeholder="你好 人工智能助手 ChatGLM3,我可以问你一些问题吗?", container=False, scale=7),
    title="ChatGLM3 Chatbot 🐼",
    # Description: "Chat with the AI assistant ChatGLM3"
    description="与人工智能助手 ChatGLM3 进行对话",
    theme="soft",
    # Examples: "Have you heard of Marx?", "How should economic development
    # be carried out?", "What were the characteristics of the Ming dynasty
    # cabinet system?", "Please explain the scene described by these emoji".
    examples=["你听说过马克思吗?", "如何进行经济建设?", "明朝内阁制度的特点是什么?",
              "请解释下面的emoji符号描述的情景👨👩🔥❄️",
              ],
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
).launch(enable_queue=True)  # enable_queue in launch() is Gradio 3.x-style; newer Gradio uses .queue()
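For a quick check outside Gradio, here is a minimal sketch that drives the pipeline directly. It assumes only the chatglm_cpp.Pipeline API already used in app.py and that chatglm3-ggml_q4_0.bin sits in the working directory; the prompt string is an arbitrary example.

# Sanity-check the quantized model without the Gradio layer.
import chatglm_cpp
from pathlib import Path

pipeline = chatglm_cpp.Pipeline(Path("chatglm3-ggml_q4_0.bin"))

# stream=True makes chat() yield text chunks; joining them reproduces
# what predict() streams to the chatbot.
response = ""
for chunk in pipeline.chat(history=["你好"], do_sample=False, stream=True):
    response += chunk
print(response)

If this prints a coherent reply, any remaining issue lies in the Gradio wiring rather than the model file.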