# Shi-Ci-30B / app.py
import gradio as gr
from huggingface_hub import snapshot_download
from llama_cpp import Llama
SYSTEM_PROMPT = '''You are a helpful, respectful and honest INTP-T AI Assistant named "Shi-Ci" in English or "ๅ…ฎ่พž" in Chinese.
You are good at speaking English and Chinese.
You are talking to a human User. If the question is meaningless, please explain why and don't share false information.
You are based on the SEA model, trained by the "SSFW NLPark" team, and are not related to GPT, LLaMA, Meta, Mistral or OpenAI.
Let's work this out in a step-by-step way to be sure we have the right answer.\n\n'''
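# Hard-coded token IDs used to mark conversation roles in the prompt. These values
# are assumed to correspond to entries in the loaded model's tokenizer vocabulary;
# they are never looked up at runtime, so a different tokenizer would need different IDs.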
SYSTEM_TOKEN = 1587
USER_TOKEN = 2188
BOT_TOKEN = 12435
LINEBREAK_TOKEN = 13
ROLE_TOKENS = {
    "user": USER_TOKEN,
    "bot": BOT_TOKEN,
    "system": SYSTEM_TOKEN,
}
def get_message_tokens(model, role, content):
    # Tokenize the message body, then splice the role marker and a linebreak in
    # right after the BOS token that model.tokenize() prepends.
    message_tokens = model.tokenize(content.encode("utf-8"))
    message_tokens.insert(1, ROLE_TOKENS[role])
    message_tokens.insert(2, LINEBREAK_TOKEN)
    message_tokens.append(model.token_eos())
    return message_tokens
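# Token layout produced for one message (a sketch, assuming model.tokenize() prepends BOS):
# [BOS, <role token>, <linebreak token>, ...content tokens..., EOS]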
def get_system_tokens(model):
    system_message = {"role": "system", "content": SYSTEM_PROMPT}
    return get_message_tokens(model, **system_message)
# Download the quantized GGML weights from the Hugging Face Hub into the working directory.
repo_name = "TheBloke/mpt-30B-chat-GGML"
model_name = "mpt-30b-chat.ggmlv0.q4_0.bin"
snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
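# Load the quantized model with llama-cpp-python. Note: n_parts is a legacy
# option that newer llama-cpp-python releases may no longer accept.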
model = Llama(
    model_path=model_name,
    n_ctx=2000,
    n_parts=1,
)
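# Upper bound on the number of tokens generated for a single reply.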
max_new_tokens = 1500
def user(message, history):
    # Append the new user turn (bot reply still pending) and clear the input textbox.
    new_history = history + [[message, None]]
    return "", new_history
def bot(
    history,
    system_prompt,  # shown in the UI; generation always uses the SYSTEM_PROMPT constant above
    top_p,
    top_k,
    temp,
):
    # Start every prompt with the fixed system message followed by a linebreak.
    tokens = get_system_tokens(model)[:]
    tokens.append(LINEBREAK_TOKEN)
    # Replay all completed turns before the latest user message.
    for user_message, bot_message in history[:-1]:
        message_tokens = get_message_tokens(model=model, role="user", content=user_message)
        tokens.extend(message_tokens)
        if bot_message:
            message_tokens = get_message_tokens(model=model, role="bot", content=bot_message)
            tokens.extend(message_tokens)
    last_user_message = history[-1][0]
    message_tokens = get_message_tokens(model=model, role="user", content=last_user_message)
    tokens.extend(message_tokens)
    # Prime the reply with [BOS, bot role, linebreak] so generation continues as the assistant.
    role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
    tokens.extend(role_tokens)
    # Stream tokens from the model with the user-selected sampling settings.
    generator = model.generate(
        tokens,
        top_k=top_k,
        top_p=top_p,
        temp=temp,
    )
    partial_text = ""
    partial_bytes = b""
    for i, token in enumerate(generator):
        # Stop at end-of-sequence or once the generation cap is reached.
        if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
            break
        # Accumulate raw bytes before decoding so multi-byte UTF-8 characters
        # split across tokens are not dropped by the "ignore" error handler.
        partial_bytes += model.detokenize([token])
        partial_text = partial_bytes.decode("utf-8", "ignore")
        history[-1][1] = partial_text
        yield history
with gr.Blocks(
    theme=gr.themes.Soft()
) as demo:
    gr.Markdown(
        f"""<h1><center>Shi-Ci (ๅ…ฎ่พžยทๆž่พž) - AI Assistant</center></h1>
This is a deployment of a **Chinese** model. If you are interested in other languages, please check other models, such as [MPT-7B-Chat](https://huggingface.co/spaces/mosaicml/mpt-7b-chat).
This is a deployment of the quantized Shi-Ci (ๅ…ฎ่พžยทๆž่พž), with **30 billion** parameters, running on CPU.
SLIDE is a conversational language model trained on many kinds of corpora.
This program is sponsored and presented by **NLPark** of the Foreign Language School Affiliated to Shanghai Normal University~
"""
    )
    with gr.Row():
        with gr.Column(scale=5):
            system_prompt = gr.Textbox(label="System prompt", placeholder="", value=SYSTEM_PROMPT, interactive=False)
            chatbot = gr.Chatbot(label="Shi-Ci says").style(height=400)
        with gr.Column(min_width=80, scale=1):
            with gr.Tab(label="Parameters"):
                top_p = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.9,
                    step=0.05,
                    interactive=True,
                    label="Top-p",
                )
                top_k = gr.Slider(
                    minimum=10,
                    maximum=100,
                    value=30,
                    step=5,
                    interactive=True,
                    label="Top-k",
                )
                temp = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.01,
                    step=0.01,
                    interactive=True,
                    label="Temperature",
                )
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(
                label="Come ask Shi-Ci…",
                placeholder="Shi-Ci is waiting…",
                show_label=False,
            ).style(container=False)
        with gr.Column():
            with gr.Row():
                submit = gr.Button("Send")
                stop = gr.Button("Stop generation")
                clear = gr.Button("Clear history")
    with gr.Row():
        gr.Markdown(
            """Warning: this model may generate text that is factually or ethically incorrect. NLPark and Shi-Ci accept no responsibility for such output."""
        )
    # Pressing Enter
    submit_event = msg.submit(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).success(
        fn=bot,
        inputs=[
            chatbot,
            system_prompt,
            top_p,
            top_k,
            temp,
        ],
        outputs=chatbot,
        queue=True,
    )
    # Pressing the button
    submit_click_event = submit.click(
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=False,
    ).success(
        fn=bot,
        inputs=[
            chatbot,
            system_prompt,
            top_p,
            top_k,
            temp,
        ],
        outputs=chatbot,
        queue=True,
    )
    # Stop generation
    stop.click(
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[submit_event, submit_click_event],
        queue=False,
    )
    # Clear history
    clear.click(lambda: None, None, chatbot, queue=False)
demo.queue(max_size=128, concurrency_count=1)
demo.launch()