# Reference:
# https://github.com/li-plus/chatglm.cpp
# https://github.com/li-plus/chatglm.cpp/blob/main/examples/web_demo.py
import chatglm_cpp
import gradio as gr
import argparse
from pathlib import Path
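
# The demo presumably needs the chatglm.cpp Python bindings and Gradio installed;
# a sketch based on the imports above (package names are assumptions, not pinned
# by this Space):
#   pip install chatglm-cpp gradio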

pipeline = chatglm_cpp.Pipeline("./chatglm3-ggml.bin")

# Generation defaults
max_length = 2048
top_p = 0.4
temp = 0.95
max_context_length = 512
mode = "chat"
top_k = 0
repeat_penalty = 1.0
threads = 0  # 0 lets chatglm.cpp choose its default thread count
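
# The quantized chatglm3-ggml.bin loaded above is presumably produced with the
# convert script from the referenced chatglm.cpp repo, which is what shrinks the
# original ~12G checkpoint to ~3.4G. A rough sketch (the q4_0 quantization type
# and the THUDM/chatglm3-6b source model are assumptions, not taken from this Space):
#   python3 chatglm_cpp/convert.py -i THUDM/chatglm3-6b -t q4_0 -o chatglm3-ggml.bin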

def postprocess(text):
    # The reference web_demo.py wraps output in <pre> tags when its --plain flag is set:
    # if args.plain:
    #     return f"<pre>{text}</pre>"
    return text

def predict(input, chatbot, max_length, top_p, temperature, messages):
    # Echo the user turn into the Gradio chatbot and into the chatglm_cpp history.
    chatbot.append((postprocess(input), ""))
    messages.append(chatglm_cpp.ChatMessage(role="user", content=input))

    generation_kwargs = dict(
        max_length=max_length,
        max_context_length=max_context_length,
        do_sample=temperature > 0,
        top_k=top_k,
        top_p=top_p,
        temperature=temperature,
        repetition_penalty=repeat_penalty,
        num_threads=threads,
        stream=True,
    )

    response = ""
    if mode == "chat":
        # Stream the assistant reply chunk by chunk, updating the last chat bubble.
        chunks = []
        for chunk in pipeline.chat(messages, **generation_kwargs):
            response += chunk.content
            chunks.append(chunk)
            chatbot[-1] = (chatbot[-1][0], postprocess(response))
            yield chatbot, messages
        # Fold the streamed chunks back into a single assistant message in the history.
        messages.append(pipeline.merge_streaming_messages(chunks))
    else:
        # Plain completion mode: generate() streams raw text chunks.
        for chunk in pipeline.generate(input, **generation_kwargs):
            response += chunk
            chatbot[-1] = (chatbot[-1][0], postprocess(response))
            yield chatbot, messages

    yield chatbot, messages

def reset_user_input():
    return gr.update(value="")


def reset_state():
    return [], []

with gr.Blocks() as demo:
    gr.HTML("""<h1 align="center">ChatGLM3 quantized with chatglm.cpp: size reduced from 12G to 3.4G</h1>""")

    chatbot = gr.Chatbot()
    with gr.Row():
        with gr.Column(scale=4):
            user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8)
            submitBtn = gr.Button("Submit", variant="primary")
        with gr.Column(scale=1):
            max_length = gr.Slider(0, 2048, value=max_length, step=1.0, label="Maximum Length", interactive=True)
            top_p = gr.Slider(0, 1, value=top_p, step=0.01, label="Top P", interactive=True)
            temperature = gr.Slider(0, 1, value=temp, step=0.01, label="Temperature", interactive=True)
            emptyBtn = gr.Button("Clear History")

    messages = gr.State([])

    submitBtn.click(
        predict,
        [user_input, chatbot, max_length, top_p, temperature, messages],
        [chatbot, messages],
        show_progress=True,
    )
    submitBtn.click(reset_user_input, [], [user_input])
    emptyBtn.click(reset_state, outputs=[chatbot, messages], show_progress=True)

demo.queue().launch(share=False, inbrowser=True)
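
# To try the demo locally, run this file with Python after installing the
# dependencies noted above (the file name is an assumption based on Hugging Face
# Spaces conventions):
#   python app.py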