import gradio as gr
from llama_cpp import Llama
|
|
# Load the local GGUF model: n_ctx sets the context window and
# chat_format="chatml" selects llama-cpp-python's built-in ChatML template.
llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")
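
# Streaming chat handler for gr.ChatInterface: rebuilds the conversation as
# ChatML messages and yields the partial reply as tokens stream in.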
def generate(message, history, temperature=0.3, max_tokens=512):
    system_prompt = """You are a super intelligent AI assistant.
    I want you to think carefully, step by step.
    Once you've thought things through step by step, check your responses
    before issuing them. I want you to answer clearly, accurately,
    and without any unnecessary words. Be concise and provide exact answers
    based on known data, without making things up. You're called "Little Llama":
    a language model that was compressed, but you're still the smartest!"""
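    # Start from the system prompt, then replay the prior turns so the
    # model sees the whole conversation.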
    formatted_prompt = [{"role": "system", "content": system_prompt}]
    for user_prompt, bot_response in history:
        formatted_prompt.append({"role": "user", "content": user_prompt})
        formatted_prompt.append({"role": "assistant", "content": bot_response})
    formatted_prompt.append({"role": "user", "content": message})
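    # Request a streamed chat completion so tokens arrive incrementally.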
    stream_response = llm.create_chat_completion(
        messages=formatted_prompt, temperature=temperature, max_tokens=max_tokens, stream=True)
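    # Accumulate streamed deltas; yielding the running text lets
    # gr.ChatInterface update the chat bubble live.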
    response = ""
    for chunk in stream_response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            response += delta["content"]
            yield response
|
|
# Chatbot pane with custom avatars and a copy button (options from older Gradio releases).
mychatbot = gr.Chatbot(
    avatar_images=["user.png", "botnb.png"], bubble_full_width=False,
    show_label=False, show_copy_button=True, likeable=True,
)
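
# ChatInterface wires generate() into the chatbot; retry/undo buttons are hidden.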
iface = gr.ChatInterface(fn=generate, chatbot=mychatbot, retry_btn=None, undo_btn=None)
|
|
with gr.Blocks() as demo:
    gr.HTML("<center><h1>Llama 13B - GGUF Q4_K_M</h1></center>")
    iface.render()
|
|
# queue() delivers streamed generator output incrementally; server_name="0.0.0.0"
# listens on all interfaces so the app is reachable from outside the host.
demo.queue().launch(show_api=False, server_name="0.0.0.0")