Update app.py
app.py
CHANGED
@@ -1,31 +1,54 @@
 import gradio as gr
+import torch
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
+# Load tokenizer and model
+model_id = "HuggingFaceH4/zephyr-7b-beta"
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto"
+)
+
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# Define the Gradio interface
 with gr.Blocks(fill_height=True) as demo:
     with gr.Sidebar():
         gr.Markdown("## Zephyr-7B Unlimited Assistant")
         gr.Markdown(
-            "This assistant is powered by the
-            "
+            "This assistant is powered by the HuggingFaceH4/zephyr-7b-beta model.\n"
+            "You can start chatting right away!"
         )
-        login_button = gr.LoginButton("🔐 Sign in to Hugging Face")
-        clear_button = gr.Button("🧹 Clear Chat")
+        login_button = gr.LoginButton("🔐 Sign in to Hugging Face")  # Optional UI
 
-    chatbot = gr.Chatbot(label="
+    chatbot = gr.Chatbot(label="🧠 Zephyr-7B Assistant")
     user_input = gr.Textbox(placeholder="Ask anything...", show_label=False)
-    state = gr.State([])  # Keeps track of chat history
 
-
+    chat_history = []
+
     def chat(user_msg, history):
-
-
-
+        # Add system + user messages to chat history
+        messages = [
+            {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate."}
+        ]
+        for human, ai in history:
+            messages.append({"role": "user", "content": human})
+            messages.append({"role": "assistant", "content": ai})
+        messages.append({"role": "user", "content": user_msg})
+
+        # Format the prompt using the tokenizer's chat template
+        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+        # Generate response
+        outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+        response = outputs[0]["generated_text"].split("</s>")[-1].strip()
+
+        # Append new interaction
         history.append((user_msg, response))
         return history, ""
 
-
-        return [], ""
-
-    user_input.submit(chat, inputs=[user_input, state], outputs=[chatbot, user_input])
-    clear_button.click(clear, outputs=[chatbot, user_input, state])
+    user_input.submit(chat, inputs=[user_input, chatbot], outputs=[chatbot, user_input])
 
 demo.launch()
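For reference, the new chat() builds a role/content message list, renders it with the tokenizer's chat template, and then trims the pipeline output on "</s>". Because return_full_text defaults to True in the text-generation pipeline, generated_text contains the prompt as well as the completion, and Zephyr's chat template ends each system and user turn with "</s>", so taking the text after the last "</s>" is what isolates the reply. Below is a minimal standalone sketch of the prompt string this produces; only the model id and the apply_chat_template call come from the commit, and the example messages are illustrative.

from transformers import AutoTokenizer

# Same model id as app.py; the message list below is illustrative only.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
messages = [
    {"role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate."},
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Roughly:
# <|system|>
# You are a friendly chatbot who always responds in the style of a pirate.</s>
# <|user|>
# How many helicopters can a human eat in one sitting?</s>
# <|assistant|>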