Spaces:
Runtime error
Runtime error
Commit
·
59ccd2c
1
Parent(s):
afcdd52
sahabat tai
Browse files
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
import
|
| 2 |
import torch
|
| 3 |
import transformers
|
| 4 |
|
|
@@ -17,40 +17,50 @@ terminators = [
|
|
| 17 |
pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
|
| 18 |
]
|
| 19 |
|
| 20 |
-
#
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
|
|
|
|
|
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
# User input
|
| 30 |
-
user_input = st.chat_input("Type your message here...")
|
| 31 |
-
|
| 32 |
-
# Generate response
|
| 33 |
-
if user_input:
|
| 34 |
-
# Add user message to chat history
|
| 35 |
-
st.session_state.chat_history.append({"role": "user", "content": user_input})
|
| 36 |
-
|
| 37 |
-
# Prepare conversation context
|
| 38 |
-
conversation = [
|
| 39 |
-
{"role": msg["role"], "content": msg["content"]} for msg in st.session_state.chat_history
|
| 40 |
-
]
|
| 41 |
-
|
| 42 |
-
# Generate response using the pipeline
|
| 43 |
outputs = pipeline(
|
| 44 |
-
|
| 45 |
max_new_tokens=256,
|
| 46 |
eos_token_id=terminators,
|
| 47 |
)
|
| 48 |
-
|
| 49 |
# Extract and format the assistant's response
|
| 50 |
-
assistant_response = outputs[0]["generated_text"]
|
| 51 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
-
#
|
| 54 |
-
|
| 55 |
-
with st.chat_message(message["role"]):
|
| 56 |
-
st.markdown(message["content"])
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
import torch
|
| 3 |
import transformers
|
| 4 |
|
|
|
|
| 17 |
pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
|
| 18 |
]
|
| 19 |
|
# Chatbot Functionality
def chatbot(messages):
    """
    Handles user interactions and returns the model's response.

    Args:
        messages (list): List of messages with roles ('user' or 'assistant') and content.

    Returns:
        list: The same conversation list with the assistant's response appended.
    """
    # Run the model on the full conversation so it has context for its reply.
    outputs = pipeline(
        messages,
        max_new_tokens=256,
        eos_token_id=terminators,
    )

    # Extract the assistant's response, keeping the empty-output fallback.
    if outputs:
        generated = outputs[0]["generated_text"]
        if isinstance(generated, list):
            # With chat-style (list-of-messages) input, the text-generation
            # pipeline returns the whole conversation in "generated_text";
            # the newly generated assistant turn is its last entry. The
            # previous code appended the entire structure as the content.
            assistant_response = generated[-1]["content"]
        else:
            # Plain-string output (prompt-style input): use it as-is.
            assistant_response = generated
    else:
        assistant_response = "I'm sorry, I couldn't generate a response."

    messages.append({"role": "assistant", "content": assistant_response})
    return messages
|
| 40 |
+
|
| 41 |
+
# Gradio Chat Interface
|
| 42 |
+
with gr.Blocks() as demo:
|
| 43 |
+
gr.Markdown("# 🤗 Gemma2 Chatbot")
|
| 44 |
+
gr.Markdown("A chatbot that understands Javanese and Sundanese, powered by `GoToCompany/gemma2`.")
|
| 45 |
+
|
| 46 |
+
chat_history = gr.Chatbot(label="Gemma2 Chatbot")
|
| 47 |
+
user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
|
| 48 |
+
send_button = gr.Button("Send")
|
| 49 |
+
|
| 50 |
+
def respond(chat_history, user_message):
|
| 51 |
+
# Add user message to chat history
|
| 52 |
+
chat_history.append(("user", user_message))
|
| 53 |
+
|
| 54 |
+
# Generate assistant's response
|
| 55 |
+
conversation = [{"role": role, "content": content} for role, content in chat_history]
|
| 56 |
+
response = chatbot(conversation)
|
| 57 |
+
|
| 58 |
+
# Add assistant's response to chat history
|
| 59 |
+
assistant_message = response[-1]["content"]
|
| 60 |
+
chat_history.append(("assistant", assistant_message))
|
| 61 |
+
return chat_history, ""
|
| 62 |
+
|
| 63 |
+
send_button.click(respond, inputs=[chat_history, user_input], outputs=[chat_history, user_input])
|
| 64 |
|
| 65 |
+
# Launch the app
|
| 66 |
+
demo.launch()
|
|
|
|
|
|