Capstone2.0 / app.py
hagardner18's picture
increased max tokens to 512
f4777e7 verified
raw
history blame contribute delete
763 Bytes
import gradio as gr
from huggingface_hub import InferenceClient
# Shared inference client pointed at the Zephyr-7B chat model on the HF Inference API.
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")
# Chat callback invoked by the Gradio ChatInterface on every user turn.
def respond(message, history):
    """Generate the assistant's reply to *message* given the chat *history*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[dict] | None
        Prior turns as ``{"role": ..., "content": ...}`` dicts (Gradio
        ``type="messages"`` format); may be empty or None on the first turn.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    # System prompt pins the bot's persona for every request.
    messages = [{"role": "system", "content": "You are a friendly color theory chatbot."}]
    # Include prior turns so the model sees the full conversation context.
    if history:
        messages.extend(history)
    # The current user message goes last.
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=512,
    )
    # chat_completion returns a ChatCompletionOutput dataclass; attribute
    # access is the documented path (dict-style subscripting only works via
    # a backward-compatibility shim).
    return response.choices[0].message.content.strip()
# Wire the respond callback into Gradio's chat UI; type="messages" keeps the
# history as a list of {"role", "content"} dicts matching the API format.
chatbot = gr.ChatInterface(fn=respond, type="messages")
# debug=True surfaces tracebacks in the Space logs while developing.
chatbot.launch(debug=True)