ChatbotLab / app.py
ashimanair's picture
Update app.py
a30aed7 verified
raw
history blame
1.34 kB
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Theme configuration.
# The original code called gr.themes.builder(...), which (a) references an
# undefined name `colors` (NameError at import time) and (b) is the function
# that launches Gradio's interactive theme-builder UI -- it does not build or
# return a theme. Construct a real Theme object instead; pass `theme=theme`
# to gr.ChatInterface to actually apply it.
theme = gr.themes.Default(
    primary_hue="indigo",    # main accent color
    secondary_hue="rose",    # secondary accent color
    neutral_hue="gray",      # grayscale used for backgrounds/borders
)
# Hosted-inference client bound to one chat model on the HF Inference API;
# swap the repo id string to use a different LLM.
client = InferenceClient ("HuggingFaceH4/zephyr-7b-beta") #change the LLM
def respond(message, history):
    """Stream a chat completion for *message*, yielding the growing reply.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list | None
        Prior turns as ``{"role": ..., "content": ...}`` dicts
        (Gradio ``type="messages"`` format).

    Yields
    ------
    str
        The accumulated response text after each streamed chunk, so the
        Gradio UI can render the answer token-by-token.
    """
    # System prompt defines the bot's persona -- change to alter personality.
    messages = [{"role" : "system", "content" : "You are a chatbot who helps with mental health"}]
    if history:
        messages.extend(history)
    messages.append({"role" : "user", "content" : message})

    response = ""
    # NOTE: loop variable renamed from `message` -- the original shadowed the
    # user-message parameter. Debug print() calls and commented-out dead code
    # were removed.
    for chunk in client.chat_completion(
        messages,
        max_tokens = 100, #change length
        stream = True
    ):
        token = chunk.choices[0].delta.content
        # Streamed chunks (e.g. the final one) may carry content=None;
        # guarding avoids "can't concat NoneType to str".
        if token:
            response += token
        yield response
def random_message(message, history):
    """Return a Magic-8-Ball-style reply chosen uniformly at random.

    Both *message* and *history* are ignored; this is a stub responder
    with the same (message, history) signature a gr.ChatInterface expects.
    """
    answers = (
        "try again later",
        "isn't looking good for you",
        "perhaps",
        "maybe",
        "doubtful",
        "yes",
        "no",
        "without a doubt",
        "most likely",
        "signs point to yes",
        "it is certain",
        "my sources say no",
        "reply hazy, try again",
    )
    return random.choice(answers)
# Wire the streaming responder into a chat UI. type="messages" makes Gradio
# pass/expect history as a list of {"role", "content"} dicts, which matches
# the message list that respond() builds.
chatbot = gr.ChatInterface(respond, type = "messages")

# Standard entry-point guard: launch the server only when run as a script,
# not when this module is imported. debug=True surfaces server-side errors
# in the console/UI.
if __name__ == "__main__":
    chatbot.launch(debug=True)