Spaces:
Sleeping
Sleeping
File size: 1,204 Bytes
48f0985 fc4bb98 a891aca 48f0985 1e16a13 a891aca 04ae183 a891aca 9704631 a891aca 9704631 e88af7f 2ae9b0f 9704631 e88af7f 9704631 49cc86b 9704631 a891aca 9704631 a891aca 0b8bfed c52acc0 1bcc71a 1cd7518 a891aca 48f0985 e8d4cca |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Hosted inference client for the Zephyr-7B-beta chat model on the
# Hugging Face Inference API (no local weights are downloaded).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(message, history):
    """Stream a chat reply from the Zephyr model, one token at a time.

    Args:
        message: The latest user message (str).
        history: Prior conversation as a list of {"role", "content"} dicts
            (Gradio "messages" format); may be empty/None on the first turn.

    Yields:
        The partial assistant response, growing as tokens stream in.
    """
    messages = [{"role": "system", "content": "You are a chatbot who chooses kindness, and serves daily, just like a diva."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Named `chunk` (not `message`) so the user-message parameter is not
    # shadowed inside the loop.
    for chunk in client.chat_completion(
        messages,
        max_tokens=1000,
        # temperature=.9,  # decimal value between 0-2
        # top_p=.8  # 0-1
        stream=True,
    ):
        # delta.content can be None on some stream chunks (e.g. the final
        # one); coalesce to "" so `response += token` never raises TypeError.
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
def echo(message, history):
    """Return one randomly selected canned reply.

    Both arguments are accepted to satisfy the Gradio chat-handler
    signature but are ignored; the reply does not depend on them.
    """
    canned_replies = [
        "yes",
        "no",
        "No, you're not eating",
        "Yes, ate down diva",
        "No, you're not slick",
        "Not today hunny",
        "Yes, level 100 gyatt",
        "Naur, Bad girl",
        "Naur, Bad boy",
        "Oui,good girl",
        "Oui, good boy",
    ]
    return random.choice(canned_replies)
# Chat UI wired to the streaming `respond` handler. type="messages" makes
# Gradio pass `history` as a list of {"role", "content"} dicts, which is the
# shape `respond` extends its prompt with.
chatbot = gr.ChatInterface(respond, type = "messages")
# debug=True keeps the launch call blocking and surfaces errors in the console.
chatbot.launch(debug=True)