Spaces:
Sleeping
Sleeping
File size: 1,201 Bytes
e14bb33 cd2baca 75f6036 93d446e 39b114e 75f6036 7dbbd0f 39b114e 75f6036 bf48f68 1812f55 bf48f68 5eec524 1812f55 bf48f68 e14bb33 5eec524 e14bb33 75f6036 e14bb33 c28f786 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Hosted Inference API client; all chat completions are served by this model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# change the LLM: swap the model ID above for any other chat model on the Hub
def respond(message, history):
    """Stream a short assistant reply for *message* given the chat *history*.

    Args:
        message: The latest user message (plain text).
        history: Prior turns in OpenAI-style ``{"role", "content"}`` dicts
            (Gradio supplies this when ``type="messages"``); may be empty.

    Yields:
        The accumulated response text after each streamed token, so the UI
        updates incrementally.
    """
    # System prompt sets the personality of the chatbot.
    messages = [{"role": "system", "content": "You are extremely professional and smart that gives short replies."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: the loop variable is named `chunk`, not `message` — the original
    # shadowed the user-message parameter, which invites subtle bugs.
    # Tuning knobs: max_tokens caps reply length; temperature (0-2) and
    # top_p (0-1) could also be passed to chat_completion.
    for chunk in client.chat_completion(
        messages, max_tokens=100, stream=True
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response
def random_message(message, history):
    """Reply with a random magic-8-ball style answer.

    Both *message* and *history* are accepted for chat-handler compatibility
    but deliberately ignored — the answer is pure chance.
    """
    replies = (
        "it is likely",
        "absolutely not",
        "try again",
        "without a doubt",
        "outlook good",
        "signs point to it",
        "very unlikely",
        "quite doubtful",
    )
    return random.choice(replies)
# Wire the streaming responder into a chat UI and start the app server.
chatbot = gr.ChatInterface(fn=respond, type="messages")
chatbot.launch(debug=True)