# Hugging Face Space: Gradio mental-health chatbot backed by Zephyr-7B via InferenceClient.
import gradio as gr
import random
from huggingface_hub import InferenceClient
from gradio.themes.utils import colors
# Custom warm-orange palette for the chat UI; only the primary hue is overridden.
_primary_hue = gr.themes.Color(
    c50="#fff8f0",
    c100="#ffedd5",
    c200="#fed7aa",
    c300="#ffe09e",
    c400="#c2814c",
    c500="#f97316",
    c600="#ea580c",
    c700="#c2410c",
    c800="#9a3412",
    c900="#7c2d12",
    c950="#611f00",
)
theme = gr.themes.Default(primary_hue=_primary_hue)

# Hosted inference endpoint for the LLM. #change the LLM
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(message, history):
    """Stream a chat completion from the hosted LLM.

    Args:
        message: The user's latest chat message.
        history: Prior turns in OpenAI-style ``{"role", "content"}`` dicts
            (Gradio passes this when ``type="messages"``), or None/empty.

    Yields:
        The accumulated response text after each streamed token, so the UI
        can render the reply progressively.
    """
    messages = [{"role": "system", "content": "You are a chatbot who helps with mental health"}]  # change personality
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: the loop variable must not be named `message` — the original
    # shadowed the user message parameter here.
    for chunk in client.chat_completion(
        messages,
        max_tokens=100,  # change length
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # The final streamed chunk can carry delta.content == None;
        # guard so we don't raise TypeError on `str + None`.
        if token:
            response += token
        yield response
def random_message(message, history):
    """Return a magic-8-ball style reply chosen uniformly at random.

    Both arguments are accepted for ChatInterface compatibility but ignored.
    """
    replies = [
        "try again later",
        "isn't looking good for you",
        "perhaps",
        "maybe",
        "doubtful",
        "yes",
        "no",
        "without a doubt",
        "most likely",
        "signs point to yes",
        "it is certain",
        "my sources say no",
        "reply hazy, try again",
    ]
    return random.choice(replies)
# Wire the streaming responder into a Gradio chat UI and start the server.
chatbot = gr.ChatInterface(
    respond,
    type="messages",  # history arrives as role/content dicts
    theme=theme,
)
chatbot.launch(debug=True)