Spaces:
Sleeping
Sleeping
import gradio as gr
import random  # NOTE(review): imported but never used in this file — candidate for removal
from huggingface_hub import InferenceClient

# Hosted inference client for the chat model; swap the repo id here to change the LLM.
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
def respond(message, history):
    """Stream a reply from the hosted LLM for a Gradio ChatInterface.

    Args:
        message: The user's latest message (str).
        history: Prior turns as a list of {"role": ..., "content": ...} dicts
            (Gradio ``type="messages"`` format); may be empty/None on the
            first turn.

    Yields:
        The accumulated assistant response after each streamed token, so the
        UI updates incrementally while the model generates.
    """
    # System prompt — edit this string to change the bot's personality.
    messages = [{"role": "system", "content": "You are a big sister chatbot named, Nessie. You help people feel better about their bodies and self-image."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # BUG FIX: the original loop variable was also named `messages`, shadowing
    # the conversation list built above. Use a distinct name per streamed chunk.
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas can carry `content=None` (e.g. the final chunk);
        # skip those to avoid a `str + None` TypeError.
        if token:
            response += token
        yield response
# Soft pink/rose color theme for the demo UI.
theme = gr.themes.Soft(primary_hue="rose", secondary_hue="zinc", neutral_hue="pink")
# Build the UI: a single streaming chat interface using the theme above.
with gr.Blocks(theme=theme) as demo:
    chatbot = gr.ChatInterface(
        fn=respond,
        type="messages",  # history is passed to `respond` as role/content dicts
        title="Hi! I'm Nessie, your personal wellness assistant. What can I assist you with today?",
    )

# BUG FIX: the original constructed a second, untitled ChatInterface inside the
# same Blocks (rendering two chat widgets) and then launched that inner
# component instead of the top-level app. Launch the Blocks app itself.
demo.launch(debug=True)