"""A cutesy Gradio chatbot backed by the HuggingFaceH4/zephyr-7b-beta model."""

import os
import random

import gradio as gr
from huggingface_hub import InferenceClient

# Authentication: export HF_TOKEN in the environment before running;
# never hard-code API tokens in source files.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Custom styling: light-blue bubbles for the user, light-gray for the bot.
css_code = """
.gradio-container { font-family: 'Arial', sans-serif; }
.message.user { background-color: #e0f2f7; /* Light blue for user messages */ color: #333; }
.message.bot { background-color: #f0f0f0; /* Light gray for bot messages */ color: #555; }
"""


def respond(message, history):
    """Return the model's reply to *message*, given the prior chat *history*.

    Parameters
    ----------
    message : str
        The user's latest input.
    history : list[dict] | None
        Previous turns as ``{"role": ..., "content": ...}`` dicts
        (Gradio ``type="messages"`` format).

    Returns
    -------
    str
        The assistant's reply, stripped of surrounding whitespace.
    """
    messages = [{"role": "system", "content": "You are a cutesy chatbot."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(messages, max_tokens=100)
    # Bug fix: chat_completion returns a ChatCompletionOutput object, not a
    # plain dict; the reply text lives at .choices[0].message.content
    # (singular "message"). The original ['messages'] key would raise.
    return response.choices[0].message.content.strip()


if __name__ == "__main__":
    chatbot = gr.ChatInterface(
        respond,
        type="messages",
        title="Cutesy Chatbot",
        description="This is a very cute chatbot!",
        css=css_code,
    )
    chatbot.launch()