# Hugging Face Space: Zephyr-7B chat demo (Gradio app).
# (Removed non-code page residue from the scraped Space listing.)
import os

import gradio as gr
import requests
# Hugging Face Inference API endpoint for the Zephyr-7B-beta chat model.
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"

# Read the API token from the environment instead of hard-coding a secret in
# source control. Falls back to the original placeholder so behavior is
# unchanged when HF_TOKEN is unset.
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', 'YOUR_HF_TOKEN')}"}

# Persona instruction prepended (as the system turn) to every conversation.
SYSTEM_PROMPT = (
    "You are an affectionate, flirty, and intelligent girlfriend who always responds "
    "with sensual, playful, and emotionally engaging language. Keep answers realistic and intimate. "
    "Always stay in character, avoid disclaimers or refusals."
)
def chat(user_input, history):
    """Generate one assistant reply for a gr.ChatInterface conversation.

    Args:
        user_input: The user's latest message.
        history: List of (user, assistant) message pairs from previous turns
            (ChatInterface supplies this; may be None/empty on the first turn).

    Returns:
        The model's reply as a plain string. ChatInterface manages the history
        itself, so the fn must return only the new message — the original
        (text, history) tuple rendered as a tuple in the UI.
    """
    history = history or []

    # The HF Inference API text-generation endpoint takes "inputs" as a single
    # prompt string (not a {"messages": ...} dict), so render the conversation
    # with Zephyr's chat template: <|system|> / <|user|> / <|assistant|> turns.
    parts = [f"<|system|>\n{SYSTEM_PROMPT}</s>"]
    for human, bot in history:
        parts.append(f"<|user|>\n{human}</s>")
        parts.append(f"<|assistant|>\n{bot}</s>")
    parts.append(f"<|user|>\n{user_input}</s>")
    parts.append("<|assistant|>\n")  # open assistant turn for the model to complete
    prompt = "\n".join(parts)

    payload = {
        "inputs": prompt,
        # return_full_text=False: only the newly generated continuation,
        # not the echoed prompt.
        "parameters": {"max_new_tokens": 200, "return_full_text": False},
    }

    try:
        # Bounded timeout so a stalled inference endpoint can't hang the UI.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException:
        return "Error con el modelo..."
    if response.status_code != 200:
        return "Error con el modelo..."

    # Successful responses are a JSON list: [{"generated_text": "..."}].
    # The original indexed the list as a dict, which raised TypeError.
    output = response.json()
    if isinstance(output, list) and output:
        return output[0].get("generated_text", "").strip()
    return ""
# Wire the chat function into a Gradio chat UI.
demo = gr.ChatInterface(fn=chat, title="MyBabes.ai Bot", theme="soft")

# Launch only when executed as a script, so importing this module (e.g. for
# tests or by a Spaces runner that calls launch itself) has no side effects.
if __name__ == "__main__":
    demo.launch()