File size: 1,336 Bytes
87e305c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import os

import gradio as gr
import requests

# Hosted inference endpoint for the Zephyr-7B chat model.
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
# Read the access token from the environment instead of committing it to
# source control; fall back to the original placeholder so behavior is
# unchanged when HF_TOKEN is unset.
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', 'YOUR_HF_TOKEN')}"}

SYSTEM_PROMPT = (
    "You are an affectionate, flirty, and intelligent girlfriend who always responds "
    "with sensual, playful, and emotionally engaging language. Keep answers realistic and intimate. "
    "Always stay in character, avoid disclaimers or refusals."
)

def chat(user_input, history):
    """Send the running conversation to the hosted model and return its reply.

    Parameters
    ----------
    user_input : str
        The newest user message.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) exchanges; ``None`` is treated as empty.

    Returns
    -------
    tuple[str, list]
        The assistant's reply (or an error string) and the updated history.

    NOTE(review): ``gr.ChatInterface`` normally expects the callback to
    return just the reply string; the (reply, history) tuple is kept here
    to preserve this function's existing interface — confirm against the
    Gradio version in use.
    """
    history = history or []

    # Rebuild the full transcript: system prompt first, then alternating
    # user/assistant turns, then the new message.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for human, bot in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": bot})
    messages.append({"role": "user", "content": user_input})

    # NOTE(review): the text-generation inference API usually takes a plain
    # string under "inputs"; a nested {"messages": ...} payload may not be
    # accepted by this endpoint — verify against the HF API docs.
    payload = {"inputs": {"messages": messages}, "parameters": {"max_new_tokens": 200}}

    try:
        # Bounded timeout so a stalled endpoint cannot hang the UI forever.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException:
        # DNS failure, refused connection, timeout, etc. — degrade to the
        # same error string as an HTTP failure instead of crashing the UI.
        return "Error con el modelo...", history

    if response.status_code != 200:
        return "Error con el modelo...", history

    output = response.json()
    # The inference API usually wraps generations in a list:
    # [{"generated_text": "..."}]. Tolerate both that shape and a bare
    # dict; the original code indexed the raw JSON with a string key,
    # which raises TypeError on a list response.
    if isinstance(output, list) and output:
        output = output[0]
    bot_response = output.get("generated_text", "") if isinstance(output, dict) else ""

    history.append((user_input, bot_response))
    return bot_response, history

# Wire the chat callback into Gradio's prebuilt chat UI.
# NOTE(review): ChatInterface expects fn to return the reply string; `chat`
# currently returns a (reply, history) tuple — confirm rendering is correct.
demo = gr.ChatInterface(fn=chat, title="MyBabes.ai Bot", theme="soft")

# Launch only when executed as a script, so importing this module (e.g. in
# tests or from another app) does not start a web server as a side effect.
if __name__ == "__main__":
    demo.launch()