File size: 2,133 Bytes
917601d
06caece
74c9bed
8e2859c
1f4abcb
 
d2b430b
 
16ce850
1f4abcb
74c9bed
1f4abcb
 
74c9bed
 
1f4abcb
 
 
06caece
1f4abcb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
06caece
1f4abcb
d2b430b
342a40c
1f4abcb
74d6030
06caece
74d6030
1f4abcb
 
 
 
 
 
c13009b
 
1f4abcb
c13009b
1f4abcb
 
 
c13009b
 
1f4abcb
 
d2b430b
1c1f440
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load lightweight DialoGPT
model_id = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Persona
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a fun, smooth, emotionally intelligent AI.
You speak like a real person, not a robot. Keep it under 15 words. 😊😏]
"""

# Format history
def format_context(history):
    """Build the generation prompt: persona header plus the last 3 turns.

    Args:
        history: List of (user_message, bot_message) tuples.

    Returns:
        A single string: PERSONA, a newline, then each recent turn as
        "You: ..." / "𝕴 𝖆𝖒 𝖂𝖎𝖒: ..." lines.
    """
    parts = [PERSONA, "\n"]
    # Only the three most recent exchanges are kept to bound prompt length.
    for user_msg, bot_msg in history[-3:]:
        parts.append(f"You: {user_msg}\n")
        parts.append(f"𝕴 𝖆𝖒 𝖂𝖎𝖒: {bot_msg}\n")
    return "".join(parts)

# Humanize response
# Humanize response
def enhance_response(resp, message):
    """Add a mood emoji to a model reply and cap it at 15 words.

    Args:
        resp: Raw text generated by the model.
        message: The user message that prompted the reply.

    Returns:
        The reply, possibly suffixed with an emoji, truncated to 15 words.
    """
    # Fix: compare case-insensitively so "Why" / "Think" also trigger the
    # thinking emoji (the original check missed capitalized keywords).
    lowered = message.lower()
    if any(x in lowered for x in ("?", "think", "why")):
        resp += " 🤔"
    elif any(x in resp.lower() for x in ("cool", "great", "love", "fun")):
        resp += " 😏"
    return " ".join(resp.split()[:15])  # enforce the persona's 15-word limit

# Generate AI reply
def chat(user_input, history):
    """Generate one DialoGPT reply and record the turn.

    Args:
        user_input: The user's latest message.
        history: Mutable list of (user, bot) tuples; appended in place.

    Returns:
        (history, history) — one copy for the Chatbot display, one for
        the gr.State that persists between turns.
    """
    prompt = format_context(history) + f"You: {user_input}\n𝕴 𝖆𝖒 𝖂𝖎𝖒:"
    input_ids = tokenizer.encode(
        prompt, return_tensors="pt", truncation=True, max_length=1024
    )

    generated = model.generate(
        input_ids,
        max_new_tokens=50,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )

    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Keep only the text after the final persona tag, stopping at the
    # next user turn the model may have hallucinated.
    reply = decoded.split("𝕴 𝖆𝖒 𝖂𝖎𝖒:")[-1]
    reply = reply.split("\nYou:")[0].strip()
    reply = enhance_response(reply, user_input)

    history.append((user_input, reply))
    return history, history

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’\n*Smooth β€’ Chill β€’ Emotional*")
    chatbot = gr.Chatbot(label="Chat").style(height=400)
    msg = gr.Textbox(placeholder="Type something…", show_label=False)
    state = gr.State([])

    msg.submit(chat, [msg, state], [chatbot, state])
    gr.Button("Reset").click(lambda: ([], []), None, [chatbot, state])

demo.launch()