|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
|
import torch |
|
|
import re |
|
|
|
|
|
|
|
|
# Conversational model: DialoGPT-medium, a GPT-2-based dialogue model.
model_id = "microsoft/DialoGPT-medium"


# Weights and tokenizer are downloaded and cached on first run
# (network access required the first time).
tokenizer = AutoTokenizer.from_pretrained(model_id)


model = AutoModelForCausalLM.from_pretrained(model_id)
|
|
|
|
|
|
|
|
# System prompt prepended to every generation request.
# NOTE(review): the "π΄ ππ πππ" glyphs look mojibake-encoded, but the same
# byte sequence is used as the speaker tag / split marker throughout the
# file, so this literal is intentionally kept byte-identical.
PERSONA = """


[System: You are π΄ ππ πππ - a fun, smooth, emotionally intelligent AI.


You speak like a real person, not a robot. Reply like a calm, confident friend who gets the vibe.


Keep responses under 15 words. Use natural speech. Add emotional flavor: π π€ π]


"""
|
|
|
|
|
def format_context(history):
    """Build the prompt prefix: persona plus the last three exchanges.

    Args:
        history: List of ``[user_text, bot_text]`` pairs. Either half may
            be ``None`` for an in-flight exchange.

    Returns:
        Prompt string (persona, then alternating speaker lines), ending
        with a trailing newline.
    """
    context = PERSONA + "\n"

    # Keep the prompt short: only the 3 most recent exchanges.
    for user, bot in history[-3:]:
        # BUG FIX: skip None halves so a pending [user, None] entry
        # doesn't inject the literal text "None" into the prompt.
        if user is not None:
            context += f"You: {user}\n"
        if bot is not None:
            context += f"π΄ ππ πππ: {bot}\n"
    return context
|
|
|
|
|
def add_emotional_intelligence(response, message):
    """Post-process a raw reply with emotional cues.

    Appends an emoji matching the detected mood, bounces a question back
    when the user asked one and the reply is short, contracts stiff
    phrasing, and caps the reply at 15 words.

    Args:
        response: Raw model reply text.
        message: The user message that prompted the reply.

    Returns:
        The decorated, possibly truncated, reply string.
    """
    excited = ("cool", "great", "love", "awesome")
    thoughtful = ("think", "why", "how", "consider")
    lowered = response.lower()

    # Mood emoji: excitement wins over thoughtfulness.
    if "!" in message or any(word in lowered for word in excited):
        response += " π"
    elif "?" in message or any(word in lowered for word in thoughtful):
        response += " π€"

    # Bounce short answers to a question back at the user.
    if "?" in message and not response.endswith("?") and len(response.split()) < 10:
        response += " What do you think?"

    # Contractions read less robotic.
    response = response.replace("I am", "I'm").replace("You are", "You're")

    # Enforce the persona's 15-word cap.
    words = response.split()
    if len(words) > 15:
        response = " ".join(words[:15])
    return response
|
|
|
|
|
def generate_response(message, history):
    """Generate a short persona reply to `message` given prior `history`.

    Args:
        message: Latest user utterance.
        history: Prior ``[user, bot]`` exchange pairs used as context.

    Returns:
        Post-processed reply string, capped at 80 characters.
    """
    # Prompt = persona + recent exchanges + new message, ending at the
    # bot speaker tag so the model continues in the persona's voice.
    context = format_context(history) + f"You: {message}\nπ΄ ππ πππ:"

    inputs = tokenizer.encode(context, return_tensors="pt")

    # Inference only: no_grad avoids building autograd state.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=48,
            temperature=0.9,
            top_k=40,
            do_sample=True,
            num_beams=1,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )

    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the text after the final speaker tag (the new reply).
    response = full_text.split("π΄ ππ πππ:")[-1].strip()

    # Trim anything the model hallucinated as the user's next turn.
    if "\nYou:" in response:
        response = response.split("\nYou:")[0]

    response = add_emotional_intelligence(response, message)

    # Ensure terminal punctuation. BUG FIX: removed the dead "..." entry
    # from the set -- response[-1] is a single character and can never
    # equal a 3-char string ("." already covered that case anyway).
    if response and response[-1] not in {".", "!", "?"}:
        response += "." if len(response) > 20 else "..."

    return response[:80]
|
|
|
|
|
|
|
|
with gr.Blocks(title="π΄ ππ πππ", theme=gr.themes.Soft()) as demo:

    # Per-session conversation memory: list of [user_text, bot_text] pairs.
    history_state = gr.State([])

    gr.Markdown("# π΄ ππ πππ \n*Chill β’ Confident β’ Remembers You*")

    # BUG FIX: "tokens" is not a valid Chatbot type; the pair-list history
    # used here matches type="tuples".
    chatbot = gr.Chatbot(height=300, bubble_full_width=False, type="tuples")

    msg = gr.Textbox(placeholder="What's on your mind?", container=False)
    clear = gr.Button("New Vibe", size="sm")

    def user(user_message, history):
        """Clear the textbox and append a pending [user, None] exchange."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Fill in the bot half of the last exchange and update both outputs."""
        message = history[-1][0]
        response = generate_response(message, history[:-1])
        # BUG FIX: complete the pending pair instead of appending a
        # separate [None, response] entry, which corrupted the pair
        # structure that format_context iterates over.
        new_history = history[:-1] + [[message, response]]
        # BUG FIX: this callback drives two output components
        # ([chatbot, history_state]), so it must return two values.
        return new_history, new_history

    def clear_chat():
        """Reset both the visible chat and the stored history."""
        # BUG FIX: two output components -> two return values.
        return [], []

    msg.submit(user, [msg, history_state], [msg, history_state]).then(
        bot, history_state, [chatbot, history_state]
    )
    clear.click(clear_chat, None, [chatbot, history_state])

    # Start each new session with empty memory.
    demo.load(lambda: [], None, history_state)


demo.queue()

demo.launch()