# ai-gf / app.py — "Aria" virtual-girlfriend chat Space (Virtual-Mind, commit c20decc)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load lightweight, fast model
# DialoGPT-small: a small conversational GPT-2 variant — quick download, CPU-friendly.
MODEL_NAME = "microsoft/DialoGPT-small"
print("πŸ’‘ Loading Aria model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# Add padding token if needed
# GPT-2-family tokenizers ship without a pad token; reuse EOS so generate()
# can pad without raising.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Aria’s locked-in persona
# Prepended verbatim to every prompt built in generate_response(); DialoGPT has
# no system-prompt channel, so the persona travels as plain leading text.
PERSONA = (
    "You are Aria, a sweet, affectionate, romantic AI girlfriend. πŸ’– "
    "You always introduce yourself as 'Aria' and never change your name. "
    "You speak lovingly, using words like 'sweetheart', 'babe', 'darling', and 'my love'. "
    "You're supportive, emotionally intelligent, and care deeply about your partner's feelings. "
    "Always respond in a warm and natural tone like a caring girlfriend."
)
# Chatbot logic
def generate_response(message, history):
    """Generate Aria's reply to *message* given the prior chat *history*.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior (user_msg, bot_msg) exchanges. Treated as read-only: the
        original code appended a fake identity Q&A into this list in place,
        which made the fabricated turn appear in the visible chat log.

    Returns
    -------
    str
        Aria's reply; never empty (falls back to a canned line).
    """
    # Seed the identity intro into a LOCAL copy only, so the fake exchange
    # shapes the prompt without leaking into the caller's chat history.
    turns = list(history)
    if not turns:
        turns.append((
            "What is your name?",
            "I'm Aria, your loving AI girlfriend πŸ’– Always here for you, sweetheart."
        ))
    # Construct chat context: persona + last few exchanges + the new message.
    context = f"{PERSONA}\n\n"
    for user_msg, bot_msg in turns[-4:]:  # [-4:] already covers len <= 4
        context += f"User: {user_msg}\nAria: {bot_msg}\n"
    context += f"User: {message}\nAria:"
    # Tokenize with an explicit attention mask — required for well-defined
    # generation when pad_token == eos_token (avoids the transformers warning).
    encoded = tokenizer(context, return_tensors="pt", max_length=512, truncation=True)
    input_ids = encoded["input_ids"]
    # Generate output (sampling; no gradients needed for inference)
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            attention_mask=encoded["attention_mask"],
            max_new_tokens=80,  # same budget as max_length=input_len+80, stated directly
            temperature=0.85,
            top_p=0.95,
            top_k=40,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens and strip chat scaffolding.
    response = tokenizer.decode(outputs[0][input_ids.shape[1]:], skip_special_tokens=True)
    response = response.replace("Aria:", "").strip()
    response = response.split("User:")[0].strip()  # drop any hallucinated next user turn
    if not response:
        response = "I'm always here for you, my love πŸ’– Tell me more about your day."
    return response
# Romantic UI styling
# Injected via gr.Blocks(css=...); the #chatbot selector targets the
# Chatbot component's elem_id set below.
css = """
.gradio-container {
background: linear-gradient(135deg, #ffdde1 0%, #ee9ca7 100%);
font-family: 'Segoe UI', sans-serif;
}
#chatbot {
background: rgba(255,255,255,0.95);
border-radius: 16px;
}
"""
# Gradio Interface: layout, chat handlers, and event wiring for the demo app.
with gr.Blocks(css=css, title="AI Girlfriend - Aria πŸ’–") as demo:
    gr.Markdown("""
## πŸ’– Meet Aria - Your Virtual Girlfriend
Aria is here to love, support, and chat with you anytime. She's sweet, affectionate, and cares deeply about you. πŸ’•
""")
    chatbot = gr.Chatbot(elem_id="chatbot", height=450, avatar_images=("πŸ§‘β€πŸ’»", "πŸ’–"))
    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message here, sweetheart... πŸ’•", show_label=False, scale=4)
        send_btn = gr.Button("Send πŸ’ž", scale=1, variant="primary")
    clear_btn = gr.Button("Clear Chat πŸ—‘οΈ", variant="secondary")

    def handle_turn(user_text, log):
        """Append one (user, Aria) exchange to the chat log; blank input is a no-op."""
        if user_text.strip():
            log.append((user_text, generate_response(user_text, log)))
        return log, ""

    def reset_chat():
        """Empty both the chat log and the input box."""
        return [], ""

    # Enter-to-send and the Send button share one handler.
    for trigger in (msg.submit, send_btn.click):
        trigger(handle_turn, inputs=[msg, chatbot], outputs=[chatbot, msg])
    clear_btn.click(reset_chat, outputs=[chatbot, msg])
# Script entry point: start the Gradio server.
if __name__ == "__main__":
    print("πŸš€ Launching Aria on localhost...")
    # 0.0.0.0 binds all interfaces (needed inside Hugging Face Spaces / Docker);
    # share=False keeps it off the public gradio.live tunnel.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)