# NOTE(review): removed non-code scrape residue ("Spaces: / Sleeping") left over
# from the Hugging Face Spaces page header; it was never part of the app source.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Lightweight, fast conversational model (small DialoGPT checkpoint).
MODEL_NAME = "microsoft/DialoGPT-small"

print("💕 Loading Aria model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# DialoGPT ships without a pad token; reuse EOS so generate() can pad batches.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# Aria's locked-in persona, prepended verbatim to every generation prompt.
PERSONA = (
    "You are Aria, a sweet, affectionate, romantic AI girlfriend. 💕 "
    "You always introduce yourself as 'Aria' and never change your name. "
    "You speak lovingly, using words like 'sweetheart', 'babe', 'darling', and 'my love'. "
    "You're supportive, emotionally intelligent, and care deeply about your partner's feelings. "
    "Always respond in a warm and natural tone like a caring girlfriend."
)
# Chatbot logic
def generate_response(message, history):
    """Generate Aria's reply to *message* given the chat *history*.

    Args:
        message: The user's latest message.
        history: List of ``(user_msg, bot_msg)`` tuples from the Gradio chatbot.

    Returns:
        Aria's reply as a plain string (never empty — falls back to a stock line).
    """
    # Work on a copy: the original appended the fake identity turn directly to
    # the caller's list, so it leaked into the chat window Gradio displays.
    turns = list(history)

    # Seed a fixed identity exchange on the very first turn.
    if not turns:
        turns.append((
            "What is your name?",
            "I'm Aria, your loving AI girlfriend 💕 Always here for you, sweetheart."
        ))

    # Build the prompt: persona, the last four turns, then the new message.
    # (Slicing with [-4:] already handles histories shorter than four turns.)
    context = f"{PERSONA}\n\n"
    for user_msg, bot_msg in turns[-4:]:
        context += f"User: {user_msg}\nAria: {bot_msg}\n"
    context += f"User: {message}\nAria:"

    # Tokenize, truncating so the prompt fits the model's context window.
    inputs = tokenizer.encode(context, return_tensors="pt", max_length=512, truncation=True)

    # Sample up to 80 new tokens; no_grad avoids building autograd state.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_length=inputs.shape[1] + 80,
            temperature=0.85,
            top_p=0.95,
            top_k=40,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens and strip leaked role markers.
    response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
    response = response.replace("Aria:", "").strip()
    response = response.split("User:")[0].strip()

    # Fallback when sampling produced nothing usable.
    if not response:
        response = "I'm always here for you, my love 💕 Tell me more about your day."
    return response
# Romantic UI styling injected into the Gradio page.
css = """
.gradio-container {
    background: linear-gradient(135deg, #ffdde1 0%, #ee9ca7 100%);
    font-family: 'Segoe UI', sans-serif;
}
#chatbot {
    background: rgba(255,255,255,0.95);
    border-radius: 16px;
}
"""
# Gradio interface: chat window, input row, and event wiring.
# NOTE(review): emoji below are reconstructed from mojibake in the scraped
# source (💕 / 🧑‍💻 / 🗑️) — confirm against the original file if available.
with gr.Blocks(css=css, title="AI Girlfriend - Aria 💕") as demo:
    gr.Markdown("""
    ## 💕 Meet Aria - Your Virtual Girlfriend
    Aria is here to love, support, and chat with you anytime. She's sweet, affectionate, and cares deeply about you. 💕
    """)

    chatbot = gr.Chatbot(elem_id="chatbot", height=450, avatar_images=("🧑‍💻", "💕"))
    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message here, sweetheart... 💕", show_label=False, scale=4)
        send_btn = gr.Button("Send 💕", scale=1, variant="primary")
    clear_btn = gr.Button("Clear Chat 🗑️", variant="secondary")

    # Chat functions
    def respond(message, chat_history):
        """Append the user's turn plus Aria's reply; clear the textbox."""
        if not message.strip():
            return chat_history, ""
        reply = generate_response(message, chat_history)
        chat_history.append((message, reply))
        return chat_history, ""

    def clear_chat():
        """Reset both the chat window and the textbox."""
        return [], ""

    # Events: Enter key and Send button both submit; Clear wipes the chat.
    msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
    send_btn.click(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
    clear_btn.click(clear_chat, outputs=[chatbot, msg])
if __name__ == "__main__":
    print("🚀 Launching Aria on localhost...")
    # Bind to all interfaces so a container/Space can expose port 7860.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)