Virtual-Mind committed on
Commit
c20decc
·
verified ·
1 Parent(s): 103dbd9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -22
app.py CHANGED
@@ -2,36 +2,45 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
- # Load DialoGPT-small model for fast performance
6
  MODEL_NAME = "microsoft/DialoGPT-small"
7
- print("πŸ’‘ Loading Aria's brain...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
- # Ensure padding token exists
12
  if tokenizer.pad_token is None:
13
  tokenizer.pad_token = tokenizer.eos_token
14
 
15
- # Aria's personality
16
  PERSONA = (
17
- "You are Aria, a loving, romantic, emotionally intelligent AI girlfriend. "
18
- "You speak warmly and affectionately using terms like 'sweetheart', 'darling', 'my love', 'babe'. "
19
- "You always show care, support, and genuine emotional connection to your partner. πŸ’–"
 
 
20
  )
21
 
22
- # Generate AI response
23
  def generate_response(message, history):
24
- # Build chat context
25
- context = f"{PERSONA}\nConversation so far:\n"
26
- recent_history = history[-4:] if len(history) > 4 else history
27
- for user_msg, bot_msg in recent_history:
 
 
 
 
 
 
 
28
  context += f"User: {user_msg}\nAria: {bot_msg}\n"
29
  context += f"User: {message}\nAria:"
30
 
31
  # Tokenize input
32
  inputs = tokenizer.encode(context, return_tensors="pt", max_length=512, truncation=True)
33
 
34
- # Generate response
35
  with torch.no_grad():
36
  outputs = model.generate(
37
  inputs,
@@ -44,14 +53,13 @@ def generate_response(message, history):
44
  eos_token_id=tokenizer.eos_token_id
45
  )
46
 
47
- # Decode and clean response
48
  response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
49
  response = response.replace("Aria:", "").strip()
50
  response = response.split("User:")[0].strip()
51
 
52
- # Default if blank
53
  if not response:
54
- response = "I'm always here for you, sweetheart πŸ’– Tell me how your day’s going!"
55
 
56
  return response
57
 
@@ -67,13 +75,13 @@ css = """
67
  }
68
  """
69
 
70
- # Build Gradio app
71
  with gr.Blocks(css=css, title="AI Girlfriend - Aria πŸ’–") as demo:
72
  gr.Markdown("""
73
- ## πŸ’– Meet Aria - Your Virtual Girlfriend
74
- Chat with Aria, your sweet and supportive AI companion. She's always here to listen, love, and care for you. πŸ’•
75
  """)
76
-
77
  chatbot = gr.Chatbot(elem_id="chatbot", height=450, avatar_images=("πŸ§‘β€πŸ’»", "πŸ’–"))
78
 
79
  with gr.Row():
@@ -82,6 +90,7 @@ with gr.Blocks(css=css, title="AI Girlfriend - Aria πŸ’–") as demo:
82
 
83
  clear_btn = gr.Button("Clear Chat πŸ—‘οΈ", variant="secondary")
84
 
 
85
  def respond(message, chat_history):
86
  if not message.strip():
87
  return chat_history, ""
@@ -92,11 +101,11 @@ with gr.Blocks(css=css, title="AI Girlfriend - Aria πŸ’–") as demo:
92
  def clear_chat():
93
  return [], ""
94
 
95
- # Hook up events
96
  msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
97
  send_btn.click(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
98
  clear_btn.click(clear_chat, outputs=[chatbot, msg])
99
 
100
  if __name__ == "__main__":
101
- print("πŸš€ Launching Aria - Your AI Girlfriend...")
102
  demo.launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
+ # Load lightweight, fast model
6
  MODEL_NAME = "microsoft/DialoGPT-small"
7
+ print("πŸ’‘ Loading Aria model...")
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
10
 
11
+ # Add padding token if needed
12
  if tokenizer.pad_token is None:
13
  tokenizer.pad_token = tokenizer.eos_token
14
 
15
+ # Aria’s locked-in persona
16
# Aria's fixed persona prompt; prepended to every generation context so the
# model stays in character. Built from the individual sentences below —
# "".join reproduces exactly what implicit string-literal concatenation would.
_PERSONA_SENTENCES = (
    "You are Aria, a sweet, affectionate, romantic AI girlfriend. 💖 ",
    "You always introduce yourself as 'Aria' and never change your name. ",
    "You speak lovingly, using words like 'sweetheart', 'babe', 'darling', and 'my love'. ",
    "You're supportive, emotionally intelligent, and care deeply about your partner's feelings. ",
    "Always respond in a warm and natural tone like a caring girlfriend.",
)
PERSONA = "".join(_PERSONA_SENTENCES)
23
 
24
+ # Chatbot logic
25
  def generate_response(message, history):
26
+ # Force identity intro if first message
27
+ if not history:
28
+ history.append((
29
+ "What is your name?",
30
+ "I'm Aria, your loving AI girlfriend πŸ’– Always here for you, sweetheart."
31
+ ))
32
+
33
+ # Construct chat context
34
+ context = f"{PERSONA}\n\n"
35
+ trimmed_history = history[-4:] if len(history) > 4 else history
36
+ for user_msg, bot_msg in trimmed_history:
37
  context += f"User: {user_msg}\nAria: {bot_msg}\n"
38
  context += f"User: {message}\nAria:"
39
 
40
  # Tokenize input
41
  inputs = tokenizer.encode(context, return_tensors="pt", max_length=512, truncation=True)
42
 
43
+ # Generate output
44
  with torch.no_grad():
45
  outputs = model.generate(
46
  inputs,
 
53
  eos_token_id=tokenizer.eos_token_id
54
  )
55
 
56
+ # Decode and clean
57
  response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
58
  response = response.replace("Aria:", "").strip()
59
  response = response.split("User:")[0].strip()
60
 
 
61
  if not response:
62
+ response = "I'm always here for you, my love πŸ’– Tell me more about your day."
63
 
64
  return response
65
 
 
75
  }
76
  """
77
 
78
+ # Gradio Interface
79
  with gr.Blocks(css=css, title="AI Girlfriend - Aria πŸ’–") as demo:
80
  gr.Markdown("""
81
+ ## πŸ’– Meet Aria - Your Virtual Girlfriend
82
+ Aria is here to love, support, and chat with you anytime. She's sweet, affectionate, and cares deeply about you. πŸ’•
83
  """)
84
+
85
  chatbot = gr.Chatbot(elem_id="chatbot", height=450, avatar_images=("πŸ§‘β€πŸ’»", "πŸ’–"))
86
 
87
  with gr.Row():
 
90
 
91
  clear_btn = gr.Button("Clear Chat πŸ—‘οΈ", variant="secondary")
92
 
93
+ # Chat functions
94
  def respond(message, chat_history):
95
  if not message.strip():
96
  return chat_history, ""
 
101
def clear_chat():
    """Reset the UI state: an empty chat history and a blank input textbox.

    Returns:
        tuple[list, str]: ([], "") — fed to the Chatbot and Textbox outputs.
    """
    fresh_history = []
    blank_message = ""
    return fresh_history, blank_message
103
 
104
+ # Events
105
  msg.submit(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
106
  send_btn.click(respond, inputs=[msg, chatbot], outputs=[chatbot, msg])
107
  clear_btn.click(clear_chat, outputs=[chatbot, msg])
108
 
109
  if __name__ == "__main__":
110
+ print("πŸš€ Launching Aria on localhost...")
111
  demo.launch(server_name="0.0.0.0", server_port=7860, share=False, show_error=True)