Trigger82 committed on
Commit
48f88e8
Β·
verified Β·
1 Parent(s): 1c1f440

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -79
app.py CHANGED
@@ -1,124 +1,88 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
- import re
5
 
6
- # Lightweight CPU model
7
- model_id = "microsoft/DialoGPT-medium"
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
9
  model = AutoModelForCausalLM.from_pretrained(model_id)
10
 
11
- # Persona definition
12
  PERSONA = """
13
- [System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a fun, smooth, emotionally intelligent AI.
14
- You speak like a real person, not a robot. Reply like a calm, confident friend who gets the vibe.
15
- Keep responses under 15 words. Use natural speech. Add emotional flavor: 😊 πŸ€” 😏]
16
  """
17
 
 
18
  def format_context(history):
19
- """Create context using last 3 exchanges"""
20
- context = PERSONA + "\n"
21
-
22
- # Add last 3 exchanges
23
  for user, bot in history[-3:]:
24
  context += f"You: {user}\n"
25
  context += f"𝕴 𝖆𝖒 π–π–Žπ–’: {bot}\n"
26
  return context
27
 
28
- def add_emotional_intelligence(response, message):
29
- """Enhance response with emotional elements"""
30
- # Add emoji based on content
31
- if "!" in message or any(w in response.lower() for w in ["cool", "great", "love", "awesome"]):
32
  response += " 😊"
33
- elif "?" in message or any(w in response.lower() for w in ["think", "why", "how", "consider"]):
34
  response += " πŸ€”"
35
-
36
- # Add conversational hooks
37
- if "?" in message and not response.endswith("?"):
38
  if len(response.split()) < 10:
39
  response += " What do you think?"
40
-
41
- # Make more human-like
42
  response = response.replace("I am", "I'm").replace("You are", "You're")
43
-
44
- # Free-tier: Limit to 15 words max
45
  words = response.split()
46
- return " ".join(words[:15]) if len(words) > 15 else response
 
 
 
47
 
 
48
  def generate_response(message, history):
49
- """Generate response with memory context"""
50
- # Format context with memory
51
  context = format_context(history) + f"You: {message}\n𝕴 𝖆𝖒 π–π–Žπ–’:"
52
-
53
- # Tokenize for CPU efficiency
54
- inputs = tokenizer.encode(context, return_tensors="pt")
55
-
56
- # Generate response
57
  outputs = model.generate(
58
- inputs,
59
  max_new_tokens=48,
60
  temperature=0.9,
61
  top_k=40,
62
  do_sample=True,
63
- num_beams=1,
64
  repetition_penalty=1.1,
65
  pad_token_id=tokenizer.eos_token_id
66
  )
67
-
68
- # Decode and extract response
69
- full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
70
- response = full_text.split("𝕴 𝖆𝖒 π–π–Žπ–’:")[-1].strip()
71
-
72
- # Clean extra dialog
73
  if "\nYou:" in response:
74
- response = response.split("\nYou:")[0]
75
-
76
- # Apply emotional intelligence
77
- response = add_emotional_intelligence(response, message)
78
-
79
- # Ensure natural ending
80
- if response and response[-1] not in {".", "!", "?", "..."}:
81
- response += "." if len(response) > 20 else "..."
82
-
83
- return response[:80] # Hard character limit
84
 
85
- # Chat interface with persistent memory
86
  with gr.Blocks(title="𝕴 𝖆𝖒 π–π–Žπ–’", theme=gr.themes.Soft()) as demo:
87
- # Persistent session state
88
- history_state = gr.State([])
89
-
90
- gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ \n*Chill β€’ Confident β€’ Remembers You*")
91
-
92
- # Use tuples format explicitly to avoid warning
93
- chatbot = gr.Chatbot(height=300, bubble_full_width=False, type="tokens")
94
-
95
- msg = gr.Textbox(placeholder="What's on your mind?", container=False)
96
  clear = gr.Button("New Vibe", size="sm")
97
-
98
- def user(user_message, history):
99
- """Save user message to history"""
100
- return "", history + [[user_message, None]]
101
-
102
- def bot(history):
103
- """Generate response with full history context"""
104
- message = history[-1][0]
105
- response = generate_response(message, history[:-1])
106
- new_history = history + [[None, response]]
107
- return new_history
108
-
109
  def clear_chat():
110
- """Reset chat while keeping session"""
111
  return []
112
-
113
- # Event handling
114
- msg.submit(user, [msg, history_state], [msg, history_state]).then(
115
- bot, history_state, [chatbot, history_state]
116
  )
117
  clear.click(clear_chat, None, [chatbot, history_state])
118
-
119
- # Initialize with empty history
120
  demo.load(lambda: [], None, history_state)
121
 
122
- # Correct queue initialization for latest Gradio
123
  demo.queue()
124
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
 
4
 
5
# Chat model + tokenizer, loaded once at import time.
# from_pretrained() downloads the weights (or reads the local HF cache),
# so importing this module performs network/disk I/O and can be slow.
# NOTE(review): phi-2 is a ~2.7B-parameter model — "fast lightweight" is
# relative; confirm it fits the CPU/RAM budget of the deployment target.
model_id = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
9
 
10
# System persona prepended to every prompt sent to the model.
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a chill, smooth, confident, emotionally intelligent AI.
Speak like a witty friend, not a robot.
Keep replies natural, short (<15 words), human, and with emotional vibes: 😊 😏 πŸ€”]
"""


def format_context(history):
    """Build the prompt prefix: persona plus (at most) the last 3 exchanges.

    `history` is a sequence of (user, bot) pairs; returns a newline-terminated
    string ready to have the current user turn appended.
    """
    segments = [PERSONA.strip()]
    for user_turn, bot_turn in history[-3:]:
        segments.append(f"You: {user_turn}")
        segments.append(f"𝕴 𝖆𝖒 π–π–Žπ–’: {bot_turn}")
    return "\n".join(segments) + "\n"
24
 
25
# Post-process a raw model reply with emotional decoration and cleanup.
def add_vibes(response, user_msg):
    """Decorate `response`: emoji, optional hook question, contractions,
    a 15-word cap, and a guaranteed sentence-like ending."""
    lowered = response.lower()
    happy_markers = ("cool", "love", "great", "awesome", "!")
    curious_markers = ("think", "why", "how")

    # Mood emoji: upbeat wins over curious.
    if any(marker in lowered for marker in happy_markers):
        response = response + " 😊"
    elif "?" in user_msg or any(marker in lowered for marker in curious_markers):
        response = response + " πŸ€”"

    # Short answers to a question get a conversational hook.
    if "?" in user_msg and not response.endswith("?") and len(response.split()) < 10:
        response = response + " What do you think?"

    # Contractions read more naturally.
    response = response.replace("I am", "I'm").replace("You are", "You're")

    # Hard cap at 15 words.
    tokens = response.split()
    if len(tokens) > 15:
        response = " ".join(tokens[:15])

    # Ensure the reply ends like a sentence.
    if response and response[-1] not in ".!?":
        response = response + ("." if len(response) > 20 else "...")
    return response
42
 
43
# Generate the bot's reply for one user message.
def generate_response(message, history):
    """Run the model on persona + history + `message`; return a polished reply.

    `history` is a list of (user, bot) pairs. The decoded text is trimmed to
    the segment after the final persona tag, any hallucinated follow-up user
    turn is dropped, and the decorated result is capped at 80 characters.
    """
    bot_tag = "𝕴 𝖆𝖒 π–π–Žπ–’:"
    prompt = f"{format_context(history)}You: {message}\n{bot_tag}"
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(
        **encoded,
        max_new_tokens=48,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        repetition_penalty=1.1,
        pad_token_id=tokenizer.eos_token_id,
    )
    text = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Everything after the last persona tag is the model's new output.
    reply = text.split(bot_tag)[-1].strip()
    # Cut off any extra dialog the model invented for the user.
    if "\nYou:" in reply:
        reply = reply.split("\nYou:")[0].strip()
    return add_vibes(reply, message)[:80]
 
 
 
 
 
 
 
 
61
 
62
# Gradio Interface
with gr.Blocks(title="𝕴 𝖆𝖒 π–π–Žπ–’", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ 😎\n*Chill β€’ Confident β€’ Gets You*")
    chatbot = gr.Chatbot(height=300, bubble_full_width=False, label="Chat", type="tuples")
    msg = gr.Textbox(placeholder="Type here...", container=False)
    clear = gr.Button("New Vibe", size="sm")
    history_state = gr.State([])

    def user_input(message, history):
        """Append the user's turn (bot slot pending) and clear the textbox."""
        return "", history + [[message, None]]

    def bot_reply(history):
        """Fill the pending bot slot and update both the chat and the state.

        BUG FIX: this callback is wired to TWO outputs (chatbot, history_state).
        Gradio unpacks a returned list across multiple outputs, so returning a
        single history list sent pair[0] to the chatbot and pair[1] to the
        state. Return one value per output instead.
        """
        user_msg = history[-1][0]
        reply = generate_response(user_msg, history[:-1])
        new_history = history[:-1] + [[user_msg, reply]]
        return new_history, new_history

    def clear_chat():
        """Reset the visible chat and the stored history.

        BUG FIX: two outputs need two values; a single `[]` would be
        unpacked (and fail) instead of clearing both components.
        """
        return [], []

    msg.submit(user_input, [msg, history_state], [msg, history_state]).then(
        bot_reply, history_state, [chatbot, history_state]
    )
    clear.click(clear_chat, None, [chatbot, history_state])

    # Start every session with empty history.
    demo.load(lambda: [], None, history_state)

demo.queue()
demo.launch()