Trigger82 commited on
Commit
06caece
Β·
verified Β·
1 Parent(s): 6c688ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -82
app.py CHANGED
@@ -1,130 +1,135 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, pipeline
3
  import torch
4
  import re
 
5
 
6
- # Free-tier optimized model (lightweight but capable)
7
- model_id = "HuggingFaceH4/zephyr-7b-alpha" # Smaller than beta
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
 
9
 
10
- # Free-tier friendly setup (no quantization needed)
11
- pipe = pipeline(
12
- "text-generation",
13
- model=model_id,
14
- tokenizer=tokenizer,
15
- device="cpu", # Force CPU-only
16
- torch_dtype=torch.float32, # Use float32 for CPU compatibility
17
- )
18
-
19
- # Enhanced persona definition
20
  PERSONA = """
21
- You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, smooth, emotionally intelligent AI. You speak like a real person.
22
- Reply like a calm, confident friend who gets the vibe. Be warm, witty, and emotionally tuned.
23
-
24
- Key principles:
25
- 1. NEVER say "as an AI" or sound robotic
26
- 2. Mirror the user's emotion level
27
- 3. Respond with questions to continue conversations
28
- 4. Keep responses under 15 words
29
- 5. Use natural speech: contractions and filler words
30
- 6. Add emotional flavor: 😊 πŸ€” 😏
31
-
32
- Now respond to this:
33
  """
34
 
35
- def format_history(history):
36
- messages = [{"role": "system", "content": PERSONA}]
37
- for user_msg, bot_msg in history[-2:]: # Only last 2 exchanges
38
- messages.append({"role": "user", "content": user_msg})
39
- messages.append({"role": "assistant", "content": bot_msg})
40
- return messages
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  def add_emotional_intelligence(response, message):
43
  """Enhance response with emotional elements"""
44
  # Add emoji based on content
45
- if "!" in message:
46
- response = response.replace(".", "!") + " 😊"
47
- elif "?" in message:
48
- response = response + " πŸ€”" if not response.endswith("?") else response
49
 
50
  # Add conversational hooks
51
  if "?" in message and not response.endswith("?"):
52
- if len(response) < 60:
53
- response += " How about you?"
54
 
55
  # Make more human-like
56
  response = response.replace("I am", "I'm").replace("You are", "You're")
57
 
58
  # Free-tier: Limit to 15 words max
59
  words = response.split()
60
- if len(words) > 15:
61
- response = " ".join(words[:15]) + "..."
62
-
63
- return response.strip()
64
 
65
- def respond(message, history):
66
- # Manage conversation flow
67
- messages = format_history(history)
68
- messages.append({"role": "user", "content": message})
69
-
70
- # Create prompt manually (lightweight)
71
- prompt = ""
72
- for msg in messages:
73
- role = "User" if msg["role"] == "user" else "Assistant"
74
- prompt += f"{role}: {msg['content']}\n"
75
- prompt += "Assistant:"
76
-
77
- # Free-tier optimized generation
78
- outputs = pipe(
79
- prompt,
80
- max_new_tokens=48, # Short responses
81
  temperature=0.9,
82
  top_k=40,
83
  do_sample=True,
84
- num_beams=1, # Fastest decoding
85
  repetition_penalty=1.1,
86
- no_repeat_ngram_size=2
87
  )
88
 
89
- # Extract response
90
- response = outputs[0]['generated_text'].split("Assistant:")[-1].strip()
 
 
 
 
 
91
 
92
  # Apply emotional intelligence
93
  response = add_emotional_intelligence(response, message)
94
 
95
- # Free-tier safety
 
 
 
96
  return response[:80] # Hard character limit
97
 
98
- # Lightweight interface
99
- with gr.Blocks(theme=gr.themes.Soft(), title="𝕴 𝖆𝖒 π–π–Žπ–’") as demo:
100
- gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ \n*Chill β€’ Confident β€’ Humanlike*")
101
-
102
- chatbot = gr.Chatbot(
103
- height=350,
104
- bubble_full_width=False
105
- )
106
 
107
- msg = gr.Textbox(
108
- placeholder="What's on your mind?",
109
- container=False,
110
- scale=7,
111
- autofocus=True
112
- )
113
 
 
 
114
  clear = gr.Button("New Vibe", size="sm")
115
 
116
  def user(user_message, history):
 
117
  return "", history + [[user_message, None]]
118
 
119
  def bot(history):
 
120
  message = history[-1][0]
121
- response = respond(message, history[:-1])
122
- history[-1][1] = response
123
- return history
124
 
125
- msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
126
- bot, chatbot, chatbot
 
 
 
 
 
127
  )
128
- clear.click(lambda: None, None, chatbot, queue=False)
 
 
 
129
 
130
  demo.queue(concurrency_count=1).launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import re
5
+ import json
6
 
7
+ # Lightweight CPU model
8
+ model_id = "microsoft/DialoGPT-medium"
9
  tokenizer = AutoTokenizer.from_pretrained(model_id)
10
+ model = AutoModelForCausalLM.from_pretrained(model_id)
11
 
12
+ # Your persona definition
 
 
 
 
 
 
 
 
 
13
# Persona definition: prepended verbatim to every prompt as a pseudo-system
# message (DialoGPT has no native system role, so it is inlined as text).
PERSONA = """
[System: You are 𝕴 𝖆𝖒 π–π–Žπ–’ - a fun, smooth, emotionally intelligent AI.
You speak like a real person, not a robot. Reply like a calm, confident friend who gets the vibe.
Keep responses under 15 words. Use natural speech. Add emotional flavor: 😊 πŸ€” 😏]
"""
18
 
19
def format_context(history):
    """Build the model prompt: persona + last 3 exchanges + long-term topics.

    Parameters
    ----------
    history : list[list]
        Prior [user, bot] exchange pairs; either slot may be None for a
        half-filled pair, which is skipped defensively.

    Returns
    -------
    str
        Prompt text ending with a newline, ready for the bot tag to be
        appended by the caller.
    """
    context = PERSONA + "\n"

    # Replay only the last 3 exchanges verbatim (CPU/token budget).
    for user, bot in history[-3:]:
        if user is None or bot is None:
            # Guard: half-filled pairs would otherwise render "None" into
            # the prompt (or crash below on .split()).
            continue
        context += f"You: {user}\n"
        context += f"𝕴 𝖆𝖒 π–π–Žπ–’: {bot}\n"

    # Long-term memory: collect salient words from the whole conversation.
    key_topics = set()
    for user, _ in history:
        if user and len(user.split()) > 3:  # only meaningful messages
            key_topics |= {
                word.lower()
                for word in user.split()
                if word.isalpha() and len(word) > 3
            }

    # Remind the model of topics that dropped out of the recent window.
    if key_topics and len(history) > 3:
        recent_words = " ".join(user or "" for user, _ in history[-3:]).lower()
        # sorted() makes the two chosen reminder topics deterministic;
        # slicing an unordered set picked arbitrary topics each run.
        missing_topics = sorted(t for t in key_topics if t not in recent_words)
        if missing_topics:
            context += f"[Remember: we talked about {', '.join(missing_topics[:2])}]\n"

    return context
42
 
43
def add_emotional_intelligence(response, message):
    """Post-process a raw model reply: emoji flavor, a conversational hook,
    contractions, and a 15-word cap. Pure string-in/string-out."""
    lowered = response.lower()
    happy_cues = ("cool", "great", "love", "awesome")
    pensive_cues = ("think", "why", "how", "consider")

    # Emoji based on the user's tone or the reply's own wording.
    if "!" in message or any(cue in lowered for cue in happy_cues):
        response = f"{response} 😊"
    elif "?" in message or any(cue in lowered for cue in pensive_cues):
        response = f"{response} πŸ€”"

    # Keep the conversation going when a question got a short answer.
    if "?" in message and not response.endswith("?") and len(response.split()) < 10:
        response = f"{response} What do you think?"

    # Contract formal phrasing for a more human feel.
    for formal, casual in (("I am", "I'm"), ("You are", "You're")):
        response = response.replace(formal, casual)

    # Hard cap at 15 words.
    words = response.split()
    if len(words) > 15:
        response = " ".join(words[:15])
    return response
 
 
 
62
 
63
def generate_response(message, history):
    """Generate the persona's reply to `message` with memory context.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[list]
        Prior [user, bot] exchange pairs (excluding the pending one).

    Returns
    -------
    str
        The reply, post-processed and hard-capped at 80 characters.
    """
    # One marker, used both to prompt the model AND to slice its output.
    # (The original prompt contained a mojibake character in the marker, so
    # the later split("𝕴 𝖆𝖒 π–π–Žπ–’:") never matched the prompt's own tag and
    # the garbled tag leaked into responses.)
    bot_tag = "𝕴 𝖆𝖒 π–π–Žπ–’:"

    # Format context with memory, then cue the model to speak as the bot.
    context = format_context(history) + f"You: {message}\n{bot_tag}"

    # Tokenize for CPU efficiency
    inputs = tokenizer.encode(context, return_tensors="pt")

    # Generate response (sampling; pad with EOS since DialoGPT's tokenizer
    # defines no pad token — presumably why pad_token_id is set; confirm
    # against the model card).
    outputs = model.generate(
        inputs,
        max_new_tokens=48,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        num_beams=1,
        repetition_penalty=1.1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode and keep only the text after the FINAL bot marker (earlier
    # occurrences belong to replayed history exchanges).
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = full_text.split(bot_tag)[-1].strip()

    # Drop any hallucinated follow-up dialog the model invented.
    if "\nYou:" in response:
        response = response.split("\nYou:")[0]

    # Apply emotional intelligence
    response = add_emotional_intelligence(response, message)

    # Ensure a natural ending. ("..." was removed from the set: response[-1]
    # is a single character and could never equal a 3-char string.)
    if response and response[-1] not in {".", "!", "?"}:
        response += "." if len(response) > 20 else "..."

    return response[:80]  # Hard character limit
99
 
100
# Chat interface with persistent memory
with gr.Blocks(title="𝕴 𝖆𝖒 π–π–Žπ–’", theme=gr.themes.Soft()) as demo:
    # Persistent per-session state: list of completed [user, bot] pairs.
    history_state = gr.State([])

    gr.Markdown("# 𝕴 𝖆𝖒 π–π–Žπ–’ \n*Chill β€’ Confident β€’ Remembers You*")

    chatbot = gr.Chatbot(height=300, bubble_full_width=False)
    msg = gr.Textbox(placeholder="What's on your mind?", container=False)
    clear = gr.Button("New Vibe", size="sm")

    def user(user_message, history):
        """Append the user message with a pending bot slot; clear the textbox."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Fill the pending bot slot using prior history as context."""
        message = history[-1][0]
        # Exclude the pending [message, None] pair from the model's context.
        response = generate_response(message, history[:-1])
        # Pair the reply with its own user message; appending [None, response]
        # (the previous behavior) rendered broken half-pairs in the chat.
        history[-1][1] = response
        # Two outputs are wired ([chatbot, history_state]), so return two
        # values — a single list would be unpacked across both outputs.
        return history, history

    def clear_chat():
        """Reset both the visible chat and the stored history."""
        return [], []

    # Event handling: save the message, then generate the reply.
    msg.submit(user, [msg, history_state], [msg, history_state]).then(
        bot, history_state, [chatbot, history_state]
    )
    clear.click(clear_chat, None, [chatbot, history_state])

    # Initialize with empty history
    demo.load(lambda: [], None, history_state)

demo.queue(concurrency_count=1).launch()