Trigger82 committed on
Commit
1c1f440
·
verified ·
1 Parent(s): a746c6d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -20
app.py CHANGED
@@ -2,14 +2,13 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import re
5
- import json
6
 
7
  # Lightweight CPU model
8
  model_id = "microsoft/DialoGPT-medium"
9
  tokenizer = AutoTokenizer.from_pretrained(model_id)
10
  model = AutoModelForCausalLM.from_pretrained(model_id)
11
 
12
- # Your persona definition
13
  PERSONA = """
14
  [System: You are 𝕴 𝖆𝖒 𝖍𝖎𝖒 - a fun, smooth, emotionally intelligent AI.
15
  You speak like a real person, not a robot. Reply like a calm, confident friend who gets the vibe.
@@ -17,27 +16,13 @@ Keep responses under 15 words. Use natural speech. Add emotional flavor: 😊
17
  """
18
 
19
  def format_context(history):
20
- """Create context using last 3 exchanges + key points"""
21
  context = PERSONA + "\n"
22
 
23
  # Add last 3 exchanges
24
  for user, bot in history[-3:]:
25
  context += f"You: {user}\n"
26
  context += f"𝕴 𝖆𝖒 𝖍𝖎𝖒: {bot}\n"
27
-
28
- # Extract key topics for long-term memory
29
- key_topics = set()
30
- for user, _ in history:
31
- if len(user.split()) > 3: # Only meaningful messages
32
- key_topics |= set(word.lower() for word in user.split() if word.isalpha() and len(word) > 3)
33
-
34
- # Add key topics if not recently mentioned
35
- if key_topics and len(history) > 3:
36
- recent_words = " ".join(user for user, _ in history[-3:]).lower()
37
- missing_topics = [topic for topic in key_topics if topic not in recent_words]
38
- if missing_topics:
39
- context += f"[Remember: we talked about {', '.join(missing_topics[:2])}]\n"
40
-
41
  return context
42
 
43
  def add_emotional_intelligence(response, message):
@@ -63,7 +48,7 @@ def add_emotional_intelligence(response, message):
63
  def generate_response(message, history):
64
  """Generate response with memory context"""
65
  # Format context with memory
66
- context = format_context(history) + f"You: {message}\n𝕴 𝖆𝖒 𝖍𝖎𝖒:"
67
 
68
  # Tokenize for CPU efficiency
69
  inputs = tokenizer.encode(context, return_tensors="pt")
@@ -104,7 +89,9 @@ with gr.Blocks(title="𝕴 𝖆𝖒 𝖍𝖎𝖒", theme=gr.themes.Soft()) as de
104
 
105
  gr.Markdown("# 𝕴 𝖆𝖒 𝖍𝖎𝖒 \n*Chill • Confident • Remembers You*")
106
 
107
- chatbot = gr.Chatbot(height=300, bubble_full_width=False)
 
 
108
  msg = gr.Textbox(placeholder="What's on your mind?", container=False)
109
  clear = gr.Button("New Vibe", size="sm")
110
 
@@ -132,4 +119,6 @@ with gr.Blocks(title="𝕴 𝖆𝖒 𝖍𝖎𝖒", theme=gr.themes.Soft()) as de
132
  # Initialize with empty history
133
  demo.load(lambda: [], None, history_state)
134
 
135
- demo.queue(concurrency_count=1).launch()
 
 
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import re
 
5
 
6
  # Lightweight CPU model
7
  model_id = "microsoft/DialoGPT-medium"
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
9
  model = AutoModelForCausalLM.from_pretrained(model_id)
10
 
11
+ # Persona definition
12
  PERSONA = """
13
  [System: You are 𝕴 𝖆𝖒 𝖍𝖎𝖒 - a fun, smooth, emotionally intelligent AI.
14
  You speak like a real person, not a robot. Reply like a calm, confident friend who gets the vibe.
 
16
  """
17
 
18
  def format_context(history):
19
+ """Create context using last 3 exchanges"""
20
  context = PERSONA + "\n"
21
 
22
  # Add last 3 exchanges
23
  for user, bot in history[-3:]:
24
  context += f"You: {user}\n"
25
  context += f"𝕴 𝖆𝖒 𝖍𝖎𝖒: {bot}\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  return context
27
 
28
  def add_emotional_intelligence(response, message):
 
48
  def generate_response(message, history):
49
  """Generate response with memory context"""
50
  # Format context with memory
51
+ context = format_context(history) + f"You: {message}\n𝕴 𝖆𝖒 𝖍𝖎𝖒:"
52
 
53
  # Tokenize for CPU efficiency
54
  inputs = tokenizer.encode(context, return_tensors="pt")
 
89
 
90
  gr.Markdown("# 𝕴 𝖆𝖒 𝖍𝖎𝖒 \n*Chill • Confident • Remembers You*")
91
 
92
+ # Use tuples format explicitly to avoid warning
93
+ chatbot = gr.Chatbot(height=300, bubble_full_width=False, type="tuples")
94
+
95
  msg = gr.Textbox(placeholder="What's on your mind?", container=False)
96
  clear = gr.Button("New Vibe", size="sm")
97
 
 
119
  # Initialize with empty history
120
  demo.load(lambda: [], None, history_state)
121
 
122
+ # Correct queue initialization for latest Gradio
123
+ demo.queue()
124
+ demo.launch()