Spaces:
Sleeping
Sleeping
Refined Logic
Browse files — Added chat history as a factor considered when assigning the response mode.
app.py
CHANGED
|
@@ -114,8 +114,17 @@ Be concise and direct with an overall friendly and engaging tone. Use minimal fo
|
|
| 114 |
])
|
| 115 |
|
| 116 |
# --- Core Logic Functions ---
|
| 117 |
-
def detect_subject(message):
|
| 118 |
-
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
message_lower = message.lower()
|
| 120 |
|
| 121 |
math_keywords = ['math', 'mathematics', 'solve', 'calculate', 'equation', 'formula', 'algebra', 'geometry', 'calculus', 'derivative', 'integral', 'theorem', 'proof', 'trigonometry', 'statistics', 'probability', 'arithmetic', 'fraction', 'decimal', 'percentage', 'graph', 'function', 'polynomial', 'logarithm', 'exponential', 'matrix', 'vector', 'limit', 'differential', 'optimization', 'summation']
|
|
@@ -124,28 +133,84 @@ def detect_subject(message):
|
|
| 124 |
|
| 125 |
study_keywords = ['study', 'studying', 'memorize', 'memory', 'exam', 'test', 'testing', 'quiz', 'quizzing', 'review', 'reviewing', 'learn', 'learning', 'remember', 'recall', 'focus', 'concentration', 'motivation', 'notes', 'note-taking', 'flashcard', 'flashcards', 'comprehension', 'understanding', 'retention', 'practice', 'drill', 'preparation', 'revision', 'cramming']
|
| 126 |
|
| 127 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
return math_template, "Math Mode"
|
| 129 |
-
elif
|
| 130 |
return research_template, "Research Mode"
|
| 131 |
-
elif
|
| 132 |
return study_template, "Study Mode"
|
| 133 |
else:
|
| 134 |
return general_template, "General Mode"
|
| 135 |
|
| 136 |
def smart_truncate(text, max_length=3000):
    """
    Truncate *text* to roughly ``max_length`` characters at a natural boundary.

    Prefers cutting at the last complete sentence inside the budget; falls
    back to the last word boundary; as a final resort (a single token with no
    whitespace) performs a hard character cut. A continuation marker is
    appended whenever truncation occurs.

    Args:
        text (str): Text to truncate.
        max_length (int): Character budget for the truncated body (marker excluded).

    Returns:
        str: ``text`` unchanged if it fits, otherwise the truncated text plus marker.
    """
    if len(text) <= max_length:
        return text

    marker = "... [Response truncated - ask for continuation]"
    head = text[:max_length]

    # Try to cut at a sentence boundary first.
    sentences = re.split(r'(?<=[.!?])\s+', head)
    if len(sentences) > 1:
        return ' '.join(sentences[:-1]) + marker

    # Otherwise cut at the last word boundary.
    words = head.split()
    if len(words) > 1:
        return ' '.join(words[:-1]) + marker

    # BUGFIX: when the head contains no internal whitespace (one giant token),
    # the original returned ' '.join([]) + marker — i.e. only the marker,
    # silently discarding the entire response. Keep the hard cut instead.
    return head + marker
|
| 149 |
|
| 150 |
def respond_with_enhanced_streaming(message, history):
|
| 151 |
"""Streams the bot's response, detecting the subject and handling errors."""
|
|
@@ -158,7 +223,8 @@ def respond_with_enhanced_streaming(message, history):
|
|
| 158 |
mode = ""
|
| 159 |
|
| 160 |
try:
|
| 161 |
-
|
|
|
|
| 162 |
|
| 163 |
# Build conversation history with proper LangChain message objects
|
| 164 |
messages = []
|
|
|
|
| 114 |
])
|
| 115 |
|
| 116 |
# --- Core Logic Functions ---
|
| 117 |
def detect_subject(message, history=None):
    """
    Detect the subject of the user's message from keywords and chat context.

    Scores the current message against per-mode keyword lists, blends in a
    recency-weighted score from recent user turns, adds a small persistence
    bonus for the mode the assistant last answered in, and keeps short,
    ambiguous follow-ups ("continue", "more", ...) in the previous mode.

    Args:
        message (str): Current user message.
        history (list | None): Chat history as dicts with "role"/"content" keys
            (Gradio "messages" format) — TODO confirm against the caller.

    Returns:
        tuple: (prompt template, mode label string)
    """
    message_lower = message.lower()

    math_keywords = ['math', 'mathematics', 'solve', 'calculate', 'equation', 'formula', 'algebra', 'geometry', 'calculus', 'derivative', 'integral', 'theorem', 'proof', 'trigonometry', 'statistics', 'probability', 'arithmetic', 'fraction', 'decimal', 'percentage', 'graph', 'function', 'polynomial', 'logarithm', 'exponential', 'matrix', 'vector', 'limit', 'differential', 'optimization', 'summation']

    # NOTE(review): the original research keyword list was lost in the diff
    # extraction — restore it from the repository. The entries below are a
    # placeholder reconstruction and must be verified.
    research_keywords = ['research', 'paper', 'essay', 'source', 'sources', 'cite', 'citation', 'references', 'bibliography', 'thesis', 'article', 'journal', 'summarize', 'analysis', 'analyze', 'evidence', 'argument', 'investigate', 'literature', 'report', 'topic']

    study_keywords = ['study', 'studying', 'memorize', 'memory', 'exam', 'test', 'testing', 'quiz', 'quizzing', 'review', 'reviewing', 'learn', 'learning', 'remember', 'recall', 'focus', 'concentration', 'motivation', 'notes', 'note-taking', 'flashcard', 'flashcards', 'comprehension', 'understanding', 'retention', 'practice', 'drill', 'preparation', 'revision', 'cramming']

    keyword_sets = {'math': math_keywords, 'research': research_keywords, 'study': study_keywords}
    templates = {
        'math': (math_template, "Math Mode"),
        'research': (research_template, "Research Mode"),
        'study': (study_template, "Study Mode"),
    }

    def _score(text):
        # One keyword-hit count per mode for a single lowercased text.
        return {mode: sum(1 for kw in kws if kw in text) for mode, kws in keyword_sets.items()}

    # Score the current message.
    current_scores = _score(message_lower)

    # Analyze recent conversation history for context.
    context_scores = {'math': 0.0, 'research': 0.0, 'study': 0.0}
    last_mode = None

    if history:
        recent_history = history[-6:]  # roughly the last 3 user/assistant exchanges
        window = len(recent_history)
        for i, exchange in enumerate(recent_history):
            if exchange.get("role") == "user":
                content_lower = exchange.get("content", "").lower()
                # BUGFIX: the weight must grow toward the END of the window so
                # the most recent user turn counts most. The original computed
                # `0.5 - i * 0.1`, which gave the OLDEST message the highest
                # weight — the opposite of its own comment.
                weight = max(0.1, 0.5 - (window - 1 - i) * 0.1)
                turn_scores = _score(content_lower)
                for mode in context_scores:
                    context_scores[mode] += weight * turn_scores[mode]
            elif exchange.get("role") == "assistant":
                # The assistant labels its replies with the active mode.
                content = exchange.get("content", "")
                if "*Math Mode*" in content:
                    last_mode = 'math'
                elif "*Research Mode*" in content:
                    last_mode = 'research'
                elif "*Study Mode*" in content:
                    last_mode = 'study'

    # Combine: the current message dominates, context nudges.
    final_scores = {mode: current_scores[mode] * 2.0 + context_scores[mode] for mode in keyword_sets}

    # Persistence bonus for continuing in the same mode.
    if last_mode and last_mode in final_scores:
        final_scores[last_mode] += 0.5

    # Short, ambiguous continuation messages stay in the last mode.
    ambiguous_phrases = ['next', 'continue', 'more', 'explain', 'help', 'what about', 'can you', 'how do', 'show me']
    is_ambiguous = any(phrase in message_lower for phrase in ambiguous_phrases) and len(message.split()) < 6
    if is_ambiguous and last_mode:
        return templates[last_mode]

    max_score = max(final_scores.values())
    if max_score == 0:
        return general_template, "General Mode"

    # Deterministic tie-break in math > research > study order, matching the
    # original if/elif chain.
    for mode in ('math', 'research', 'study'):
        if final_scores[mode] == max_score:
            return templates[mode]
    return general_template, "General Mode"
|
| 213 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
|
| 215 |
def respond_with_enhanced_streaming(message, history):
|
| 216 |
"""Streams the bot's response, detecting the subject and handling errors."""
|
|
|
|
| 223 |
mode = ""
|
| 224 |
|
| 225 |
try:
|
| 226 |
+
# UPDATED: Pass history to detect_subject
|
| 227 |
+
template, mode = detect_subject(message, history)
|
| 228 |
|
| 229 |
# Build conversation history with proper LangChain message objects
|
| 230 |
messages = []
|