uumerrr684 committed on
Commit
c58c8df
·
verified ·
1 Parent(s): e351eb9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -159
app.py CHANGED
@@ -18,6 +18,25 @@ st.set_page_config(
18
  initial_sidebar_state="expanded"
19
  )
20
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  # Enhanced CSS styling
22
  st.markdown("""
23
  <style>
@@ -144,165 +163,6 @@ HISTORY_FILE = "rag_chat_history.json"
144
  SESSIONS_FILE = "rag_chat_sessions.json"
145
  USERS_FILE = "online_users.json"
146
 
147
- # ================= PERSONALITY QUESTIONS =================
148
-
149
- # Replace the personality questions section (around line 760-780) with this fixed version:
150
-
151
- # Personality Questions Section
152
- st.header("🎭 Personality Questions")
153
-
154
- # Name input for personalizing questions
155
- name_input = st.text_input("Enter name for personalized questions:", placeholder="e.g., Sarah, Ahmed", help="Replace [name] in questions with this name")
156
-
157
- if name_input.strip():
158
- name = name_input.strip()
159
- st.markdown(f"""
160
- <div class="personality-section">
161
- <strong>💫 Quick Questions for {name}:</strong><br>
162
- <small>Click any question to ask about {name}</small>
163
- </div>
164
- """, unsafe_allow_html=True)
165
-
166
- # Display personality questions as clickable buttons
167
- for i, question in enumerate(PERSONALITY_QUESTIONS):
168
- formatted_question = question.replace("[name]", name)
169
- if st.button(formatted_question, key=f"pq_{i}", use_container_width=True):
170
- # Add the question to chat and set flag to process it
171
- user_message = {"role": "user", "content": formatted_question}
172
- st.session_state.messages.append(user_message)
173
- st.session_state.process_personality_question = formatted_question
174
- st.rerun()
175
- else:
176
- st.markdown("""
177
- <div class="personality-section">
178
- <strong>💫 Sample Questions:</strong><br>
179
- <small>Enter a name above to personalize these questions</small>
180
- </div>
181
- """, unsafe_allow_html=True)
182
-
183
- # Show sample questions without names
184
- for question in PERSONALITY_QUESTIONS[:5]: # Show first 5 as examples
185
- st.markdown(f"• {question}")
186
-
187
- # Then, modify the main chat processing section to handle personality questions
188
- # Add this right after the chat input section and before the existing chat processing:
189
-
190
- # Check if we need to process a personality question
191
- if hasattr(st.session_state, 'process_personality_question'):
192
- prompt = st.session_state.process_personality_question
193
- del st.session_state.process_personality_question # Clear the flag
194
-
195
- # Display user message
196
- with st.chat_message("user"):
197
- st.markdown(prompt)
198
-
199
- # Process the question using the same logic as chat input
200
- # Update user tracking
201
- update_online_users()
202
-
203
- # Get RAG response
204
- with st.chat_message("assistant"):
205
- if rag_system and rag_system.model and rag_system.get_collection_count() > 0:
206
- # Search documents first
207
- search_results = rag_system.search(prompt, n_results=5)
208
-
209
- # Debug output for troubleshooting
210
- if search_results:
211
- st.info(f"🔍 Found {len(search_results)} potential matches. Best similarity: {search_results[0]['similarity']:.3f}")
212
- else:
213
- st.warning("🔍 No search results returned from vector database")
214
-
215
- # Check if we found relevant documents (very low threshold)
216
- if search_results and search_results[0]['similarity'] > 0.001: # Ultra-low threshold
217
- # Generate document-based answer
218
- result = rag_system.generate_answer(
219
- prompt,
220
- search_results,
221
- use_ai_enhancement=use_ai_enhancement,
222
- unlimited_tokens=unlimited_tokens
223
- )
224
-
225
- # Display AI answer or extracted answer
226
- if use_ai_enhancement and result['has_both']:
227
- answer_text = result['ai_answer']
228
- st.markdown(f"🤖 **AI Enhanced Answer:** {answer_text}")
229
-
230
- # Also show extracted answer for comparison if different
231
- if result['extracted_answer'] != answer_text:
232
- with st.expander("📄 View Extracted Answer"):
233
- st.markdown(result['extracted_answer'])
234
- else:
235
- answer_text = result['extracted_answer']
236
- st.markdown(f"📄 **Document Answer:** {answer_text}")
237
-
238
- # Show why AI enhancement wasn't used
239
- if use_ai_enhancement and not result['has_both']:
240
- st.info("💡 AI enhancement failed - showing extracted answer from documents")
241
-
242
- # Show RAG info with more details
243
- if show_sources and result['sources']:
244
- confidence_text = f"{result['confidence']*100:.1f}%" if show_confidence else ""
245
- st.markdown(f"""
246
- <div class="rag-attribution">
247
- <strong>📁 Sources:</strong> {', '.join(result['sources'])}<br>
248
- <strong>🎯 Confidence:</strong> {confidence_text}<br>
249
- <strong>📊 Found:</strong> {len(search_results)} relevant sections<br>
250
- <strong>🔍 Best Match:</strong> {search_results[0]['similarity']:.3f} similarity
251
- </div>
252
- """, unsafe_allow_html=True)
253
-
254
- # Add to messages with RAG info
255
- assistant_message = {
256
- "role": "assistant",
257
- "content": answer_text,
258
- "rag_info": {
259
- "sources": result['sources'],
260
- "confidence": result['confidence'],
261
- "extracted_answer": result['extracted_answer'],
262
- "has_ai": result['has_both']
263
- }
264
- }
265
-
266
- else:
267
- # No relevant documents found - show debug info
268
- if search_results:
269
- st.warning(f"📄 Found documents but similarity too low (best: {search_results[0]['similarity']:.3f}). Using general AI...")
270
- else:
271
- st.warning("📄 No documents found in search. Using general AI...")
272
-
273
- general_response = get_general_ai_response(prompt, unlimited_tokens=unlimited_tokens)
274
- st.markdown(f"💬 **General AI:** {general_response}")
275
-
276
- assistant_message = {
277
- "role": "assistant",
278
- "content": general_response,
279
- "rag_info": {"sources": [], "confidence": 0, "mode": "general"}
280
- }
281
-
282
- else:
283
- # RAG system not ready - use general AI
284
- if rag_system and rag_system.get_collection_count() == 0:
285
- st.warning("No documents indexed. Sync from GitHub or upload documents first...")
286
- else:
287
- st.error("RAG system not ready. Using general AI mode...")
288
-
289
- general_response = get_general_ai_response(prompt, unlimited_tokens=unlimited_tokens)
290
- st.markdown(f"💬 **General AI:** {general_response}")
291
-
292
- assistant_message = {
293
- "role": "assistant",
294
- "content": general_response,
295
- "rag_info": {"sources": [], "confidence": 0, "mode": "general"}
296
- }
297
-
298
- # Add assistant message to history
299
- st.session_state.messages.append(assistant_message)
300
-
301
- # Auto-save
302
- save_chat_history(st.session_state.messages)
303
-
304
- # Continue with the existing chat input processing...
305
-
306
  # ================= GITHUB INTEGRATION =================
307
 
308
  def clone_github_repo():
 
18
  initial_sidebar_state="expanded"
19
  )
20
 
21
+ # Define personality questions - THIS WAS MISSING!
22
+ PERSONALITY_QUESTIONS = [
23
+ "What is [name]'s personality like?",
24
+ "What are [name]'s favorite hobbies?",
25
+ "What does [name] do for work?",
26
+ "What are [name]'s strengths?",
27
+ "What makes [name] unique?",
28
+ "What is [name]'s educational background?",
29
+ "What are [name]'s life goals?",
30
+ "What challenges has [name] overcome?",
31
+ "What is [name]'s family role?",
32
+ "What are [name]'s values?",
33
+ "What does [name] enjoy doing in free time?",
34
+ "What skills does [name] have?",
35
+ "What motivates [name]?",
36
+ "What are [name]'s achievements?",
37
+ "How would friends describe [name]?"
38
+ ]
39
+
40
  # Enhanced CSS styling
41
  st.markdown("""
42
  <style>
 
163
  SESSIONS_FILE = "rag_chat_sessions.json"
164
  USERS_FILE = "online_users.json"
165
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
166
  # ================= GITHUB INTEGRATION =================
167
 
168
  def clone_github_repo():