Update app.py

app.py CHANGED
@@ -18,23 +18,13 @@ st.set_page_config(
     initial_sidebar_state="expanded"
 )

-# Define personality questions -
+# Define personality questions - reduced to general ones
 PERSONALITY_QUESTIONS = [
     "What is [name]'s personality like?",
-    "What are [name]'s favorite hobbies?",
     "What does [name] do for work?",
-    "What are [name]'s
-    "What makes [name]
-    "
-    "What are [name]'s life goals?",
-    "What challenges has [name] overcome?",
-    "What is [name]'s family role?",
-    "What are [name]'s values?",
-    "What does [name] enjoy doing in free time?",
-    "What skills does [name] have?",
-    "What motivates [name]?",
-    "What are [name]'s achievements?",
-    "How would friends describe [name]?"
+    "What are [name]'s hobbies?",
+    "What makes [name] special?",
+    "Tell me about [name]"
 ]

 # Enhanced CSS styling

@@ -807,9 +797,10 @@ with st.sidebar:
         for i, question in enumerate(PERSONALITY_QUESTIONS):
             formatted_question = question.replace("[name]", name)
             if st.button(formatted_question, key=f"pq_{i}", use_container_width=True):
-                # Add the question to chat
+                # Add the question to chat and set flag to process it
                 user_message = {"role": "user", "content": formatted_question}
                 st.session_state.messages.append(user_message)
+                st.session_state.process_personality_question = formatted_question
                 st.rerun()
     else:
         st.markdown("""

@@ -1036,6 +1027,120 @@ for message in st.session_state.messages:
         else:
             st.markdown(message["content"])

+# Check if we need to process a personality question
+if hasattr(st.session_state, 'process_personality_question'):
+    prompt = st.session_state.process_personality_question
+    del st.session_state.process_personality_question  # Clear the flag
+
+    # Display user message
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    # Process the question using the same logic as chat input
+    # Update user tracking
+    update_online_users()
+
+    # Get RAG response
+    with st.chat_message("assistant"):
+        if rag_system and rag_system.model and rag_system.get_collection_count() > 0:
+            # Search documents first
+            search_results = rag_system.search(prompt, n_results=5)
+
+            # Debug output for troubleshooting
+            if search_results:
+                st.info(f"Found {len(search_results)} potential matches. Best similarity: {search_results[0]['similarity']:.3f}")
+            else:
+                st.warning("No search results returned from vector database")
+
+            # Check if we found relevant documents (very low threshold)
+            if search_results and search_results[0]['similarity'] > 0.001:  # Ultra-low threshold
+                # Generate document-based answer
+                result = rag_system.generate_answer(
+                    prompt,
+                    search_results,
+                    use_ai_enhancement=use_ai_enhancement,
+                    unlimited_tokens=unlimited_tokens
+                )
+
+                # Display AI answer or extracted answer
+                if use_ai_enhancement and result['has_both']:
+                    answer_text = result['ai_answer']
+                    st.markdown(f"**AI Enhanced Answer:** {answer_text}")
+
+                    # Also show extracted answer for comparison if different
+                    if result['extracted_answer'] != answer_text:
+                        with st.expander("View Extracted Answer"):
+                            st.markdown(result['extracted_answer'])
+                else:
+                    answer_text = result['extracted_answer']
+                    st.markdown(f"**Document Answer:** {answer_text}")
+
+                    # Show why AI enhancement wasn't used
+                    if use_ai_enhancement and not result['has_both']:
+                        st.info("AI enhancement failed - showing extracted answer from documents")
+
+                # Show RAG info with more details
+                if show_sources and result['sources']:
+                    confidence_text = f"{result['confidence']*100:.1f}%" if show_confidence else ""
+                    st.markdown(f"""
+                    <div class="rag-attribution">
+                        <strong>Sources:</strong> {', '.join(result['sources'])}<br>
+                        <strong>Confidence:</strong> {confidence_text}<br>
+                        <strong>Found:</strong> {len(search_results)} relevant sections<br>
+                        <strong>Best Match:</strong> {search_results[0]['similarity']:.3f} similarity
+                    </div>
+                    """, unsafe_allow_html=True)
+
+                # Add to messages with RAG info
+                assistant_message = {
+                    "role": "assistant",
+                    "content": answer_text,
+                    "rag_info": {
+                        "sources": result['sources'],
+                        "confidence": result['confidence'],
+                        "extracted_answer": result['extracted_answer'],
+                        "has_ai": result['has_both']
+                    }
+                }
+
+            else:
+                # No relevant documents found - show debug info
+                if search_results:
+                    st.warning(f"Found documents but similarity too low (best: {search_results[0]['similarity']:.3f}). Using general AI...")
+                else:
+                    st.warning("No documents found in search. Using general AI...")
+
+                general_response = get_general_ai_response(prompt, unlimited_tokens=unlimited_tokens)
+                st.markdown(f"**General AI:** {general_response}")
+
+                assistant_message = {
+                    "role": "assistant",
+                    "content": general_response,
+                    "rag_info": {"sources": [], "confidence": 0, "mode": "general"}
+                }
+
+        else:
+            # RAG system not ready - use general AI
+            if rag_system and rag_system.get_collection_count() == 0:
+                st.warning("No documents indexed. Sync from GitHub or upload documents first...")
+            else:
+                st.error("RAG system not ready. Using general AI mode...")
+
+            general_response = get_general_ai_response(prompt, unlimited_tokens=unlimited_tokens)
+            st.markdown(f"**General AI:** {general_response}")
+
+            assistant_message = {
+                "role": "assistant",
+                "content": general_response,
+                "rag_info": {"sources": [], "confidence": 0, "mode": "general"}
+            }
+
+    # Add assistant message to history
+    st.session_state.messages.append(assistant_message)
+
+    # Auto-save
+    save_chat_history(st.session_state.messages)
+
 # Chat input
 if prompt := st.chat_input("Ask questions about your documents..."):
     # Update user tracking