MeteKaba committed on
Commit
da9eeed
·
verified ·
1 Parent(s): 14cb65f

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +241 -0
src/streamlit_app.py CHANGED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from rag_pipeline import build_rag_pipeline
from streamlit_extras.add_vertical_space import add_vertical_space

# --- PAGE CONFIG ---
# Must be the first Streamlit call in the script.
st.set_page_config(
    page_title="πŸ’¬ Personalized Lifecycle Companion",
    page_icon="πŸ’«",
    layout="centered",
)
# --- CUSTOM CSS (bubbles + badge) ---
# Hoisted into a named constant so the styling is easy to locate and tweak.
_CUSTOM_CSS = """
<style>
.main-title { text-align: center; font-size: 2.2em; font-weight: 700; color: #4A90E2; }
.subtitle { text-align: center; font-size: 1.1em; color: #666; margin-bottom: 1.5em; }
.user-bubble {
background: linear-gradient(180deg, #dbe9ff, #c7ddff);
padding: 0.75em 1em;
border-radius: 12px;
margin: 0.5em 0 0.25em 0;
max-width: 80%;
}
.assistant-bubble {
background: #f5f7fa;
padding: 0.9em 1em;
border-radius: 12px;
margin: 0.25em 0 0.8em 0;
border: 1px solid #e1e4e8;
max-width: 80%;
}
.meta-badge {
display: inline-block;
font-size: 0.72em;
padding: 2px 8px;
border-radius: 999px;
margin-left: 8px;
vertical-align: middle;
}
.badge-dataset { background: #fff6ea; color: #b36b00; border: 1px solid #f0e68c; }
.badge-general { background: #eefcf3; color: #0a7f53; border: 1px solid #bfead4; }
.doc-box { background-color: #fffbe6; padding: 0.6em 0.8em; border-radius: 8px; border: 1px solid #f0e68c; margin-bottom: 0.5em; }
.doc-q { font-weight: 600; color: #333; }
.doc-a { color: #555; }

/* Make chat area scrollable and avoid hiding under input */
.chat-area {
max-height: 70vh;
overflow-y: auto;
padding-right: 8px;
padding-bottom: 120px; /* Space for input bar */
}

/* Fix the input container at the bottom */
.input-container {
position: fixed;
bottom: 0;
left: 0;
right: 0;
background-color: #ffffff;
padding: 1rem 2rem;
box-shadow: 0 -2px 10px rgba(0, 0, 0, 0.05);
z-index: 999;
}

/* Optional: make buttons line up neatly */
.stButton button {
height: 2.5em;
}

/* Hide Streamlit footer and hamburger for cleaner look */
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""

st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
# --- HEADER ---
# App title and tagline rendered as styled HTML (classes defined in the CSS block).
st.markdown('<div class="main-title">πŸ’¬ MoodMate</div>', unsafe_allow_html=True)
st.markdown(
    '<div class="subtitle">Ask anything about personal, social, or business growth β€” powered by RAG + Gemini</div>',
    unsafe_allow_html=True,
)

add_vertical_space(2)
# --- LOAD PIPELINE ---
@st.cache_resource
def load_chain():
    """Build the RAG pipeline once and cache it across reruns/sessions.

    Returns the (llm, retriever, rag_chain) triple produced by
    build_rag_pipeline().
    """
    return build_rag_pipeline()


llm, retriever, rag_chain = load_chain()
# --- USER SETTINGS ---
st.markdown("### βš™οΈ Answer Selection Settings")

# Automatic vs Manual mode
auto_mode = st.checkbox("Automatic answer selection (default)", value=True)

# Robustness fix: always bind answer_type. It was previously defined only
# when auto_mode was off, but it is read as a global inside the send
# callback — an always-present default removes any chance of a NameError.
# The default matches the radio's index=0 choice, so behavior is unchanged.
answer_type = "Dataset-Based Answer"

# Manual answer type selection appears only if auto_mode is off
if not auto_mode:
    answer_type = st.radio(
        "Select answer type:",
        ("Dataset-Based Answer", "General Reasoning Answer"),
        index=0,
    )
add_vertical_space(1)
# --- SESSION STATE MEMORY ---
# st.session_state is dict-like, so setdefault seeds each key exactly once
# and leaves existing values untouched on reruns.
st.session_state.setdefault("chat_history", [])
# Ensure input_box key exists so it persists across runs.
st.session_state.setdefault("input_box", "")
# --- LAYOUT: chat area + input at bottom ---
import html  # stdlib; escapes free text before HTML interpolation below

chat_col = st.container()

# Render chat area (so it updates live on each run)
with chat_col:
    st.markdown("## πŸ’¬ Conversation")
    chat_area = st.container()
    with chat_area:
        # Render each turn in order
        for i, turn in enumerate(st.session_state.chat_history):
            # Fix: escape user/model text. These strings are interpolated
            # into raw HTML under unsafe_allow_html=True, so an unescaped
            # "<" would break the bubble markup (or inject arbitrary tags).
            user_text = html.escape(turn["user"])
            ai_text = html.escape(turn["ai"])

            # User bubble (left)
            st.markdown(f'<div class="user-bubble">πŸ§‘ You: {user_text}</div>', unsafe_allow_html=True)

            # Assistant bubble with subtle badge
            typ = turn.get("type", "General Reasoning")
            badge_html = (
                '<span class="meta-badge badge-dataset">Dataset-Based</span>'
                if typ == "Dataset-Based Answer"
                else '<span class="meta-badge badge-general">General Reasoning</span>'
            )

            st.markdown(
                f'<div class="assistant-bubble">πŸ€– Assistant: {ai_text} {badge_html}</div>',
                unsafe_allow_html=True,
            )

            # If dataset-based and has docs, show small expander for docs
            if turn.get("type") == "Dataset-Based Answer" and turn.get("docs"):
                with st.expander(f"πŸ“‚ Top Retrieved Documents for message {i+1}"):
                    for d in turn["docs"][:3]:
                        # Docs are stored as "Q: ...\nA: ..." page_content.
                        parts = d.page_content.split("\n")
                        q_text = html.escape(parts[0].replace("Q: ", "")) if len(parts) > 0 else ""
                        a_text = html.escape(parts[1].replace("A: ", "")) if len(parts) > 1 else ""
                        st.markdown(
                            f'<div class="doc-box"><div class="doc-q">Q: {q_text}</div><div class="doc-a">A: {a_text}</div></div>',
                            unsafe_allow_html=True,
                        )
148
+
149
+ # --- SEND CALLBACK LOGIC ---
150
+ def handle_send():
151
+ query = st.session_state.input_box.strip()
152
+ if not query:
153
+ st.warning("Please enter a message.")
154
+ return
155
+
156
+ with st.spinner("πŸ” Thinking and retrieving relevant information..."):
157
+ # --- Build unified chat history for contextual prompting ---
158
+ N_keep = 6 # Keep last 6 turns for context
159
+ history_for_prompt = st.session_state.chat_history[-N_keep:]
160
+ full_prompt = ""
161
+ for turn in history_for_prompt:
162
+ full_prompt += f"User: {turn['user']}\nAI: {turn['ai']}\n"
163
+ full_prompt += f"User: {query}\nAI:"
164
+
165
+ rag_answer, general_answer, docs = "", "", []
166
+
167
+ # --- AUTO MODE ---
168
+ if auto_mode:
169
+ # Step 1: Try dataset-based (RAG) first
170
+ rag_result = rag_chain({"question": query})
171
+ rag_answer = rag_result.get("answer", "")
172
+ docs = rag_result.get("source_documents", [])
173
+
174
+ # Step 2: Evaluate RAG answer quality
175
+ # Automatically decide whether to show the dataset-based answer or fall back to general reasoning
176
+ # Explanation:
177
+ # - any(kw in rag_answer.lower() for kw in fallback_keywords): checks if any "bad" keyword appears
178
+ # - len(rag_answer.strip()) < 50: checks if the dataset-based answer is too short (likely low quality)
179
+ # - not (...): inverts the condition β€” we show dataset answer only if it’s *good enough*
180
+ fallback_keywords = ["cannot answer", "no information", "based on the context", "i'm sorry"]
181
+ rag_too_short = len(rag_answer.strip()) < 50
182
+ rag_weak = any(kw in rag_answer.lower() for kw in fallback_keywords)
183
+
184
+ if rag_weak or rag_too_short:
185
+ # Step 3: Fallback to general reasoning ONLY if RAG is weak
186
+ # Use full_prompt (last N_keep turns + current query) to generate answer with LLM
187
+ general_response_obj = llm.invoke(full_prompt)
188
+ general_answer = getattr(general_response_obj, "content", str(general_response_obj))
189
+ chosen_answer = general_answer
190
+ chosen_type = "General Reasoning"
191
+ else:
192
+ chosen_answer = rag_answer
193
+ chosen_type = "Dataset-Based Answer"
194
+
195
+ # --- MANUAL MODE ---
196
+ else:
197
+ if answer_type == "Dataset-Based Answer":
198
+ rag_result = rag_chain({"question": query})
199
+ rag_answer = rag_result.get("answer", "")
200
+ docs = rag_result.get("source_documents", [])
201
+ chosen_answer = rag_answer
202
+ chosen_type = "Dataset-Based Answer"
203
+ else:
204
+ general_response_obj = llm.invoke(full_prompt)
205
+ general_answer = getattr(general_response_obj, "content", str(general_response_obj))
206
+ chosen_answer = general_answer
207
+ chosen_type = "General Reasoning"
208
+
209
+ # --- Append to unified chat history ---
210
+ st.session_state.chat_history.append({
211
+ "user": query,
212
+ "ai": chosen_answer,
213
+ "type": chosen_type,
214
+ "docs": docs if chosen_type == "Dataset-Based Answer" else None
215
+ })
216
+
217
+ # βœ… Clear input after sending
218
+ st.session_state.input_box = ""
# --- INPUT AREA (fixed bar at the bottom) ---
def _clear_chat():
    """on_click callback: wipe the conversation and any pending input.

    Fix: the previous lambda called st.rerun() inside the callback, which
    Streamlit disallows (callbacks already trigger a rerun when they return),
    and used a side-effect tuple — replaced with a plain named function.
    """
    st.session_state.chat_history.clear()
    st.session_state.input_box = ""


st.markdown('<div class="input-container">', unsafe_allow_html=True)

query = st.text_input(
    "πŸ’­ Type your message here...",
    key="input_box",
    placeholder="e.g. How can I improve my communication skills?",
    label_visibility="collapsed",
)

col1, col2 = st.columns([0.2, 0.8])
with col1:
    st.button("Send πŸ’¬", key="send_button", on_click=handle_send)
with col2:
    st.button(
        "🧹 Clear Chat",
        key="clear_button",
        help="Clears conversation history (not persistent).",
        on_click=_clear_chat,
    )

st.markdown('</div>', unsafe_allow_html=True)