KUNAL SHAW committed on
Commit
c4a40a4
·
1 Parent(s): 42222e5

Modern ChatGPT-style UI redesign

Browse files
Files changed (1) hide show
  1. streamlit_app/app.py +305 -283
streamlit_app/app.py CHANGED
@@ -1,18 +1,9 @@
1
  """
2
  Streamlit App for RAG Chatbot - Agentic AI eBook
3
-
4
- This is the main UI for the RAG chatbot. It provides:
5
- - Chat interface for asking questions
6
- - Configuration sidebar (API keys, top_k, etc.)
7
- - Display of retrieved chunks and confidence scores
8
- - Raw JSON response viewer
9
 
10
  Usage:
11
  streamlit run streamlit_app/app.py
12
-
13
- For Hugging Face Spaces deployment:
14
- - Set secrets in Space settings for PINECONE_API_KEY, OPENAI_API_KEY
15
- - Or let users input keys in the sidebar
16
  """
17
 
18
  import os
@@ -24,7 +15,7 @@ from dotenv import load_dotenv
24
  # Add parent directory to path for imports
25
  sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
26
 
27
- # Load environment variables from the project root .env file
28
  env_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.env')
29
  load_dotenv(env_path)
30
 
@@ -38,79 +29,135 @@ from app.rag_pipeline import RAGPipeline
38
  st.set_page_config(
39
  page_title="Agentic AI eBook Chatbot",
40
  page_icon="🤖",
41
- layout="wide",
42
- initial_sidebar_state="expanded"
43
  )
44
 
45
- # Custom CSS for better styling
46
  st.markdown("""
47
  <style>
48
- /* Main container styling */
49
- .main-header {
 
 
 
 
 
50
  font-size: 2.5rem;
51
- font-weight: bold;
52
- color: #1E88E5;
 
 
 
 
 
 
53
  text-align: center;
54
- margin-bottom: 1rem;
 
 
55
  }
56
 
57
- /* Answer card styling */
58
- .answer-card {
59
- background-color: #f0f7ff;
60
- border-left: 4px solid #1E88E5;
61
- padding: 1rem;
62
- border-radius: 0 8px 8px 0;
63
- margin: 1rem 0;
 
 
 
64
  }
65
 
66
- /* Confidence badge styling */
 
 
 
 
 
 
 
 
 
 
 
67
  .confidence-badge {
68
  display: inline-block;
69
- padding: 0.25rem 0.75rem;
70
- border-radius: 1rem;
71
- font-weight: bold;
72
- font-size: 0.9rem;
 
73
  }
74
 
75
- .confidence-high {
76
- background-color: #c8e6c9;
77
- color: #2e7d32;
78
  }
79
 
80
- .confidence-medium {
81
- background-color: #fff3e0;
82
- color: #ef6c00;
83
  }
84
 
85
- .confidence-low {
86
- background-color: #ffcdd2;
87
- color: #c62828;
88
  }
89
 
90
- /* Chunk card styling */
91
- .chunk-card {
92
- background-color: #fafafa;
93
- border: 1px solid #e0e0e0;
94
- padding: 0.75rem;
95
- border-radius: 8px;
96
- margin: 0.5rem 0;
 
 
97
  }
98
 
99
- /* Footer styling */
100
- .footer {
101
  text-align: center;
102
- color: #666;
103
- font-size: 0.8rem;
104
- margin-top: 2rem;
105
- padding-top: 1rem;
106
- border-top: 1px solid #e0e0e0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  }
108
  </style>
109
  """, unsafe_allow_html=True)
110
 
111
 
112
  # ============================================================================
113
- # Session State Initialization
114
  # ============================================================================
115
 
116
  if "messages" not in st.session_state:
@@ -124,249 +171,255 @@ if "last_response" not in st.session_state:
124
 
125
 
126
  # ============================================================================
127
- # Sidebar Configuration
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  # ============================================================================
129
 
130
  with st.sidebar:
131
- st.header("⚙️ Configuration")
132
 
133
- st.markdown("---")
134
-
135
- # API Keys section
136
- st.subheader("🔑 API Keys")
137
-
138
- # Pinecone API Key
139
  pinecone_key = st.text_input(
140
  "Pinecone API Key",
141
  type="password",
142
  value=os.getenv("PINECONE_API_KEY", ""),
143
- help="Required for vector search. Get your key at pinecone.io"
144
  )
145
 
146
- # Pinecone Index Name
147
  index_name = st.text_input(
148
- "Pinecone Index Name",
149
  value=os.getenv("PINECONE_INDEX", "agentic-ai-ebook"),
150
- help="Name of your Pinecone index"
151
  )
152
 
153
- # OpenAI API Key (optional)
154
- openai_key = st.text_input(
155
- "OpenAI API Key (optional)",
156
- type="password",
157
- value=os.getenv("OPENAI_API_KEY", ""),
158
- help="For LLM-powered answers. Leave empty if using Groq."
159
- )
160
-
161
- # Groq API Key (optional - FREE!)
162
  groq_key = st.text_input(
163
- "Groq API Key (FREE LLM)",
164
  type="password",
165
  value=os.getenv("GROQ_API_KEY", ""),
166
- help="Free LLM alternative! Get key at console.groq.com"
167
- )
168
-
169
- st.markdown("---")
170
-
171
- # Retrieval settings
172
- st.subheader("🔍 Retrieval Settings")
173
-
174
- top_k = st.slider(
175
- "Number of chunks to retrieve (top_k)",
176
- min_value=1,
177
- max_value=10,
178
- value=6,
179
- help="More chunks = more context but potentially more noise"
180
  )
181
 
182
- use_llm = st.checkbox(
183
- "Use LLM for answer generation",
184
- value=True,
185
- help="Uncheck to always use extractive mode"
186
- )
187
-
188
- local_mode = st.checkbox(
189
- "Local Mode (no Pinecone)",
190
- value=False,
191
- help="Use local vector storage instead of Pinecone"
192
  )
193
 
194
  st.markdown("---")
195
 
196
- # Initialize/Reinitialize button
197
- if st.button("🔄 Initialize Pipeline", use_container_width=True):
198
- with st.spinner("Initializing RAG pipeline..."):
199
- try:
200
- st.session_state.pipeline = RAGPipeline(
201
- pinecone_api_key=pinecone_key if pinecone_key else None,
202
- openai_api_key=openai_key if openai_key else None,
203
- groq_api_key=groq_key if groq_key else None,
204
- index_name=index_name,
205
- local_only=local_mode,
206
- top_k=top_k
207
- )
208
- st.success("✅ Pipeline initialized!")
209
- except Exception as e:
210
- st.error(f"❌ Error: {str(e)}")
211
 
212
- # Status indicator
213
  st.markdown("---")
214
- st.subheader("📊 Status")
215
 
 
 
 
 
 
 
 
 
 
 
 
 
216
  if st.session_state.pipeline:
217
- st.success("Pipeline: Ready")
218
- if st.session_state.pipeline.groq_client:
219
- st.info("Mode: Groq LLM (FREE)")
220
- elif st.session_state.pipeline.openai_client:
221
- st.info("Mode: OpenAI LLM")
222
- else:
223
- st.warning("Mode: Extractive (no LLM)")
224
  else:
225
- st.warning("Pipeline: Not initialized")
226
- st.caption("Click 'Initialize Pipeline' to start")
227
 
228
 
229
  # ============================================================================
230
- # Main Content Area
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
  # ============================================================================
232
 
233
  # Header
234
- st.markdown('<div class="main-header">🤖 Agentic AI eBook Chatbot</div>', unsafe_allow_html=True)
 
235
 
236
- st.markdown("""
237
- <p style="text-align: center; color: #666;">
238
- Ask questions about the Agentic AI eBook. Answers are strictly grounded in the document.
239
- </p>
240
- """, unsafe_allow_html=True)
241
 
242
- st.markdown("---")
 
 
243
 
244
- # Check if pipeline is initialized
245
- if not st.session_state.pipeline:
246
- st.info("👈 Please configure your API keys and click 'Initialize Pipeline' in the sidebar to start.")
 
 
 
 
 
 
 
 
247
 
248
- # Show sample queries
249
- st.subheader("📝 Sample Questions to Try")
250
- sample_queries = [
251
- "What is the definition of 'agentic AI' described in the eBook?",
252
- "List the three risks of agentic systems the eBook mentions.",
253
- "What are the recommended safeguards for deploying agentic AI?",
254
- "How does the eBook distinguish between autonomous agents and traditional automation?",
255
- "What future research directions does the eBook propose?"
256
  ]
257
 
258
- for query in sample_queries:
259
- st.markdown(f"- {query}")
 
 
 
 
 
 
 
260
 
261
  else:
262
- # Chat input MUST be outside columns/containers
263
- user_input = st.chat_input("Ask a question about the Agentic AI eBook...")
264
-
265
- # Chat interface
266
- col1, col2 = st.columns([2, 1])
267
-
268
- with col1:
269
- st.subheader("💬 Chat")
270
-
271
- # Display chat history
272
- chat_container = st.container()
273
-
274
- with chat_container:
275
- for message in st.session_state.messages:
276
- with st.chat_message(message["role"]):
277
- st.write(message["content"])
278
-
279
- if user_input:
280
- # Add user message to chat
281
- st.session_state.messages.append({"role": "user", "content": user_input})
282
 
283
- # Display user message
284
- with st.chat_message("user"):
285
- st.write(user_input)
286
 
287
- # Get response from pipeline
288
- with st.chat_message("assistant"):
289
- with st.spinner("Searching document and generating answer..."):
290
- try:
291
- response = st.session_state.pipeline.query(
292
- user_input,
293
- top_k=top_k,
294
- use_llm=use_llm
295
- )
296
-
297
- # Store response for display
298
- st.session_state.last_response = response
299
-
300
- # Display answer
301
- answer = response.get("final_answer", "No answer generated")
302
- st.write(answer)
303
-
304
- # Display confidence
305
- confidence = response.get("confidence", 0.0)
306
- if confidence >= 0.7:
307
- conf_class = "confidence-high"
308
- elif confidence >= 0.4:
309
- conf_class = "confidence-medium"
310
- else:
311
- conf_class = "confidence-low"
312
-
313
- st.markdown(
314
- f'<span class="confidence-badge {conf_class}">Confidence: {confidence:.3f}</span>',
315
- unsafe_allow_html=True
316
- )
317
-
318
- # Add assistant message to chat
319
- st.session_state.messages.append({
320
- "role": "assistant",
321
- "content": answer
322
- })
323
-
324
- except Exception as e:
325
- st.error(f"Error: {str(e)}")
326
- st.session_state.messages.append({
327
- "role": "assistant",
328
- "content": f"Error: {str(e)}"
329
- })
330
-
331
- # Clear chat button
332
  if st.button("🗑️ Clear Chat", use_container_width=True):
333
  st.session_state.messages = []
334
  st.session_state.last_response = None
335
  st.rerun()
336
-
337
- with col2:
338
- st.subheader("📚 Retrieved Chunks")
 
 
 
 
 
 
 
 
 
 
 
339
 
340
- if st.session_state.last_response:
341
- response = st.session_state.last_response
342
- chunks = response.get("retrieved_chunks", [])
343
-
344
- if chunks:
345
- for i, chunk in enumerate(chunks):
346
- with st.expander(
347
- f"Chunk {i+1} (Page {chunk.get('page', '?')}, Score: {chunk.get('score', 0):.3f})",
348
- expanded=(i == 0)
349
- ):
350
- st.markdown(f"**ID:** `{chunk.get('id', 'unknown')}`")
351
- st.markdown(f"**Page:** {chunk.get('page', 'unknown')}")
352
- st.markdown(f"**Relevance Score:** {chunk.get('score', 0):.4f}")
353
- st.markdown("**Text:**")
354
- st.text_area(
355
- "Chunk text",
356
- value=chunk.get("text", ""),
357
- height=150,
358
- label_visibility="collapsed",
359
- key=f"chunk_{i}"
360
- )
361
- else:
362
- st.info("No chunks retrieved yet. Ask a question!")
363
-
364
- # Raw JSON viewer
365
- st.markdown("---")
366
- with st.expander("🔍 Show Raw JSON Response"):
367
- st.json(response)
368
- else:
369
- st.info("Ask a question to see retrieved chunks.")
 
 
 
 
 
 
 
 
370
 
371
 
372
  # ============================================================================
@@ -375,39 +428,8 @@ else:
375
 
376
  st.markdown("---")
377
  st.markdown("""
378
- <div class="footer">
379
- <p>
380
- <strong>Built for AI Engineer Intern Assignment</strong><br>
381
- Answers are strictly grounded in the Agentic AI eBook.<br>
382
- Using: LangGraph • Pinecone • Sentence-Transformers • Streamlit
383
- </p>
384
  </div>
385
  """, unsafe_allow_html=True)
386
-
387
-
388
- # ============================================================================
389
- # Auto-initialize if env vars are set
390
- # ============================================================================
391
-
392
- # Try to auto-initialize on first load if env vars are present
393
- if st.session_state.pipeline is None:
394
- env_pinecone = os.getenv("PINECONE_API_KEY")
395
- env_groq = os.getenv("GROQ_API_KEY")
396
- if env_pinecone:
397
- try:
398
- st.session_state.pipeline = RAGPipeline(
399
- pinecone_api_key=env_pinecone,
400
- openai_api_key=os.getenv("OPENAI_API_KEY"),
401
- groq_api_key=env_groq,
402
- index_name=os.getenv("PINECONE_INDEX", "agentic-ai-ebook"),
403
- local_only=False
404
- )
405
- # Debug: show which LLM is being used
406
- if st.session_state.pipeline.groq_client:
407
- st.sidebar.success("✅ Groq LLM connected!")
408
- elif st.session_state.pipeline.openai_client:
409
- st.sidebar.info("ℹ️ OpenAI LLM connected")
410
- else:
411
- st.sidebar.warning("⚠️ No LLM - using extractive mode")
412
- except Exception as e:
413
- st.sidebar.error(f"Auto-init failed: {e}")
 
1
  """
2
  Streamlit App for RAG Chatbot - Agentic AI eBook
3
+ Modern ChatGPT/Gemini-style UI
 
 
 
 
 
4
 
5
  Usage:
6
  streamlit run streamlit_app/app.py
 
 
 
 
7
  """
8
 
9
  import os
 
15
  # Add parent directory to path for imports
16
  sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
17
 
18
+ # Load environment variables
19
  env_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), '.env')
20
  load_dotenv(env_path)
21
 
 
29
  st.set_page_config(
30
  page_title="Agentic AI eBook Chatbot",
31
  page_icon="🤖",
32
+ layout="centered",
33
+ initial_sidebar_state="collapsed"
34
  )
35
 
36
+ # Modern CSS styling
37
  st.markdown("""
38
  <style>
39
+ /* Hide default streamlit elements */
40
+ #MainMenu {visibility: hidden;}
41
+ footer {visibility: hidden;}
42
+
43
+ /* Header styling */
44
+ .main-title {
45
+ text-align: center;
46
  font-size: 2.5rem;
47
+ font-weight: 700;
48
+ background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
49
+ -webkit-background-clip: text;
50
+ -webkit-text-fill-color: transparent;
51
+ margin-bottom: 0.5rem;
52
+ }
53
+
54
+ .subtitle {
55
  text-align: center;
56
+ color: #888;
57
+ font-size: 1rem;
58
+ margin-bottom: 2rem;
59
  }
60
 
61
+ /* Chat message styling */
62
+ .user-message {
63
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
64
+ color: white;
65
+ padding: 1rem 1.5rem;
66
+ border-radius: 20px 20px 5px 20px;
67
+ margin: 0.5rem 0;
68
+ max-width: 80%;
69
+ margin-left: auto;
70
+ word-wrap: break-word;
71
  }
72
 
73
+ .assistant-message {
74
+ background: #f0f2f6;
75
+ color: #1a1a2e;
76
+ padding: 1rem 1.5rem;
77
+ border-radius: 20px 20px 20px 5px;
78
+ margin: 0.5rem 0;
79
+ max-width: 90%;
80
+ border: 1px solid #e0e0e0;
81
+ word-wrap: break-word;
82
+ }
83
+
84
+ /* Confidence badge */
85
  .confidence-badge {
86
  display: inline-block;
87
+ padding: 0.3rem 0.8rem;
88
+ border-radius: 20px;
89
+ font-size: 0.75rem;
90
+ font-weight: 600;
91
+ margin-top: 0.5rem;
92
  }
93
 
94
+ .conf-high {
95
+ background: #d4edda;
96
+ color: #155724;
97
  }
98
 
99
+ .conf-medium {
100
+ background: #fff3cd;
101
+ color: #856404;
102
  }
103
 
104
+ .conf-low {
105
+ background: #f8d7da;
106
+ color: #721c24;
107
  }
108
 
109
+ /* Source chips */
110
+ .source-chip {
111
+ display: inline-block;
112
+ background: #e9ecef;
113
+ color: #495057;
114
+ padding: 0.2rem 0.6rem;
115
+ border-radius: 12px;
116
+ font-size: 0.7rem;
117
+ margin: 0.1rem;
118
  }
119
 
120
+ /* Welcome container */
121
+ .welcome-box {
122
  text-align: center;
123
+ padding: 2rem;
124
+ background: linear-gradient(135deg, #667eea15 0%, #764ba215 100%);
125
+ border-radius: 20px;
126
+ margin: 2rem 0;
127
+ }
128
+
129
+ /* Sample question buttons */
130
+ .stButton > button {
131
+ border-radius: 20px !important;
132
+ border: 1px solid #667eea !important;
133
+ background: white !important;
134
+ color: #667eea !important;
135
+ font-size: 0.85rem !important;
136
+ padding: 0.5rem 1rem !important;
137
+ transition: all 0.3s ease !important;
138
+ }
139
+
140
+ .stButton > button:hover {
141
+ background: #667eea !important;
142
+ color: white !important;
143
+ }
144
+
145
+ /* Status indicator */
146
+ .status-ready {
147
+ color: #28a745;
148
+ font-weight: 600;
149
+ }
150
+
151
+ .status-not-ready {
152
+ color: #dc3545;
153
+ font-weight: 600;
154
  }
155
  </style>
156
  """, unsafe_allow_html=True)
157
 
158
 
159
  # ============================================================================
160
+ # Session State
161
  # ============================================================================
162
 
163
  if "messages" not in st.session_state:
 
171
 
172
 
173
  # ============================================================================
174
+ # Helper Functions
175
+ # ============================================================================
176
+
177
+ def initialize_pipeline(pinecone_key, index_name, openai_key, groq_key, local_mode):
178
+ """Initialize the RAG pipeline with given credentials."""
179
+ try:
180
+ pipeline = RAGPipeline(
181
+ pinecone_api_key=pinecone_key if not local_mode else None,
182
+ index_name=index_name,
183
+ namespace="agentic-ai",
184
+ openai_api_key=openai_key if openai_key else None,
185
+ groq_api_key=groq_key if groq_key else None,
186
+ local_mode=local_mode
187
+ )
188
+ return pipeline, None
189
+ except Exception as e:
190
+ return None, str(e)
191
+
192
+
193
+ def get_confidence_class(confidence):
194
+ """Get CSS class based on confidence score."""
195
+ if confidence >= 0.7:
196
+ return "conf-high"
197
+ elif confidence >= 0.4:
198
+ return "conf-medium"
199
+ return "conf-low"
200
+
201
+
202
+ # ============================================================================
203
+ # Sidebar for Settings
204
  # ============================================================================
205
 
206
  with st.sidebar:
207
+ st.markdown("## ⚙️ Settings")
208
 
 
 
 
 
 
 
209
  pinecone_key = st.text_input(
210
  "Pinecone API Key",
211
  type="password",
212
  value=os.getenv("PINECONE_API_KEY", ""),
213
+ key="pinecone_key"
214
  )
215
 
 
216
  index_name = st.text_input(
217
+ "Pinecone Index",
218
  value=os.getenv("PINECONE_INDEX", "agentic-ai-ebook"),
219
+ key="index_name"
220
  )
221
 
 
 
 
 
 
 
 
 
 
222
  groq_key = st.text_input(
223
+ "Groq API Key (FREE)",
224
  type="password",
225
  value=os.getenv("GROQ_API_KEY", ""),
226
+ key="groq_key",
227
+ help="Get free key at console.groq.com"
 
 
 
 
 
 
 
 
 
 
 
 
228
  )
229
 
230
+ openai_key = st.text_input(
231
+ "OpenAI Key (optional)",
232
+ type="password",
233
+ value=os.getenv("OPENAI_API_KEY", ""),
234
+ key="openai_key"
 
 
 
 
 
235
  )
236
 
237
  st.markdown("---")
238
 
239
+ top_k = st.slider("Chunks to retrieve", 1, 10, 6, key="top_k")
240
+ use_llm = st.checkbox("Use LLM", value=True, key="use_llm")
241
+ local_mode = st.checkbox("Local Mode", value=False, key="local_mode")
 
 
 
 
 
 
 
 
 
 
 
 
242
 
 
243
  st.markdown("---")
 
244
 
245
+ if st.button("🚀 Initialize", type="primary", use_container_width=True):
246
+ with st.spinner("Initializing..."):
247
+ pipeline, error = initialize_pipeline(
248
+ pinecone_key, index_name, openai_key, groq_key, local_mode
249
+ )
250
+ if error:
251
+ st.error(f"❌ {error}")
252
+ else:
253
+ st.session_state.pipeline = pipeline
254
+ st.success("✅ Ready!")
255
+
256
+ # Status
257
  if st.session_state.pipeline:
258
+ st.markdown('<p class="status-ready">● Pipeline Ready</p>', unsafe_allow_html=True)
 
 
 
 
 
 
259
  else:
260
+ st.markdown('<p class="status-not-ready">● Not Initialized</p>', unsafe_allow_html=True)
 
261
 
262
 
263
  # ============================================================================
264
+ # Auto-initialize if env vars are set
265
+ # ============================================================================
266
+
267
+ if st.session_state.pipeline is None:
268
+ pk = os.getenv("PINECONE_API_KEY", "")
269
+ gk = os.getenv("GROQ_API_KEY", "")
270
+
271
+ if pk and gk:
272
+ pipeline, _ = initialize_pipeline(
273
+ pk,
274
+ os.getenv("PINECONE_INDEX", "agentic-ai-ebook"),
275
+ os.getenv("OPENAI_API_KEY", ""),
276
+ gk,
277
+ False
278
+ )
279
+ if pipeline:
280
+ st.session_state.pipeline = pipeline
281
+
282
+
283
+ # ============================================================================
284
+ # Main UI
285
  # ============================================================================
286
 
287
  # Header
288
+ st.markdown('<h1 class="main-title">🤖 Agentic AI Chatbot</h1>', unsafe_allow_html=True)
289
+ st.markdown('<p class="subtitle">Ask questions about the Agentic AI eBook • Grounded answers only</p>', unsafe_allow_html=True)
290
 
 
 
 
 
 
291
 
292
+ # ============================================================================
293
+ # Chat Display
294
+ # ============================================================================
295
 
296
+ # Welcome screen if no messages
297
+ if not st.session_state.messages:
298
+ st.markdown("""
299
+ <div class="welcome-box">
300
+ <h2>👋 Welcome!</h2>
301
+ <p style="color: #666; max-width: 500px; margin: 0 auto 1rem auto;">
302
+ I'm your AI assistant for the Agentic AI eBook.
303
+ Ask me anything about the document and I'll find relevant answers.
304
+ </p>
305
+ </div>
306
+ """, unsafe_allow_html=True)
307
 
308
+ st.markdown("### 💡 Try these questions:")
309
+
310
+ sample_questions = [
311
+ "What is the definition of agentic AI?",
312
+ "What are the key characteristics of agentic systems?",
313
+ "What risks does the eBook mention?",
314
+ "What safeguards are recommended?"
 
315
  ]
316
 
317
+ cols = st.columns(2)
318
+ for i, q in enumerate(sample_questions):
319
+ with cols[i % 2]:
320
+ if st.button(q, key=f"sample_{i}", use_container_width=True):
321
+ if st.session_state.pipeline:
322
+ st.session_state.messages.append({"role": "user", "content": q})
323
+ st.rerun()
324
+ else:
325
+ st.warning("Please initialize the pipeline first (click sidebar)")
326
 
327
  else:
328
+ # Display all messages
329
+ for message in st.session_state.messages:
330
+ if message["role"] == "user":
331
+ st.markdown(f"""
332
+ <div style="display: flex; justify-content: flex-end; margin: 1rem 0;">
333
+ <div class="user-message">{message["content"]}</div>
334
+ </div>
335
+ """, unsafe_allow_html=True)
336
+ else:
337
+ content = message["content"]
338
+ confidence = message.get("confidence", 0)
339
+ sources = message.get("sources", [])
 
 
 
 
 
 
 
 
340
 
341
+ conf_class = get_confidence_class(confidence)
 
 
342
 
343
+ # Build sources HTML
344
+ sources_html = ""
345
+ if sources:
346
+ chips = " ".join([f'<span class="source-chip">📄 Page {s}</span>' for s in sources[:5]])
347
+ sources_html = f'<div style="margin-top: 0.5rem;">{chips}</div>'
348
+
349
+ st.markdown(f"""
350
+ <div style="margin: 1rem 0;">
351
+ <div class="assistant-message">
352
+ <div style="white-space: pre-wrap;">{content}</div>
353
+ <div style="margin-top: 0.75rem;">
354
+ <span class="confidence-badge {conf_class}">
355
+ Confidence: {confidence:.0%}
356
+ </span>
357
+ </div>
358
+ {sources_html}
359
+ </div>
360
+ </div>
361
+ """, unsafe_allow_html=True)
362
+
363
+ # Clear chat button
364
+ col1, col2, col3 = st.columns([1, 1, 1])
365
+ with col2:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  if st.button("🗑️ Clear Chat", use_container_width=True):
367
  st.session_state.messages = []
368
  st.session_state.last_response = None
369
  st.rerun()
370
+
371
+
372
+ # ============================================================================
373
+ # Chat Input (MUST be at top level, not in any container)
374
+ # ============================================================================
375
+
376
+ user_input = st.chat_input("Ask a question about the Agentic AI eBook...")
377
+
378
+ if user_input:
379
+ if st.session_state.pipeline is None:
380
+ st.warning("⚠️ Please initialize the pipeline first (open sidebar → click Initialize)")
381
+ else:
382
+ # Add user message
383
+ st.session_state.messages.append({"role": "user", "content": user_input})
384
 
385
+ # Get response
386
+ try:
387
+ with st.spinner("🔍 Searching document..."):
388
+ response = st.session_state.pipeline.query(
389
+ user_input,
390
+ top_k=st.session_state.get("top_k", 6),
391
+ use_llm=st.session_state.get("use_llm", True)
392
+ )
393
+
394
+ # Extract data
395
+ answer = response.get("final_answer", "I couldn't find an answer.")
396
+ confidence = response.get("confidence", 0.0)
397
+ chunks = response.get("retrieved_chunks", [])
398
+
399
+ # Get source pages
400
+ sources = list(set([c.get("page", "?") for c in chunks]))
401
+ sources.sort()
402
+
403
+ # Add assistant message
404
+ st.session_state.messages.append({
405
+ "role": "assistant",
406
+ "content": answer,
407
+ "confidence": confidence,
408
+ "sources": sources
409
+ })
410
+
411
+ st.session_state.last_response = response
412
+ st.rerun()
413
+
414
+ except Exception as e:
415
+ st.error(f"❌ Error: {str(e)}")
416
+ st.session_state.messages.append({
417
+ "role": "assistant",
418
+ "content": f"Sorry, an error occurred: {str(e)}",
419
+ "confidence": 0,
420
+ "sources": []
421
+ })
422
+ st.rerun()
423
 
424
 
425
  # ============================================================================
 
428
 
429
  st.markdown("---")
430
  st.markdown("""
431
+ <div style="text-align: center; color: #888; font-size: 0.8rem; padding-bottom: 2rem;">
432
+ Built with LangGraph • Pinecone • Groq • Streamlit<br>
433
+ <em>Answers are strictly grounded in the Agentic AI eBook</em>
 
 
 
434
  </div>
435
  """, unsafe_allow_html=True)