SyedZainAliShah committed on
Commit
dfcf54f
·
verified ·
1 Parent(s): 4edc0b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +121 -115
app.py CHANGED
@@ -263,18 +263,14 @@ Instructions:
263
 
264
  # Add conversation history (last 3 exchanges for context)
265
  if history and len(history) > 0:
266
- # History is in tuple format [[user, bot], [user, bot]]
267
- # Take last 3 exchanges
268
  recent_history = history[-3:] if len(history) > 3 else history
269
- for exchange in recent_history:
270
- if isinstance(exchange, (list, tuple)) and len(exchange) >= 2:
 
271
  messages.append({
272
- "role": "user",
273
- "content": exchange[0]
274
- })
275
- messages.append({
276
- "role": "assistant",
277
- "content": exchange[1]
278
  })
279
 
280
  # Add current query with context
@@ -288,10 +284,10 @@ Question: {query}"""
288
  "content": user_message
289
  })
290
 
291
- # Call Groq API
292
  chat_completion = client.chat.completions.create(
293
  messages=messages,
294
- model="llama3-8b-8192",
295
  temperature=0.3,
296
  max_tokens=1024,
297
  )
@@ -341,117 +337,127 @@ def download_chat_history():
341
  def clear_history():
342
  """Clear conversation history"""
343
  document_store['conversation_history'] = []
344
- return [], "History cleared successfully!"
345
 
346
  # Build Gradio Interface
347
- with gr.Blocks(title="Enhanced RAG Chatbot") as demo:
348
-
349
- gr.Markdown("""
350
- # Enhanced RAG-Based Chatbot
351
- Upload PDF/DOCX files and ask questions about their content!
352
-
353
- **Features:**
354
- - Multiple file support (PDF & DOCX)
355
- - Semantic embeddings with sentence-transformers
356
- - Document preview & summaries
357
- - Conversational memory
358
- - Source references with page numbers
359
- - Download chat history
360
- """)
361
-
362
- with gr.Row():
363
- with gr.Column(scale=1):
364
- file_upload = gr.File(
365
- label="Upload Documents (PDF/DOCX)",
366
- file_count="multiple",
367
- file_types=[".pdf", ".docx"]
368
- )
369
- process_btn = gr.Button("Process Documents", variant="primary")
370
- process_output = gr.Markdown(label="Processing Status")
 
 
 
 
 
 
 
371
 
372
- gr.Markdown("### Chat History Options")
373
- download_btn = gr.Button("Download History (JSON)")
374
- download_file = gr.File(label="Download", visible=True)
375
- clear_btn = gr.Button("Clear History")
376
- clear_msg = gr.Textbox(label="Status", interactive=False, visible=False)
377
-
378
- with gr.Column(scale=2):
379
- chatbot = gr.Chatbot(label="Conversation", height=500)
380
- query_input = gr.Textbox(
381
- label="Ask a question",
382
- placeholder="Type your question here and press Enter...",
383
- lines=2
384
- )
385
- submit_btn = gr.Button("Ask Question", variant="primary")
386
-
387
- # Event handlers
388
- process_btn.click(
389
- fn=process_files,
390
- inputs=[file_upload],
391
- outputs=[process_output]
392
- )
393
-
394
- def handle_question(question, history):
395
- """Handle user question and return updated chat"""
396
- if not question or not question.strip():
397
- return history, ""
398
 
399
- # Ensure history is a list
400
- if history is None:
401
- history = []
 
 
 
402
 
403
- # Generate answer
404
- answer = generate_answer(question, history)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
 
406
- # Append as tuple (user_msg, bot_msg) - Gradio default format
407
- history = history + [[question, answer]]
 
 
 
 
 
 
408
 
409
- return history, ""
410
-
411
- # Submit button click
412
- submit_btn.click(
413
- fn=handle_question,
414
- inputs=[query_input, chatbot],
415
- outputs=[chatbot, query_input]
416
- )
417
-
418
- # Enter key press
419
- query_input.submit(
420
- fn=handle_question,
421
- inputs=[query_input, chatbot],
422
- outputs=[chatbot, query_input]
423
- )
424
-
425
- # Download history
426
- download_btn.click(
427
- fn=download_chat_history,
428
- outputs=[download_file]
429
- )
430
-
431
- # Clear history
432
- def clear_and_show(history):
433
- document_store['conversation_history'] = []
434
- return [], "History cleared!", gr.update(visible=True)
435
-
436
- clear_btn.click(
437
- fn=clear_and_show,
438
- inputs=[chatbot],
439
- outputs=[chatbot, clear_msg, clear_msg]
440
- )
441
-
442
- gr.Markdown("""
443
- ---
444
- ### How RAG Works:
445
- 1. **Retrieval**: Finds relevant text chunks from uploaded documents using semantic similarity
446
- 2. **Augmentation**: Combines retrieved context with your question
447
- 3. **Generation**: Uses Groq LLM to generate accurate answers based on the context
448
 
449
- ### Usage Instructions:
450
- 1. Upload one or more PDF/DOCX files
451
- 2. Click "Process Documents" and wait for confirmation
452
- 3. Ask questions about the content
453
- 4. Download chat history anytime as JSON
454
- """)
455
 
 
456
  if __name__ == "__main__":
 
457
  demo.launch(ssr_mode=False)
 
263
 
264
  # Add conversation history (last 3 exchanges for context)
265
  if history and len(history) > 0:
266
+ # Keep the last 3 history messages (any role) for conversational context
 
267
  recent_history = history[-3:] if len(history) > 3 else history
268
+ for msg in recent_history:
269
+ # History format from Gradio Chatbot with type="messages"
270
+ if isinstance(msg, dict) and "role" in msg and "content" in msg:
271
  messages.append({
272
+ "role": msg["role"],
273
+ "content": msg["content"]
 
 
 
 
274
  })
275
 
276
  # Add current query with context
 
284
  "content": user_message
285
  })
286
 
287
+ # Call Groq API with updated model
288
  chat_completion = client.chat.completions.create(
289
  messages=messages,
290
+ model="llama-3.1-8b-instant", # Updated model
291
  temperature=0.3,
292
  max_tokens=1024,
293
  )
 
def clear_history():
    """Reset the stored conversation log and clear the chat display.

    Returns:
        tuple: ``([], status_message)`` — an empty messages list for the
        Chatbot component plus a user-facing status string.
    """
    # Drop all recorded exchanges held in the module-level store.
    document_store['conversation_history'] = []
    # Return an explicit empty list rather than None: for a
    # type="messages" Chatbot, [] unambiguously renders an empty chat,
    # while None may be treated as "no update" by some Gradio versions.
    return [], "History cleared successfully!"
 
342
  # Build Gradio Interface
343
def create_demo():
    """Build and return the Gradio Blocks interface for the RAG chatbot.

    Lays out the upload/processing column, the chat column, and the
    history utilities, then wires each widget to its handler. The app is
    returned un-launched so the caller controls ``launch()`` options.
    """
    with gr.Blocks(title="Enhanced RAG Chatbot") as demo:

        gr.Markdown("""
        # Enhanced RAG-Based Chatbot
        Upload PDF/DOCX files and ask questions about their content!

        **Features:**
        - Multiple file support (PDF & DOCX)
        - Semantic embeddings with sentence-transformers
        - Document preview & summaries
        - Conversational memory
        - Source references with page numbers
        - Download chat history
        """)

        with gr.Row():
            with gr.Column(scale=1):
                file_upload = gr.File(
                    label="Upload Documents (PDF/DOCX)",
                    file_count="multiple",
                    file_types=[".pdf", ".docx"]
                )
                process_btn = gr.Button("Process Documents", variant="primary")
                process_output = gr.Markdown(label="Processing Status")

                gr.Markdown("### Chat History Options")
                download_btn = gr.Button("Download History (JSON)")
                download_file = gr.File(label="Download", visible=True)
                clear_btn = gr.Button("Clear History")
                clear_msg = gr.Textbox(label="Status", interactive=False, visible=False)

            with gr.Column(scale=2):
                # type="messages" stores history as role/content dicts,
                # matching what generate_answer expects to replay.
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=500,
                    type="messages"
                )
                query_input = gr.Textbox(
                    label="Ask a question",
                    placeholder="Type your question here and press Enter...",
                    lines=2
                )
                submit_btn = gr.Button("Ask Question", variant="primary")

        # Event handlers
        process_btn.click(
            fn=process_files,
            inputs=[file_upload],
            outputs=[process_output]
        )

        def respond(message, chat_history):
            """Handle a user message: run the RAG pipeline and append the
            user/assistant pair to the chat in messages format."""
            if not message or not message.strip():
                return chat_history

            # Ensure chat_history is a list
            if chat_history is None:
                chat_history = []

            # Generate answer from the RAG pipeline
            bot_response = generate_answer(message, chat_history)

            # Build a NEW list instead of mutating Gradio's input state
            # in place — in-place mutation can confuse change detection.
            chat_history = chat_history + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": bot_response},
            ]
            return chat_history

        # Submit button and Enter key share the same pipeline; the
        # chained .then() clears the input box after the answer arrives.
        submit_btn.click(
            fn=respond,
            inputs=[query_input, chatbot],
            outputs=[chatbot]
        ).then(
            lambda: "",
            outputs=[query_input]
        )

        query_input.submit(
            fn=respond,
            inputs=[query_input, chatbot],
            outputs=[chatbot]
        ).then(
            lambda: "",
            outputs=[query_input]
        )

        # Download history
        download_btn.click(
            fn=download_chat_history,
            outputs=[download_file]
        )

        # Clear history. BUG FIX: clear_msg is created with visible=False
        # and clear_history() alone never reveals it, so the confirmation
        # text could never be seen. Wrap the handler so the status textbox
        # is also made visible when the history is cleared.
        def _clear_and_notify():
            cleared_chat, status = clear_history()
            return cleared_chat, gr.update(value=status, visible=True)

        clear_btn.click(
            fn=_clear_and_notify,
            outputs=[chatbot, clear_msg]
        )

        gr.Markdown("""
        ---
        ### How RAG Works:
        1. **Retrieval**: Finds relevant text chunks from uploaded documents using semantic similarity
        2. **Augmentation**: Combines retrieved context with your question
        3. **Generation**: Uses Groq LLM to generate accurate answers based on the context

        ### Usage Instructions:
        1. Upload one or more PDF/DOCX files
        2. Click "Process Documents" and wait for confirmation
        3. Ask questions about the content
        4. Download chat history anytime as JSON
        """)

    return demo
 
 
 
 
 
459
 
# Launch the app only when this file is executed directly (not on import).
if __name__ == "__main__":
    demo = create_demo()
    # ssr_mode=False serves the classic client-rendered Gradio frontend.
    demo.launch(ssr_mode=False)