wahab5763 committed on
Commit
8d8049a
·
verified ·
1 Parent(s): e4652f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -43
app.py CHANGED
@@ -7,8 +7,6 @@ import os
7
  import torch
8
  import pickle
9
  import base64
10
- import re
11
- from pyngrok import ngrok
12
  from googleapiclient.discovery import build
13
  from google_auth_oauthlib.flow import InstalledAppFlow
14
  from google.auth.transport.requests import Request
@@ -54,8 +52,6 @@ if "candidates_message_shown" not in st.session_state:
54
  st.session_state.candidates_message_shown = False
55
  if "vector_db_message_shown" not in st.session_state:
56
  st.session_state.vector_db_message_shown = False
57
- if "pending_query" not in st.session_state:
58
- st.session_state.pending_query = False
59
 
60
  def count_tokens(text):
61
  return len(text.split())
@@ -77,7 +73,6 @@ def reset_session_state():
77
  st.session_state.messages = []
78
  st.session_state.candidates_message_shown = False
79
  st.session_state.vector_db_message_shown = False
80
- st.session_state.pending_query = False
81
  for filename in ["token.json", "data_chunks.pkl", "embeddings.pkl", "vector_store.index", "vector_database.pkl"]:
82
  if os.path.exists(filename):
83
  os.remove(filename)
@@ -400,50 +395,38 @@ def handle_user_query():
400
  # Append user message to chat
401
  st.session_state.messages.append({"role": "user", "content": user_input})
402
 
403
- # Append assistant "thinking" message
404
- st.session_state.messages.append({"role": "assistant", "content": "💭 Processing your query..."})
405
-
406
- # Display chat messages
407
- for msg in st.session_state.messages:
408
- if msg["role"] == "user":
409
- with st.chat_message("user"):
410
- st.markdown(msg["content"])
411
- elif msg["role"] == "assistant":
412
- with st.chat_message("assistant"):
413
- st.markdown(msg["content"])
414
-
415
  # Process the query
416
  process_candidate_emails(user_input, similarity_threshold)
417
 
418
- # If there's a candidate context, call the LLM API
419
  if st.session_state.candidate_context:
 
420
  call_llm_api(user_input)
421
 
422
- # Display chat messages again with updated AI response
423
- for msg in st.session_state.messages:
424
- if msg["role"] == "user":
425
- with st.chat_message("user"):
426
- st.markdown(msg["content"])
427
- elif msg["role"] == "assistant":
428
- with st.chat_message("assistant"):
429
- st.markdown(msg["content"])
430
-
431
- # Display matching email chunks in an expander
432
- if st.session_state.raw_candidates:
433
- with st.expander("🔎 Matching Email Chunks:", expanded=False):
434
- for candidate, sim in st.session_state.raw_candidates:
435
- # Get a snippet (first 150 characters) of the body instead of full body content.
436
- body = candidate.get('body', 'No Content')
437
- snippet = (body[:150] + "...") if len(body) > 150 else body
438
- st.markdown(
439
- f"**From:** {candidate.get('sender','Unknown')} <br>"
440
- f"**To:** {candidate.get('to','Unknown')} <br>"
441
- f"**Date:** {candidate.get('date','Unknown')} <br>"
442
- f"**Subject:** {candidate.get('subject','No Subject')} <br>"
443
- f"**Body Snippet:** {snippet} <br>"
444
- f"**Similarity:** {sim:.4f}",
445
- unsafe_allow_html=True
446
- )
447
 
448
  # ===============================
449
  # 6. Main Application Logic
 
7
  import torch
8
  import pickle
9
  import base64
 
 
10
  from googleapiclient.discovery import build
11
  from google_auth_oauthlib.flow import InstalledAppFlow
12
  from google.auth.transport.requests import Request
 
52
  st.session_state.candidates_message_shown = False
53
  if "vector_db_message_shown" not in st.session_state:
54
  st.session_state.vector_db_message_shown = False
 
 
55
 
56
  def count_tokens(text):
57
  return len(text.split())
 
73
  st.session_state.messages = []
74
  st.session_state.candidates_message_shown = False
75
  st.session_state.vector_db_message_shown = False
 
76
  for filename in ["token.json", "data_chunks.pkl", "embeddings.pkl", "vector_store.index", "vector_database.pkl"]:
77
  if os.path.exists(filename):
78
  os.remove(filename)
 
395
  # Append user message to chat
396
  st.session_state.messages.append({"role": "user", "content": user_input})
397
 
 
 
 
 
 
 
 
 
 
 
 
 
398
  # Process the query
399
  process_candidate_emails(user_input, similarity_threshold)
400
 
 
401
  if st.session_state.candidate_context:
402
+ # Send the query to the LLM API
403
  call_llm_api(user_input)
404
 
405
+ # Display chat messages
406
+ for msg in st.session_state.messages:
407
+ if msg["role"] == "user":
408
+ with st.chat_message("user"):
409
+ st.markdown(msg["content"])
410
+ elif msg["role"] == "assistant":
411
+ with st.chat_message("assistant"):
412
+ st.markdown(msg["content"])
413
+
414
+ # Display matching email chunks in an expander
415
+ if st.session_state.raw_candidates:
416
+ with st.expander("🔎 Matching Email Chunks:", expanded=False):
417
+ for candidate, sim in st.session_state.raw_candidates:
418
+ # Get a snippet (first 150 characters) of the body instead of full body content.
419
+ body = candidate.get('body', 'No Content')
420
+ snippet = (body[:150] + "...") if len(body) > 150 else body
421
+ st.markdown(
422
+ f"**From:** {candidate.get('sender','Unknown')} <br>"
423
+ f"**To:** {candidate.get('to','Unknown')} <br>"
424
+ f"**Date:** {candidate.get('date','Unknown')} <br>"
425
+ f"**Subject:** {candidate.get('subject','No Subject')} <br>"
426
+ f"**Body Snippet:** {snippet} <br>"
427
+ f"**Similarity:** {sim:.4f}",
428
+ unsafe_allow_html=True
429
+ )
430
 
431
  # ===============================
432
  # 6. Main Application Logic