purajith committed on
Commit
cf8f7fc
·
verified ·
1 Parent(s): 2115b69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -26
app.py CHANGED
@@ -10,9 +10,9 @@ warnings.filterwarnings("ignore")
10
  st.set_page_config(layout="wide")
11
  st.title("AI Document Processor with Conversational RAG")
12
 
13
- # Initialize conversation history in session state
14
- if "conversation_history" not in st.session_state:
15
- st.session_state.conversation_history = []
16
 
17
  # Sidebar for file upload and settings
18
  with st.sidebar:
@@ -27,14 +27,16 @@ with st.sidebar:
27
  llm_option = st.selectbox("Select LLM Model", ["GPT-4o", "GPT-4o-mini"], index=1)
28
 
29
  temp_dir = "temp_uploaded_files"
30
-
 
 
 
31
  # Clear the previous uploads when new files are uploaded
32
  if uploaded_files:
33
  if os.path.exists(temp_dir):
34
  shutil.rmtree(temp_dir) # Delete the old directory and its contents
35
  os.makedirs(temp_dir) # Create a fresh directory
36
 
37
- file_paths = [] # List to store saved file paths
38
  for file in uploaded_files:
39
  file_path = os.path.join(temp_dir, file.name)
40
  with open(file_path, "wb") as f:
@@ -42,31 +44,37 @@ with st.sidebar:
42
  file_paths.append(file_path)
43
  st.write(f"✅ Saved: {file.name}")
44
 
45
- # Clear conversation history if the user refreshes or starts a new session
46
  if st.button("Clear Conversation History"):
47
- st.session_state.conversation_history = [] # Reset history
48
-
49
- # Chat interface
50
- st.write("### Chat Interface")
51
 
52
- chat_display = "\n".join(st.session_state.conversation_history)
53
- # st.text_area("Conversation History", chat_display, height=300, disabled=True)
 
 
54
 
55
- # Input for user question
56
- user_input = st.text_input("Ask a question:")
57
- llm_model = llm_option
 
 
 
58
 
59
- if st.button("Retrieve and Answer"):
60
- if user_input or uploaded_files:
61
- answer = multimodelrag(user_input, file_paths, embeding, llm_model, conversation)
62
 
63
- # Update conversation history
64
- st.session_state.conversation_history.append(f"User: {user_input}")
65
- st.session_state.conversation_history.append(f"AI: {answer}")
 
 
 
 
 
66
 
67
- # Refresh chat display
68
- chat_display = "\n".join(st.session_state.conversation_history)
69
- st.text_area("Conversation History", chat_display, height=400, disabled=True)
70
 
71
- st.write("### Answer:")
72
- st.write(answer)
 
10
  st.set_page_config(layout="wide")
11
  st.title("AI Document Processor with Conversational RAG")
12
 
13
+ # Initialize chat history in session state
14
+ if "messages" not in st.session_state:
15
+ st.session_state.messages = []
16
 
17
  # Sidebar for file upload and settings
18
  with st.sidebar:
 
27
  llm_option = st.selectbox("Select LLM Model", ["GPT-4o", "GPT-4o-mini"], index=1)
28
 
29
  temp_dir = "temp_uploaded_files"
30
+
31
+ # Initialize file_paths to prevent undefined variable errors
32
+ file_paths = []
33
+
34
  # Clear the previous uploads when new files are uploaded
35
  if uploaded_files:
36
  if os.path.exists(temp_dir):
37
  shutil.rmtree(temp_dir) # Delete the old directory and its contents
38
  os.makedirs(temp_dir) # Create a fresh directory
39
 
 
40
  for file in uploaded_files:
41
  file_path = os.path.join(temp_dir, file.name)
42
  with open(file_path, "wb") as f:
 
44
  file_paths.append(file_path)
45
  st.write(f"✅ Saved: {file.name}")
46
 
47
+ # Button to clear conversation history
48
  if st.button("Clear Conversation History"):
49
+ st.session_state.messages = []
 
 
 
50
 
51
+ # Display previous chat messages
52
+ for message in st.session_state.messages:
53
+ with st.chat_message(message["role"]):
54
+ st.markdown(message["content"], unsafe_allow_html=True) # Allow better formatting
55
 
56
+ # User input
57
+ if user_input := st.chat_input("Ask a question:"):
58
+ # Display user message
59
+ with st.chat_message("user"):
60
+ st.markdown(user_input)
61
+ st.session_state.messages.append({"role": "user", "content": user_input})
62
 
63
+ # Get AI response
64
+ answer = multimodelrag(user_input, file_paths, embeding, llm_option, conversation)
 
65
 
66
+ # Check if answer is valid
67
+ if isinstance(answer, dict):
68
+ chat_display = "\n\n".join(
69
+ [f"📂 <b>File:</b> <code>{key}</code><br><br>📝 <b>Extracted Text:</b><br><br>{value}<br>{'-'*50}"
70
+ for key, value in answer.items()]
71
+ )
72
+ else:
73
+ chat_display = f"❌ **Error:** Unexpected response format from AI model.\n\n{answer}"
74
 
75
+ # Display AI response in chat
76
+ with st.chat_message("assistant"):
77
+ st.markdown(chat_display, unsafe_allow_html=True) # Allow HTML formatting for bold text
78
 
79
+ # Store assistant response in conversation history
80
+ st.session_state.messages.append({"role": "assistant", "content": chat_display})