Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-from rag_components import load_documents, split_documents, create_embeddings, setup_vector_store, create_qa_chain
+from rag_components import load_documents, split_documents, create_embeddings, setup_vector_store, create_qa_chain, create_streaming_response
 import os
 
 # Ensure cache directories exist
@@ -9,10 +9,22 @@ for cache_dir in cache_dirs:
 
 st.set_page_config(
     page_title="Document Chatbot",
-    page_icon="
-    layout="wide"
+    page_icon="🤖",
+    layout="wide",
+    initial_sidebar_state="collapsed"
 )
-
+
+st.title("Juma's Assistant")
+st.markdown("---")
+
+# Add some helpful information
+with st.expander("How to use this chatbot"):
+    st.markdown("""
+    - Ask questions about the content in your document
+    - The AI will search through the document to find relevant information
+    - Responses are generated in real-time with streaming
+    - Be specific in your questions for better answers
+    """)
 
 @st.cache_resource
 def initialize_rag_components(file_path="me.txt"):
@@ -36,13 +48,13 @@ def initialize_rag_components(file_path="me.txt"):
 
         with st.spinner("Initializing QA chain..."):
             qa_chain = create_qa_chain(retriever)
-
-        st.success("
+
+        st.success("Welcome! Ask me anything about Juma.")
         return qa_chain, retriever
 
     except Exception as e:
-        st.error(f"
-        st.info("
+        st.error(f"Error: initializing: {e}")
+        st.info("This might be due to model download issues. Please try refreshing the page.")
         return None, None
 
 qa_chain, retriever = initialize_rag_components()
@@ -58,7 +70,7 @@ if qa_chain is not None:
             st.markdown(message["content"])
 
     # React to user input
-    if prompt := st.chat_input("Ask me
+    if prompt := st.chat_input("Ask me any question..."):
         # Display user message in chat message container
         st.session_state.messages.append({"role": "user", "content": prompt})
         with st.chat_message("user"):
@@ -67,17 +79,14 @@ if qa_chain is not None:
         # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
-
+
            try:
-                #
-
-
-                    full_response += chunk['result']
-                    message_placeholder.markdown(full_response + "▌")
-                message_placeholder.markdown(full_response)
+                # Use the new streaming response function
+                full_response = create_streaming_response(qa_chain, prompt, message_placeholder)
+
            except Exception as e:
                st.error(f"An error occurred: {e}")
-                full_response = "
+                full_response = "I apologize, but I encountered an error while processing your question. Please try again."
 
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})
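
The commit swaps app.py's inline token loop for a single call to create_streaming_response, whose implementation lives in rag_components.py and is not part of this diff. As a minimal sketch, assuming the chain exposes a .stream() method yielding dicts with a 'result' key (matching the chunk['result'] loop the commit removes), the helper might look like:

    # Hypothetical sketch of rag_components.create_streaming_response; the
    # real module is not shown in this commit.
    def create_streaming_response(qa_chain, prompt, placeholder):
        """Stream the chain's answer into a Streamlit placeholder; return the full text."""
        full_response = ""
        # Assumes qa_chain supports .stream() and yields dicts with a
        # 'result' key, as the inline loop removed from app.py did.
        for chunk in qa_chain.stream({"query": prompt}):
            full_response += chunk.get("result", "")
            placeholder.markdown(full_response + "▌")  # cursor effect while streaming
        placeholder.markdown(full_response)  # final render without the cursor
        return full_response

Moving the loop into rag_components keeps app.py focused on UI wiring and lets the streaming logic be reused or tested without touching Streamlit session state.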