Update app.py

app.py CHANGED
@@ -3,7 +3,7 @@ import shutil
 import streamlit as st
 from io import BytesIO
 
-
+
 from llama_index.llms.openai import OpenAI
 from qdrant_client.http import models
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -12,37 +12,30 @@ from llama_index.vector_stores.qdrant import QdrantVectorStore
 from llama_index.core.memory import ChatMemoryBuffer
 import qdrant_client
 
-# =============================================================================
-# Configuration and Global Initialization
-# =============================================================================
 
-# Ensure that the OpenAI API key is available
 openai_api_key = os.getenv("OPENAI_API_KEY")
 if not openai_api_key:
     raise ValueError("Please set your OPENAI_API_KEY environment variable.")
 
-# System prompt for the chat engine
 SYSTEM_PROMPT = (
     "You are an AI assistant who answers the user questions, "
     "use the schema fields to generate appropriate and valid json queries"
 )
 
-
+
 Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.4)
 Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
 
-# Load initial documents from a directory called "new_file"
 if os.path.exists("new_file"):
     documents = SimpleDirectoryReader("new_file").load_data()
 else:
     documents = []
 
-# Set up the Qdrant vector store (using an in-memory collection for simplicity)
 
 client = qdrant_client.QdrantClient(location=":memory:")
 
 vector_store = QdrantVectorStore(
-    collection_name="paper",
+    # collection_name="paper",
     client=client,
     vector_field="embedding",
     enable_hybrid=True,
@@ -50,7 +43,6 @@ vector_store = QdrantVectorStore(
 )
 storage_context = StorageContext.from_defaults(vector_store=vector_store)
 
-# Build the initial index and chat engine
 index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
 chat_memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
 chat_engine = index.as_chat_engine(
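
Note on the hunk above: in the released versions of llama-index-vector-stores-qdrant that I know of, collection_name is a required constructor argument, so commenting it out will most likely raise a TypeError when the store is built. A minimal sketch that keeps an explicit collection name (reusing the in-memory client and hybrid flag from this file; "paper" is the name the old code used):

import qdrant_client
from llama_index.vector_stores.qdrant import QdrantVectorStore

client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(
    collection_name="paper",  # required by the constructor in released versions
    client=client,
    enable_hybrid=True,  # hybrid dense plus sparse retrieval, as in this app
)
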
@@ -59,42 +51,28 @@ chat_engine = index.as_chat_engine(
     system_prompt=SYSTEM_PROMPT,
 )
 
-# =============================================================================
-# Helper Functions
-# =============================================================================
 
 def process_uploaded_file(uploaded_file: BytesIO) -> str:
-    """
-    Process the uploaded file:
-    1. Save the file to an "uploads" folder.
-    2. Copy it to a temporary folder ("temp_upload") for reading.
-    3. Update the global documents list and rebuild the index and chat engine.
-    """
+
     if uploaded_file is None:
         return "No file uploaded."
 
-    # Ensure the uploads directory exists
     uploads_dir = "uploads"
     os.makedirs(uploads_dir, exist_ok=True)
 
-    # Save the uploaded file locally
     file_name = uploaded_file.name
     dest_path = os.path.join(uploads_dir, file_name)
     with open(dest_path, "wb") as f:
         f.write(uploaded_file.getbuffer())
 
-    # Prepare a temporary directory for processing the file
     temp_dir = "temp_upload"
     os.makedirs(temp_dir, exist_ok=True)
-    # Clear any existing file in temp_upload directory
    for f_name in os.listdir(temp_dir):
         os.remove(os.path.join(temp_dir, f_name))
     shutil.copy(dest_path, temp_dir)
 
-    # Load new document(s) from the temporary folder using SimpleDirectoryReader
     new_docs = SimpleDirectoryReader(temp_dir).load_data()
 
-    # Update global documents and rebuild the index and chat engine
     global documents, index, chat_engine
     documents.extend(new_docs)
     index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
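
Note on process_uploaded_file above: the function still rebuilds the entire index from the accumulated documents list on every upload. If rebuild cost matters, a sketch of an incremental alternative, assuming the module-level index built at startup (VectorStoreIndex inherits insert() from llama_index's BaseIndex):

# Add only the newly loaded documents to the existing index
# instead of re-embedding everything from scratch.
for doc in new_docs:
    index.insert(doc)
# A chat engine built over this same index should see the new nodes;
# it only needs recreating if its construction arguments change.
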
@@ -107,11 +85,8 @@ def process_uploaded_file(uploaded_file: BytesIO) -> str:
     return f"File '{file_name}' processed and added to the index."
 
 def chat_with_ai(user_input: str) -> str:
-    """
-    Send user input to the chat engine and return the response.
-    """
+
     response = chat_engine.chat(user_input)
-    # Extract references from the response (if any)
     references = response.source_nodes
     ref = []
     for node in references:
@@ -122,49 +97,36 @@ def chat_with_ai(user_input: str) -> str:
     complete_response += "\n\nReferences: " + ", ".join(ref)
     return complete_response
 
-# =============================================================================
-# Streamlit App Layout
-# =============================================================================
-
 st.set_page_config(page_title="LlamaIndex Chat & File Upload", layout="wide")
 st.title("Chat Interface for LlamaIndex with File Upload")
 
-
+
 tab1, tab2 = st.tabs(["Chat", "Upload"])
 
-
-# Chat Tab
-# -----------------------------------------------------------------------------
+
 with tab1:
     st.header("Chat with the AI")
-    # Initialize chat history in session state if it does not exist
     if "chat_history" not in st.session_state:
         st.session_state["chat_history"] = []
 
-    # Display conversation history
     for chat in st.session_state["chat_history"]:
         st.markdown(f"**User:** {chat[0]}")
         st.markdown(f"**AI:** {chat[1]}")
         st.markdown("---")
 
-    # Input text for user query
     user_input = st.text_input("Enter your question:")
 
-    # When the "Send" button is clicked, process the chat
     if st.button("Send") and user_input:
         with st.spinner("Processing..."):
             response = chat_with_ai(user_input)
         st.session_state["chat_history"].append((user_input, response))
         st.experimental_rerun()  # Refresh the page to show updated history
 
-    # Button to clear the conversation history
     if st.button("Clear History"):
         st.session_state["chat_history"] = []
         st.experimental_rerun()
 
-
-# Upload Tab
-# -----------------------------------------------------------------------------
+
 with tab2:
     st.header("Upload a File")
     uploaded_file = st.file_uploader("Choose a file to upload", type=["txt", "pdf", "doc", "docx", "csv", "xlsx"])
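
One compatibility note on the final hunk: st.experimental_rerun() was deprecated and later removed from Streamlit in favor of st.rerun() (added in Streamlit 1.27; the exact removal version is not checked here). On a current runtime the Send handler would look like:

if st.button("Send") and user_input:
    with st.spinner("Processing..."):
        response = chat_with_ai(user_input)
    st.session_state["chat_history"].append((user_input, response))
    st.rerun()  # refresh the page to show the updated history
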