|
|
|
|
|
|
|
|
import os |
|
|
import streamlit as st |
|
|
from langchain.chains import RetrievalQA |
|
|
from PyPDF2 import PdfReader |
|
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
|
from langchain.callbacks.base import BaseCallbackHandler |
|
|
from langchain.vectorstores.neo4j_vector import Neo4jVector |
|
|
from streamlit.logger import get_logger |
|
|
from chains import ( |
|
|
load_embedding_model, |
|
|
load_llm, |
|
|
) |
|
|
from pymongo import MongoClient |
|
|
import certifi |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os |
|
|
from pymongo import MongoClient |
|
|
from openai import OpenAI |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import requests |
|
|
from pymongo import MongoClient |
|
|
import certifi |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Connection/configuration values, read once from the environment at import time.
# NOTE(review): `url`, `username`, `password` were consumed by
# Neo4jVector.from_texts() in main() but were never defined anywhere in this
# file (NameError at runtime). Added here; env-var names assumed to follow the
# usual NEO4J_* convention — confirm against the deployment config.
url = os.getenv("NEO4J_URI")
username = os.getenv("NEO4J_USERNAME")
password = os.getenv("NEO4J_PASSWORD")
ollama_base_url = os.getenv("OLLAMA_BASE_URL")
# Embedding backend name; falls back to the local SentenceTransformer model.
embedding_model_name = os.getenv("EMBEDDING_MODEL", "SentenceTransformer")
# Chat model name; "llama2" via Ollama unless overridden (or switched to
# OpenAI by the form below).
llm_name = os.getenv("LLM", "llama2")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# If the Ollama URL was not provided via the environment, render a one-time
# connection form so the user can supply it (or an OpenAI key) interactively.
# NOTE(review): only `ollama_base_url` is checked here even though main() also
# needs the Neo4j credentials — presumably the original check covered more
# values; confirm whether url/username/password should be included in all([...]).
if not all([
    ollama_base_url]):
    st.write("The application requires some information before running.")
    with st.form("connection_form"):
        ollama_base_url = st.text_input("Enter OLLAMA_BASE_URL")
        st.markdown("Only enter the OPENAI_APIKEY to use OpenAI instead of Ollama. Leave blank to use Ollama.")
        openai_apikey = st.text_input("Enter OPENAI_API_KEY", type="password")
        submit_button = st.form_submit_button("Submit")
        # Streamlit reruns the whole script on submit; these branches run on
        # the rerun in which submit_button is True.
        if submit_button:
            # Neither backend configured: warn, but execution still continues
            # below on this rerun (no early exit in the original flow).
            if not (ollama_base_url or openai_apikey):
                st.write("Enter the Ollama URL or OpenAI API Key.")
            # An OpenAI key switches the LLM selection away from Ollama and
            # exposes the key to downstream libraries via the environment.
            if openai_apikey:
                llm_name = "gpt-3.5"
                os.environ['OPENAI_API_KEY'] = openai_apikey
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level logger using Streamlit's logging helper.
logger = get_logger(__name__)

# Load the embedding model once at import time. `dimension` is the size of the
# embedding vectors returned by the model (part of load_embedding_model's
# return contract; not used directly in this file).
embeddings, dimension = load_embedding_model(
    embedding_model_name, config={"ollama_base_url": ollama_base_url}, logger=logger
)
|
|
|
|
|
|
|
|
class StreamHandler(BaseCallbackHandler):
    """LangChain callback that streams LLM tokens into a Streamlit element.

    Each generated token is appended to an internal buffer and the whole
    buffer is re-rendered as markdown, producing a live "typing" effect.
    """

    def __init__(self, container, initial_text=""):
        # Text accumulated so far, seeded with any initial content.
        self.text = initial_text
        # The Streamlit placeholder (e.g. st.empty()) that displays the text.
        self.container = container

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Append *token* to the buffer and refresh the rendered output."""
        self.text = self.text + token
        self.container.markdown(self.text)
|
|
|
|
|
# Instantiate the chat LLM once at import time (Ollama-backed by default;
# OpenAI if the form above switched llm_name and set OPENAI_API_KEY).
llm = load_llm(llm_name, logger=logger, config={"ollama_base_url": ollama_base_url})
|
|
|
|
|
|
|
|
def main():
    """Streamlit page: upload a PDF, index it into Neo4j, and answer questions.

    Reads module-level configuration and resources (`url`, `username`,
    `password`, `embeddings`, `llm`). Re-runs top-to-bottom on every
    Streamlit interaction.
    """
    st.header("📄Chat with your pdf file")

    # `pdf` is None until the user uploads a file.
    pdf = st.file_uploader("Upload your PDF", type="pdf")

    if pdf is not None:
        pdf_reader = PdfReader(pdf)

        # Concatenate the extractable text of every page.
        # Bug fix: extract_text() can return None for pages with no
        # extractable text (e.g. scanned images); `or ""` avoids a TypeError
        # on `str += None`.
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text() or ""

        # Split into overlapping character chunks for embedding.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000, chunk_overlap=200, length_function=len
        )
        chunks = text_splitter.split_text(text=text)

        # Embed the chunks and store them in Neo4j. pre_delete_collection
        # drops any previous "pdf_bot" index so each upload starts fresh.
        vectorstore = Neo4jVector.from_texts(
            chunks,
            url=url,
            username=username,
            password=password,
            embedding=embeddings,
            index_name="pdf_bot",
            node_label="PdfBotChunk",
            pre_delete_collection=True,
        )

        # "stuff" chain: retrieved chunks are stuffed directly into the prompt.
        qa = RetrievalQA.from_chain_type(
            llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever()
        )

        # Accept user questions about the indexed PDF content.
        query = st.text_input("Ask questions about your PDF file")

        if query:
            # Stream the answer token-by-token into an empty placeholder.
            stream_handler = StreamHandler(st.empty())
            qa.run(query, callbacks=[stream_handler])
|
|
|
|
|
|
|
|
# Run the app only when executed as a script (e.g. `streamlit run <file>`),
# not when imported as a module.
if __name__ == "__main__":
    main()
|
|
|