Update app.py
app.py CHANGED
@@ -1,145 +1,42 @@
-import streamlit as st
-from PyPDF2 import PdfReader
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-import os
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
-import google.generativeai as genai
-from langchain.vectorstores import FAISS
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
-from

-
-
-genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

-def get_pdf_text(pdf_docs):
-    text = ""
-    for pdf in pdf_docs:
-        pdf_reader = PdfReader(pdf)
-        for page in pdf_reader.pages:
-            text += page.extract_text()
-    return text



-def get_text_chunks(text):
-
-    chunks = text_splitter.split_text(text)
-    return chunks
-
-
-def get_vector_store(text_chunks):
-    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
-    vector_store.save_local("faiss_index")
-
-
-def get_conversational_chain():
-
-    prompt_template = """
-    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
-    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
-    Context:\n {context}?\n
-    Question: \n{question}\n
-
-    Answer:
-    """
-
-    model = ChatGoogleGenerativeAI(model="gemini-pro",
-                                   temperature=0.3)
-
-    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
-    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
-
-    return chain
-
-
-
-def user_input(user_question):
-    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
-
-    new_db = FAISS.load_local("faiss_index", embeddings)
-    docs = new_db.similarity_search(user_question)
-
-    chain = get_conversational_chain()
-
-
-    response = chain(
-        {"input_documents": docs, "question": user_question},
-        return_only_outputs=True)
-
-    print(response)
-    st.write("Reply: ", response["output_text"])
-
-
-
-
-def main():
-    st.set_page_config("Chat PDF")
-    st.header("Gemini-Powered-MultiPDF-Chatbot")
-
-    user_question = st.text_input("Ask a Question from the PDF Files")
-
-    if user_question:
-        user_input(user_question)
-
-    with st.sidebar:
-        st.title("Menu:")
-        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
-        if st.button("Submit & Process"):
-            with st.spinner("Processing..."):
-                raw_text = get_pdf_text(pdf_docs)
-                text_chunks = get_text_chunks(raw_text)
-                get_vector_store(text_chunks)
-                st.success("Done")


if __name__ == "__main__":
-
-
-def set_bg_from_url(url, opacity=1):

-
-
-    <footer>
-        <div style='visibility: visible;margin-top:7rem;justify-content:center;display:flex;'>
-            <p style="font-size:1.1rem;">
-                Made by Mohamed Shaad
-
-                <a href="https://www.linkedin.com/in/mohamedshaad">
-                    <svg xmlns="http://www.w3.org/2000/svg" width="23" height="23" fill="white" class="bi bi-linkedin" viewBox="0 0 16 16">
-                        <path d="M0 1.146C0 .513.526 0 1.175 0h13.65C15.474 0 16 .513 16 1.146v13.708c0 .633-.526 1.146-1.175 1.146H1.175C.526 16 0 15.487 0 14.854V1.146zm4.943 12.248V6.169H2.542v7.225h2.401zm-1.2-8.212c.837 0 1.358-.554 1.358-1.248-.015-.709-.52-1.248-1.342-1.248-.822 0-1.359.54-1.359 1.248 0 .694.521 1.248 1.327 1.248h.016zm4.908 8.212V9.359c0-.216.016-.432.08-.586.173-.431.568-.878 1.232-.878.869 0 1.216.662 1.216 1.634v3.865h2.401V9.25c0-2.22-1.184-3.252-2.764-3.252-1.274 0-1.845.7-2.165 1.193v.025h-.016a5.54 5.54 0 0 1 .016-.025V6.169h-2.4c.03.678 0 7.225 0 7.225h2.4z"/>
-                    </svg>
-                </a>
-
-                <a href="https://github.com/shaadclt">
-                    <svg xmlns="http://www.w3.org/2000/svg" width="23" height="23" fill="white" class="bi bi-github" viewBox="0 0 16 16">
-                        <path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.012 8.012 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>
-                    </svg>
-                </a>
-            </p>
-        </div>
-    </footer>
-    """
-    st.markdown(footer, unsafe_allow_html=True)

-
-
-    st.markdown(
-        f"""
-        <style>
-        body {{
-            background: url('{url}') no-repeat center center fixed;
-            background-size: cover;
-            opacity: {opacity};
-        }}
-        </style>
-        """,
-        unsafe_allow_html=True
-    )
-
-# Set background image from URL
-set_bg_from_url("https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/awj1xai1s7tvk7zprgvh", opacity=0.875)
-
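The body of the removed get_text_chunks helper is only partially visible above; the line that configures the text splitter did not survive this rendering. A minimal sketch of that step, assuming RecursiveCharacterTextSplitter with purely illustrative chunk_size and chunk_overlap values:

from langchain.text_splitter import RecursiveCharacterTextSplitter

def get_text_chunks(text):
    # chunk_size and chunk_overlap are illustrative; the original values are not visible in this diff
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks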
+import json
+from langchain.chains import SimpleChatChain
from langchain.prompts import PromptTemplate
+from langchain.llms import OpenAI

+# Define your API key for OpenAI or any other LLM provider
+openai_api_key = 'sk-nAqoChT9cmkAxALwMLdWT3BIbkFJcNHsH5Z5LN2ixPcDAopT'

+# Initialize the LLM
+llm = OpenAI(api_key=openai_api_key)

+# Define a simple chat chain with a prompt template
+chat_chain = SimpleChatChain(
+    llm=llm,
+    prompt_template=PromptTemplate(
+        input_variables=["user_input"],
+        template="User: {user_input}\nBot:"
+    )
+)

+def chat_with_json(input_json):
+    # Parse the input JSON
+    input_data = json.loads(input_json)
+    user_input = input_data.get('message', '')

+    # Generate a response using the chat chain
+    response = chat_chain.run(user_input)

+    # Create the response JSON
+    response_json = json.dumps({'response': response})
+    return response_json

+# Example usage
if __name__ == "__main__":
+    # Simulate a JSON input from the user
+    user_input_json = json.dumps({'message': 'Hello, how are you?'})

+    # Get the response from the chatbot
+    response_json = chat_with_json(user_input_json)

+    # Print the response JSON
+    print(response_json)
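The replacement code imports SimpleChatChain, which is not a class exported by langchain.chains, so the new file fails at import time as written, and it hard-codes an API key that should not live in source control. A minimal runnable sketch of the same JSON-in/JSON-out chat flow, assuming the legacy langchain LLMChain API and a key supplied through the OPENAI_API_KEY environment variable:

import json
import os

from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# Read the key from the environment instead of committing it to source control
llm = OpenAI(openai_api_key=os.environ["OPENAI_API_KEY"])

# Same prompt shape as the added code above
prompt = PromptTemplate(
    input_variables=["user_input"],
    template="User: {user_input}\nBot:",
)
chat_chain = LLMChain(llm=llm, prompt=prompt)

def chat_with_json(input_json):
    # Parse the incoming JSON payload and pull out the user's message
    user_input = json.loads(input_json).get("message", "")
    # Run the chain and wrap the reply back into a JSON string
    response = chat_chain.run(user_input)
    return json.dumps({"response": response})

if __name__ == "__main__":
    print(chat_with_json(json.dumps({"message": "Hello, how are you?"})))

Newer LangChain releases express the same idea through the langchain-openai package and prompt | llm runnable composition, but LLMChain is the closest drop-in for the import style used here.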