PranavReddy18 commited on
Commit
09ee8f1
·
verified ·
1 Parent(s): a54d298

Upload 5 files

Browse files
Files changed (5) hide show
  1. .env +1 -0
  2. app.py +99 -0
  3. index.faiss +0 -0
  4. index.pkl +3 -0
  5. requirements.txt +51 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ # NOTE(security): a real API key was committed here — it must be considered
+ # compromised and rotated immediately. Never commit .env files; add .env to
+ # .gitignore and set the key in the deployment environment instead.
+ GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY_HERE"
app.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from PyPDF2 import PdfReader
3
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
4
+ import os
5
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
6
+ import google.generativeai as genai
7
+ from langchain.vectorstores import FAISS
8
+ from langchain_google_genai import ChatGoogleGenerativeAI
9
+ from langchain.chains.question_answering import load_qa_chain
10
+ from langchain.prompts import PromptTemplate
11
+ from dotenv import load_dotenv
12
+
13
# Load environment variables and configure the Gemini SDK once at startup.
load_dotenv()
api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    # Fail fast with an actionable message instead of opaque auth errors
    # surfacing later inside the embedding/chat calls.
    raise RuntimeError("GOOGLE_API_KEY is not set; add it to your environment or .env file.")
genai.configure(api_key=api_key)
17
+
18
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page of every uploaded PDF.

    Args:
        pdf_docs: Iterable of file-like objects accepted by ``PyPDF2.PdfReader``
            (e.g. Streamlit ``UploadedFile`` instances).

    Returns:
        str: All extractable page text, concatenated in page order.
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no extractable text
            # (e.g. scanned images); guard so += doesn't raise TypeError.
            text += page.extract_text() or ""
    return text
26
+
27
def get_text_chunks(text):
    """Split *text* into large overlapping chunks for embedding.

    Args:
        text: Raw document text.

    Returns:
        list[str]: Chunks of up to 10000 characters with 1000-character overlap.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    return splitter.split_text(text)
32
+
33
def get_vector_store(text_chunks):
    """Embed *text_chunks* with Gemini embeddings and persist a FAISS index.

    The index is written to the local ``faiss_index`` directory, from which
    ``user_input`` later reloads it.

    Args:
        text_chunks: List of text strings to embed.
    """
    embedder = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    store = FAISS.from_texts(text_chunks, embedding=embedder)
    store.save_local("faiss_index")
38
+
39
def get_conversational_chain():
    """Build a 'stuff'-type QA chain over Gemini with a grounded-answer prompt.

    Returns:
        A loaded question-answering chain that takes ``input_documents`` and
        ``question`` and answers only from the supplied context.
    """
    prompt_template = """
    Answer the question as detailed as possible from the provided context. If the answer is not in the provided context, say "answer is not available in the context". Do not provide incorrect answers.

    Context:
    {context}

    Question:
    {question}

    Answer:
    """
    # Low temperature keeps answers close to the retrieved context.
    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    qa_prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    return load_qa_chain(llm, chain_type="stuff", prompt=qa_prompt)
58
+
59
def user_input(user_question):
    """Answer *user_question* against the locally saved FAISS index.

    Loads the persisted index, retrieves the most similar chunks, runs the
    QA chain over them, and writes the reply to the Streamlit page.

    Args:
        user_question: The question typed by the user.
    """
    index_dir = "faiss_index"
    # Guard: asking a question before any PDFs were processed would otherwise
    # crash inside FAISS.load_local with a missing-file error.
    if not os.path.isdir(index_dir):
        st.error("No index found — upload and process PDF files first.")
        return

    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

    # NOTE(security): allow_dangerous_deserialization unpickles the stored
    # index. Acceptable only because this app writes the index itself; never
    # load an index from an untrusted source with this flag.
    new_db = FAISS.load_local(index_dir, embeddings, allow_dangerous_deserialization=True)

    docs = new_db.similarity_search(user_question)

    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)

    st.write("Reply:", response["output_text"])
75
+
76
def main():
    """Run the Streamlit app: a question box plus a PDF-processing sidebar."""
    st.set_page_config("Chat With Multiple PDF")
    st.header("Chat with Multiple PDF using Gemini💁")

    # User input for questions
    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        user_input(user_question)

    # Sidebar for uploading and processing PDFs
    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            # file_uploader yields None/[] until files are chosen; without
            # this guard get_pdf_text would iterate over None and crash.
            if not pdf_docs:
                st.warning("Please upload at least one PDF file first.")
            else:
                with st.spinner("Processing..."):
                    raw_text = get_pdf_text(pdf_docs)
                    text_chunks = get_text_chunks(raw_text)
                    get_vector_store(text_chunks)
                    st.success("Done")

if __name__ == "__main__":
    main()
index.faiss ADDED
Binary file (53.8 kB). View file
 
index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6a9c2684d79fc931ff028b0e1286333e39b05dd146d57d6cbd237ae27eea1d7
3
+ size 346011
requirements.txt ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deduplicated: streamlit, langchain, python-dotenv, chromadb, faiss-cpu,
+ # arxiv and wikipedia were listed multiple times; "google.generativeai" and
+ # "langchain-google-genai" duplicated existing entries under pip's name
+ # normalization (dots/underscores/hyphens are equivalent).
+ streamlit
+ google-generativeai
+ python-dotenv
+ langchain
+ PyPDF2
+ chromadb
+ faiss-cpu
+ langchain_google_genai
+ ipykernel
+ langchain-community
+ bs4
+ arxiv
+ pymupdf
+ wikipedia
+ langchain-text-splitters
+ langchain-openai
+ sentence_transformers
+ langchain_huggingface
+ langchain_chroma
+ langchain_groq
+ fastapi
+ uvicorn
+ langserve[all]
+ sse_starlette
+ streamlit-pydantic
+ validators
+ youtube_transcript_api
+ pytube
+ Unstructured
+ yt-dlp
+ numexpr
+ huggingface_hub
+ duckduckgo-search
+ langchain_nvidia_ai_endpoints
+ crewai
+ crewai_tools
+ pinecone
+ pinecone-client
+ neo4j==5.14