Itzadityapandey committed on
Commit
7cbe877
·
verified ·
1 Parent(s): 83b698a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -6
app.py CHANGED
@@ -4,22 +4,20 @@ from PyPDF2 import PdfReader
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
6
  from langchain_community.vectorstores import FAISS
7
- from langchain_classic.chains.question_answering import load_qa_chain # Fixed import
8
  from langchain_core.prompts import PromptTemplate
9
  from dotenv import load_dotenv
10
 
11
- # Load environment variables
12
  load_dotenv()
13
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
14
 
15
- # Directory to save FAISS index
16
  INDEX_PATH = "faiss_index"
17
 
18
  def get_pdf_text(pdf_files):
19
  text = ""
20
  for pdf in pdf_files:
21
  try:
22
- pdf_reader = PdfReader(pdf.name) # Gradio gives tempfile
23
  for page in pdf_reader.pages:
24
  extracted = page.extract_text()
25
  if extracted:
@@ -64,7 +62,7 @@ def get_conversational_chain():
64
 
65
  Answer:
66
  """
67
- model = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0.3, google_api_key=GOOGLE_API_KEY) # Updated to a current fast model
68
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
69
  return load_qa_chain(model, chain_type="stuff", prompt=prompt)
70
 
@@ -95,7 +93,6 @@ def process_pdfs(pdf_files):
95
  result = create_vector_store(text_chunks)
96
  return result
97
 
98
- # Gradio UI
99
  with gr.Blocks(title="Chat with PDF") as demo:
100
  gr.Markdown("## Chat with PDF 💁")
101
  pdf_input = gr.File(file_types=[".pdf"], label="Upload PDF(s)", file_count="multiple")
 
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
6
  from langchain_community.vectorstores import FAISS
7
+ from langchain_classic.chains.question_answering import load_qa_chain
8
  from langchain_core.prompts import PromptTemplate
9
  from dotenv import load_dotenv
10
 
 
11
  load_dotenv()
12
  GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
13
 
 
14
  INDEX_PATH = "faiss_index"
15
 
16
  def get_pdf_text(pdf_files):
17
  text = ""
18
  for pdf in pdf_files:
19
  try:
20
+ pdf_reader = PdfReader(pdf.name)
21
  for page in pdf_reader.pages:
22
  extracted = page.extract_text()
23
  if extracted:
 
62
 
63
  Answer:
64
  """
65
+ model = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0.3, google_api_key=GOOGLE_API_KEY)
66
  prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
67
  return load_qa_chain(model, chain_type="stuff", prompt=prompt)
68
 
 
93
  result = create_vector_store(text_chunks)
94
  return result
95
 
 
96
  with gr.Blocks(title="Chat with PDF") as demo:
97
  gr.Markdown("## Chat with PDF 💁")
98
  pdf_input = gr.File(file_types=[".pdf"], label="Upload PDF(s)", file_count="multiple")