zeeshan4801 committed on
Commit
71ea100
·
verified ·
1 Parent(s): 435e5bc

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -0
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from groq import Groq
3
+ from langchain_community.embeddings import HuggingFaceEmbeddings
4
+ from langchain_community.vectorstores import FAISS
5
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
6
+ from PyPDF2 import PdfReader
7
+ import streamlit as st
8
+ from tempfile import NamedTemporaryFile
9
+
10
# Initialize the Groq client.
# SECURITY: the original hard-coded an API key in source (and passed it
# positionally, where the SDK expects the keyword-only `api_key`).
# Read the secret from the environment instead; set GROQ_API_KEY before running.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
12
+
13
# Function to extract text from a PDF
def extract_text_from_pdf(pdf_file_path):
    """Return the concatenated text of every page in the PDF at *pdf_file_path*.

    Fixes two issues in the original:
    - ``page.extract_text()`` may return ``None`` for image-only/scanned
      pages, which made ``text += None`` raise ``TypeError``; such pages
      now contribute an empty string.
    - builds the result with ``"".join`` instead of quadratic ``+=``.
    """
    pdf_reader = PdfReader(pdf_file_path)
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
20
+
21
# Function to split text into chunks
def chunk_text(text, chunk_size=500, chunk_overlap=50):
    """Split *text* into overlapping chunks with LangChain's recursive splitter.

    The overlap keeps sentences that straddle a chunk boundary retrievable
    from either side.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_text(text)
27
+
28
# Function to create embeddings and store them in FAISS
def create_embeddings_and_store(chunks):
    """Embed *chunks* with a MiniLM sentence-transformer and index them in FAISS.

    Returns the in-memory FAISS vector store ready for similarity search.
    """
    embedding_model = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    return FAISS.from_texts(chunks, embedding=embedding_model)
33
+
34
# Function to query the vector database and interact with Groq
def query_vector_db(query, vector_db):
    """Answer *query* with the Groq LLM, grounded on chunks retrieved from *vector_db*.

    Retrieves the three most similar chunks, injects them into the system
    prompt as context, and returns the model's reply text.
    """
    # Retrieve the top-3 most similar chunks to ground the answer.
    matches = vector_db.similarity_search(query, k=3)
    context = "\n".join(doc.page_content for doc in matches)

    # Build the chat payload and send it to the Groq API.
    messages = [
        {"role": "system", "content": f"Use the following context:\n{context}"},
        {"role": "user", "content": query},
    ]
    completion = client.chat.completions.create(
        messages=messages,
        model="llama3-8b-8192",
    )
    return completion.choices[0].message.content
49
+
50
# Streamlit app: upload a PDF, build a FAISS index over it, then answer queries.
st.title("RAG-Based Application")

# Upload PDF
uploaded_file = st.file_uploader("Upload a PDF document", type=["pdf"])

if uploaded_file:
    # Persist the upload to disk so PdfReader can open it by path.
    with NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
        temp_file.write(uploaded_file.read())
        pdf_path = temp_file.name

    # Extract text, then remove the temp file — with delete=False the
    # original leaked one file per upload.
    try:
        text = extract_text_from_pdf(pdf_path)
    finally:
        os.unlink(pdf_path)
    st.write("PDF Text Extracted Successfully!")

    # Chunk text
    chunks = chunk_text(text)
    st.write("Text Chunked Successfully!")

    # Generate embeddings and store in FAISS
    vector_db = create_embeddings_and_store(chunks)
    st.write("Embeddings Generated and Stored Successfully!")

    # User query input
    user_query = st.text_input("Enter your query:")
    if user_query:
        response = query_vector_db(user_query, vector_db)
        st.write("Response from LLM:")
        st.write(response)