SumbalFatima1122 commited on
Commit
767b999
·
verified ·
1 Parent(s): 725a038

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -0
app.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from groq import Groq
3
+ from langchain_community.embeddings import HuggingFaceEmbeddings
4
+ from langchain_community.vectorstores import FAISS
5
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
6
+ from PyPDF2 import PdfReader
7
+ import streamlit as st
8
+ from tempfile import NamedTemporaryFile
9
+
10
# Initialize the Groq client.
# SECURITY: the original code committed a hard-coded API key to source
# control; any such key must be treated as leaked and rotated immediately.
# Read the key from the environment instead (set GROQ_API_KEY before running).
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
12
+
13
# Function to extract text from a PDF
def extract_text_from_pdf(pdf_file_path):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        pdf_file_path: Filesystem path to the PDF to read.

    Returns:
        All page text joined into one string; empty string if no page
        yields extractable text.
    """
    pdf_reader = PdfReader(pdf_file_path)
    # PyPDF2's extract_text() may return None for image-only/scanned pages;
    # the original `text += page.extract_text()` raised TypeError there.
    # join() also avoids quadratic string concatenation.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
20
+
21
# Function to split text into chunks
def chunk_text(text, chunk_size=500, chunk_overlap=50):
    """Split *text* into overlapping chunks suitable for embedding.

    Args:
        text: Full document text.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Characters shared by consecutive chunks.

    Returns:
        A list of chunk strings.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_text(text)
27
+
28
# Function to create embeddings and store them in FAISS
def create_embeddings_and_store(chunks):
    """Embed text chunks and index them in an in-memory FAISS store.

    Args:
        chunks: List of text chunks to embed.

    Returns:
        A FAISS vector store built from the chunks.
    """
    embedder = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    return FAISS.from_texts(chunks, embedding=embedder)
33
+
34
# Function to query the vector database and interact with Groq
def query_vector_db(query, vector_db, k=3, model="llama3-8b-8192"):
    """Answer *query* using RAG: retrieve context from FAISS, then ask Groq.

    Args:
        query: The user's natural-language question.
        vector_db: FAISS store built by create_embeddings_and_store().
        k: Number of most-similar chunks to retrieve (was hard-coded to 3).
        model: Groq model identifier (was hard-coded).

    Returns:
        The assistant's reply text.
    """
    # Retrieve the k most relevant chunks and join them as the context.
    docs = vector_db.similarity_search(query, k=k)
    context = "\n".join(doc.page_content for doc in docs)

    # Ask the LLM, injecting the retrieved context via the system message.
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system", "content": f"Use the following context:\n{context}"},
            {"role": "user", "content": query},
        ],
        model=model,
    )
    return chat_completion.choices[0].message.content
49
+
50
# ---------------------------------------------------------------------------
# Streamlit UI: upload a PDF, index it, and answer questions against it.
# ---------------------------------------------------------------------------
st.set_page_config(
    page_title="Auto Buddy: RAG Application",
    page_icon="💻",
    layout="wide",
    initial_sidebar_state="expanded",
)

st.title("📚 Auto Buddy: Your RAG-Powered Assistant")
st.markdown(
    """
    Welcome to **Auto Buddy**, your AI-powered assistant that leverages **Retrieval-Augmented Generation (RAG)** for powerful insights.
    Upload your PDF documents, ask questions, and receive precise answers effortlessly.
    """
)

# Sidebar usage instructions.
st.sidebar.header("Instructions")
st.sidebar.write(
    "1. Upload a PDF document.\n"
    "2. Wait for the text extraction and chunking process.\n"
    "3. Enter your query to receive AI-driven answers."
)

# PDF upload widget.
uploaded_file = st.file_uploader("Upload a PDF Document", type=["pdf"])

if uploaded_file:
    # Persist the upload to a temporary file so PdfReader can open it by path.
    with NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
        tmp.write(uploaded_file.read())
        saved_pdf_path = tmp.name

    # Step 1: pull the raw text out of the PDF.
    st.subheader("Step 1: Text Extraction")
    document_text = extract_text_from_pdf(saved_pdf_path)
    st.success("PDF Text Extracted Successfully!")

    # Step 2: split the text into overlapping chunks.
    st.subheader("Step 2: Text Chunking")
    text_chunks = chunk_text(document_text)
    st.success("Text Chunked Successfully!")

    # Step 3: embed the chunks and build the FAISS index.
    st.subheader("Step 3: Embeddings and Storage")
    index = create_embeddings_and_store(text_chunks)
    st.success("Embeddings Generated and Stored Successfully!")

    # Step 4: take a question and answer it via RAG.
    st.subheader("Step 4: Ask Your Question")
    question = st.text_input("The issue with my car is:")
    if question:
        answer = query_vector_db(question, index)
        st.subheader("Response from LLM")
        st.write(answer)