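"""Auto Buddy: a Streamlit RAG assistant.

Extracts text from an uploaded PDF, splits it into overlapping chunks,
embeds the chunks into a FAISS index, and answers user questions with
Groq's Llama 3 model using the retrieved chunks as context.
"""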
import os
from groq import Groq
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PyPDF2 import PdfReader
import streamlit as st
from tempfile import NamedTemporaryFile
# Initialize the Groq client; the API key is read from the environment so a
# real key is never hardcoded in the source
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# Function to extract text from a PDF
def extract_text_from_pdf(pdf_file_path):
    pdf_reader = PdfReader(pdf_file_path)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() returns None for pages with no text layer
        # (e.g. scanned images), so guard the concatenation
        text += page.extract_text() or ""
    return text
# Function to split text into chunks
def chunk_text(text, chunk_size=500, chunk_overlap=50):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    return text_splitter.split_text(text)
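
# For illustration: with the defaults above, a 1,200-character document
# yields roughly three chunks of at most 500 characters, with adjacent
# chunks sharing about 50 characters so a sentence spanning a boundary
# survives intact in at least one chunk.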
# Function to create embeddings and store them in FAISS
def create_embeddings_and_store(chunks):
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vector_db = FAISS.from_texts(chunks, embedding=embeddings)
    return vector_db
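
# Optional sketch (not wired into the app): Streamlit re-runs this whole
# script on every interaction, so the index is rebuilt for each query.
# Decorating create_embeddings_and_store with @st.cache_resource, or
# persisting the index via the FAISS wrapper's save_local()/load_local()
# methods, would avoid re-embedding the same document repeatedly.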
# Function to query the vector database and interact with Groq
def query_vector_db(query, vector_db):
    # Retrieve the three most relevant chunks
    docs = vector_db.similarity_search(query, k=3)
    context = "\n".join([doc.page_content for doc in docs])
    # Pass the retrieved context to the model through the system prompt
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system", "content": f"Use the following context:\n{context}"},
            {"role": "user", "content": query},
        ],
        model="llama3-8b-8192",
    )
    return chat_completion.choices[0].message.content
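
# Note: the "8192" suffix in the model name is its context window in tokens;
# three ~500-character chunks plus the question stay comfortably inside it.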
# Streamlit app
st.set_page_config(
    page_title="Auto Buddy: RAG Application",
    page_icon="💻",
    layout="wide",
    initial_sidebar_state="expanded",
)
st.title("📚 Auto Buddy: Your RAG-Powered Assistant")
st.markdown(
    """
    Welcome to **Auto Buddy**, your AI-powered assistant that uses **Retrieval-Augmented Generation (RAG)**
    to answer questions about your documents. Upload a PDF, ask a question, and receive a precise,
    context-grounded answer.
    """
)
# Sidebar Instructions
st.sidebar.header("Instructions")
st.sidebar.write(
    "1. Upload a PDF document.\n"
    "2. Wait for the text extraction and chunking process.\n"
    "3. Enter your query to receive AI-driven answers."
)
# Upload PDF
uploaded_file = st.file_uploader("Upload a PDF Document", type=["pdf"])
if uploaded_file:
    # Write the upload to a temporary file so PdfReader can open it by path
    with NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
        temp_file.write(uploaded_file.read())
        pdf_path = temp_file.name
    # Extract text
    st.subheader("Step 1: Text Extraction")
    text = extract_text_from_pdf(pdf_path)
    st.success("PDF Text Extracted Successfully!")

    # Chunk text
    st.subheader("Step 2: Text Chunking")
    chunks = chunk_text(text)
    st.success("Text Chunked Successfully!")

    # Generate embeddings and store them in FAISS
    st.subheader("Step 3: Embeddings and Storage")
    vector_db = create_embeddings_and_store(chunks)
    st.success("Embeddings Generated and Stored Successfully!")
    # User query input
    st.subheader("Step 4: Ask Your Question")
    user_query = st.text_input("The issue with my car is:")
    if user_query:
        response = query_vector_db(user_query, vector_db)
        st.subheader("Response from LLM")
        st.write(response)
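
# To run locally (assuming the script is saved as app.py, a hypothetical
# name, and GROQ_API_KEY is exported in the shell):
#   streamlit run app.py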