import os
import requests
from groq import Groq
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from PyPDF2 import PdfReader
import streamlit as st
from tempfile import NamedTemporaryFile
# Initialize Groq client
client = Groq(api_key=os.environ['GROQ_API_KEY'])
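
# Note: os.environ['GROQ_API_KEY'] raises a KeyError if the variable is unset.
# A gentler pattern (a sketch, not part of the original app) is to check first:
#
#     api_key = os.environ.get("GROQ_API_KEY")
#     if not api_key:
#         st.error("GROQ_API_KEY is not set")
#         st.stop()
#     client = Groq(api_key=api_key)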
# Function to extract text from a PDF
def extract_text_from_pdf(pdf_file_path):
    pdf_reader = PdfReader(pdf_file_path)
    text = ""
    for page in pdf_reader.pages:
        page_text = page.extract_text()
        if page_text:  # extract_text() can return None for image-only pages
            text += page_text
    return text
# Function to split text into chunks
def chunk_text(text, chunk_size=500, chunk_overlap=50):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    return text_splitter.split_text(text)
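
# With the defaults above, consecutive chunks overlap by up to 50 characters,
# so text split near a 500-character boundary still appears intact in one of
# the two neighboring chunks. For example, a 1,200-character document yields
# chunks covering roughly characters 0-500, 450-950, and 900-1200 (the splitter
# prefers paragraph and word boundaries, so exact offsets vary).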
# Function to create embeddings and store them in FAISS
def create_embeddings_and_store(chunks, vector_db=None):
    if vector_db is None:
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vector_db = FAISS.from_texts(chunks, embedding=embeddings)
    else:
        # The existing store already holds its embedding function,
        # so add_texts reuses it; no need to load the model again
        vector_db.add_texts(chunks)
    return vector_db
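
# Even so, HuggingFaceEmbeddings loads the sentence-transformers model whenever
# a new index is built, and Streamlit reruns this script on every interaction.
# A cached loader (a sketch using Streamlit's st.cache_resource; not part of
# the original app) keeps one model instance around across reruns:
#
#     @st.cache_resource
#     def get_embeddings():
#         return HuggingFaceEmbeddings(
#             model_name="sentence-transformers/all-MiniLM-L6-v2"
#         )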
# Function to query the vector database and interact with Groq
def query_vector_db(query, vector_db):
    # Retrieve the top 3 most similar chunks and pass them to the LLM as context
    docs = vector_db.similarity_search(query, k=3)
    context = "\n".join([doc.page_content for doc in docs])
    chat_completion = client.chat.completions.create(
        messages=[
            {"role": "system", "content": f"Use the following context:\n{context}"},
            {"role": "user", "content": query},
        ],
        model="llama-3.3-70b-versatile",
    )
    return chat_completion.choices[0].message.content
# Function to convert a Google Drive view link to a direct-download link
def get_direct_download_link(view_url):
    if "drive.google.com/file/d/" in view_url:
        file_id = view_url.split("/file/d/")[1].split("/")[0]
        return f"https://drive.google.com/uc?export=download&id={file_id}"
    return None
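
# Example of the transformation this performs:
#   in:  https://drive.google.com/file/d/<FILE_ID>/view?usp=sharing
#   out: https://drive.google.com/uc?export=download&id=<FILE_ID>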
# Function to download and save a PDF from a URL
def download_pdf_from_url(url):
    direct_url = get_direct_download_link(url)
    if not direct_url:
        return None
    response = requests.get(direct_url, timeout=60)
    if response.status_code == 200:
        # Write the PDF bytes to a named temporary file and return its path
        temp_file = NamedTemporaryFile(delete=False, suffix=".pdf")
        temp_file.write(response.content)
        temp_file.close()
        return temp_file.name
    else:
        return None
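
# Caveat: for large files, Google Drive answers the uc?export=download URL with
# an HTML virus-scan confirmation page instead of the PDF bytes, which this
# simple requests.get approach does not handle. One alternative (a sketch using
# the third-party gdown package; not part of the original app):
#
#     import gdown
#     path = gdown.download(url=view_url, output="doc.pdf", fuzzy=True)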
# Streamlit app
st.title("RAG-Based QA on Google Drive PDFs")
# Only fetch from provided links
doc_links = [
    "https://drive.google.com/file/d/1YWX-RYxgtcKO1QETnz1N3rboZUhRZwcH/view?usp=sharing",
    "https://drive.google.com/file/d/1JPf0XvDhn8QoDOlZDrxCOpu4WzKFESNz/view?usp=sharing",
]
vector_db = None
# Process Google Drive documents
for idx, link in enumerate(doc_links):
    st.write(f"📄 Fetching and processing PDF from Link {idx + 1}...")
    pdf_path = download_pdf_from_url(link)
    if pdf_path:
        text = extract_text_from_pdf(pdf_path)
        chunks = chunk_text(text)
        vector_db = create_embeddings_and_store(chunks, vector_db=vector_db)
        st.success(f"✅ Processed document {idx + 1}")
    else:
        st.error(f"❌ Failed to download or process PDF from Link {idx + 1}")
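
# Because Streamlit reruns this script from the top on every interaction, the
# loop above re-downloads and re-embeds both PDFs each time the user submits a
# query. Wrapping the loop in a cached builder (a sketch, assuming Streamlit's
# st.cache_resource; not part of the original app) builds the index only once:
#
#     @st.cache_resource
#     def build_index(links):
#         db = None
#         for link in links:
#             path = download_pdf_from_url(link)
#             if path:
#                 db = create_embeddings_and_store(chunk_text(extract_text_from_pdf(path)), db)
#         return db
#
#     vector_db = build_index(tuple(doc_links))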
# User query input
user_query = st.text_input("🔍 Enter your query:")
if user_query and vector_db:
    response = query_vector_db(user_query, vector_db)
    st.subheader("💬 Response from LLM:")
    st.write(response)
elif user_query:
    st.warning("⚠️ No documents were processed, so there is nothing to query.")
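
# To launch locally, use the standard Streamlit CLI:
#     streamlit run app.py
# (app.py is an assumed filename; substitute whatever this script is saved as.)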