| | import streamlit as st |
| | import os |
| | import faiss |
| | import pickle |
| | import time |
| | from langchain_community.document_loaders import PyPDFLoader |
| | from langchain.text_splitter import RecursiveCharacterTextSplitter |
| | from langchain.embeddings import HuggingFaceEmbeddings |
| | from langchain.vectorstores import FAISS |
| | from dotenv import load_dotenv |
| | from groq import Groq |
| |
|
| | |
# Load environment variables from a local .env file so the API key does not
# have to be exported in the shell before launching the app.
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")  # None when the variable is unset

# Module-level Groq client shared by every request in this process.
# NOTE(review): if GROQ_API_KEY is None this still constructs a client and the
# failure surfaces only on the first API call — confirm the key is present in
# the deployment environment.
client = Groq(api_key=GROQ_API_KEY)
|
| | |
def main():
    """Streamlit entry point: upload a PDF, index it, and answer questions.

    Flow: sidebar instructions/settings -> PDF upload -> chunk + embed the
    document into a FAISS vector store (cached in ``st.session_state`` so a
    rerun does not re-embed) -> free-text question answered by the Groq LLM
    from the top-k most similar chunks.

    Side effects: writes the uploaded PDF under ``temp_files/`` and the raw
    FAISS index to ``faiss_index`` in the working directory.
    """
    st.set_page_config(page_title="AskMyPdf", layout="wide")
    st.title("📄 AskMyPdf - AI-Powered PDF Q&A")

    with st.sidebar:
        st.header("How to Use")
        st.markdown("1️⃣ Upload a PDF document")
        st.markdown("2️⃣ Ask any question related to the document")
        st.markdown("3️⃣ Get AI-powered answers instantly! 📌")

        st.subheader("Settings")
        # NOTE(review): the theme choice is collected but not applied anywhere
        # yet — wire it up or remove the widget.
        theme = st.radio("Choose Theme:", ["Light", "Dark"], index=0)
        st.subheader("Language")
        language = st.selectbox(
            "Select Language", ["English", "French", "Spanish", "German"], index=0
        )

    st.subheader("Upload your PDF")
    uploaded_file = st.file_uploader("Choose a PDF file", type=["pdf"])

    if uploaded_file is None:
        # Nothing else to do until a document is provided.
        return

    st.success(f"Uploaded: {uploaded_file.name}")

    # Persist the upload to disk because PyPDFLoader reads from a file path.
    os.makedirs("temp_files", exist_ok=True)
    file_path = os.path.join("temp_files", uploaded_file.name)
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    if not os.path.exists(file_path):
        st.error("Error: File was not saved properly. Please try again.")
        return

    # Streamlit reruns this entire script on every widget interaction
    # (including clicking "Get Answer").  Cache the vector store in
    # session_state keyed by file name so the expensive load/split/embed
    # pipeline runs once per document instead of once per rerun.
    if st.session_state.get("indexed_file") != uploaded_file.name:
        progress_bar = st.progress(0)
        with st.spinner("Processing your document..."):
            try:
                loader = PyPDFLoader(file_path)
                documents = loader.load()
                progress_bar.progress(25)

                text_splitter = RecursiveCharacterTextSplitter(
                    chunk_size=500, chunk_overlap=50
                )
                docs = text_splitter.split_documents(documents)
                progress_bar.progress(50)

                embeddings = HuggingFaceEmbeddings()
                vector_db = FAISS.from_documents(docs, embeddings)
                progress_bar.progress(75)

                # Raw FAISS index kept on disk as before.  NOTE(review): this
                # file is never read back, and it excludes the docstore, so it
                # cannot rebuild the vector store on its own.
                faiss.write_index(vector_db.index, "faiss_index")
                progress_bar.progress(100)

                st.session_state["vector_db"] = vector_db
                st.session_state["indexed_file"] = uploaded_file.name
                st.success("Document processed successfully!")
            except Exception as e:
                st.error(f"Error processing document: {e}")
                return

    vector_db = st.session_state["vector_db"]

    st.subheader("Ask a Question")
    query = st.text_input("Enter your question")
    if st.button("Get Answer") and query:
        with st.spinner("Generating response..."):
            try:
                docs = vector_db.similarity_search(query, k=5)
                context = "\n".join(doc.page_content for doc in docs)

                # Frame the retrieved chunks explicitly instead of raw
                # concatenation, and honour the sidebar language setting
                # (previously collected but never used).
                prompt = (
                    f"Answer the question using only the context below. "
                    f"Respond in {language}.\n\n"
                    f"Context:\n{context}\n\n"
                    f"Question: {query}"
                )
                chat_completion = client.chat.completions.create(
                    messages=[{"role": "user", "content": prompt}],
                    model="llama-3.3-70b-versatile",
                )
                response = chat_completion.choices[0].message.content
                st.markdown("### Answer")
                st.write(response)
            except Exception as e:
                st.error(f"Error generating response: {e}")
| |
|
# Launch the app only when executed directly (e.g. `streamlit run <file>`),
# not when imported as a module.
if __name__ == "__main__":
    main()
| |
|