import gradio as gr
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains import create_retrieval_chain
import getpass
import os
from langchain_groq import ChatGroq
# Enable LangSmith tracing for the LangChain calls below.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: API keys must never be hardcoded in source. The keys previously
# committed here are compromised and must be revoked/rotated. Read secrets
# from the environment instead, prompting interactively only when absent.
for _key in ("LANGCHAIN_API_KEY", "GROQ_API_KEY"):
    if _key not in os.environ:
        # getpass avoids echoing the secret to the terminal.
        os.environ[_key] = getpass.getpass(f"Enter {_key}: ")
# Embedding model used to query the FAISS index; must match the model that
# built "vectorstore.db". No arguments, so the library's default
# sentence-transformers model is used — TODO confirm it matches the indexer.
embeddings = HuggingFaceEmbeddings()
# Load the pre-built FAISS vector store from disk. FAISS persistence relies on
# pickle, hence allow_dangerous_deserialization=True — only acceptable because
# "vectorstore.db" is presumed to be a locally produced, trusted artifact.
vectorstore = FAISS.load_local("vectorstore.db", embeddings=embeddings,allow_dangerous_deserialization=True)
# Expose the store as a retriever for the RAG chain (default search settings).
retriever = vectorstore.as_retriever()
# Groq-hosted chat model used as the generation LLM; reads GROQ_API_KEY
# from the environment set above.
llm = ChatGroq(model="gemma-7b-it")
prompt_template = """
You are an assistant for question-answering tasks.
Answer the given questions.
Ask Me Anything")
# Gradio Chat Interface
with gr.Blocks() as demo:
gr.HTML("