# Hugging Face Space (status: sleeping) — Gradio RAG chatbot over a local FAISS index.
| import gradio as gr | |
| from langchain.embeddings import HuggingFaceEmbeddings | |
| from langchain.vectorstores import FAISS | |
| from langchain_community.document_loaders import PyPDFLoader | |
| from langchain_text_splitters import RecursiveCharacterTextSplitter | |
| from langchain import hub | |
| from langchain_core.output_parsers import StrOutputParser | |
| from langchain_core.runnables import RunnablePassthrough | |
| from langchain.chains.combine_documents import create_stuff_documents_chain | |
| from langchain_core.prompts import ChatPromptTemplate | |
| from langchain.chains import create_retrieval_chain | |
| import getpass | |
| import os | |
| from langchain_groq import ChatGroq | |
import warnings

# Enable LangSmith tracing for the LangChain pipeline.
os.environ["LANGCHAIN_TRACING_V2"] = "true"

# SECURITY: API keys must never be hard-coded in source. The keys previously
# committed here are compromised and must be revoked/rotated. Supply them via
# the environment (e.g. Space secrets) instead; warn loudly when missing so
# the failure mode is obvious rather than a cryptic downstream auth error.
for _key in ("LANGCHAIN_API_KEY", "GROQ_API_KEY"):
    if not os.environ.get(_key):
        warnings.warn(
            f"Environment variable {_key} is not set; API calls will fail.",
            stacklevel=2,
        )
# Embedding model used to vectorise documents and queries
# (HuggingFaceEmbeddings with its default sentence-transformers model).
embeddings = HuggingFaceEmbeddings()
# Load the prebuilt FAISS vector index from disk.
# NOTE(review): allow_dangerous_deserialization=True unpickles the stored
# index — acceptable only because "vectorstore.db" is produced by this
# project, never untrusted input.
vectorstore = FAISS.load_local("vectorstore.db", embeddings=embeddings,allow_dangerous_deserialization=True)
# Expose the vector store as a retriever for the RAG chain.
retriever = vectorstore.as_retriever()
# Groq-hosted chat model used for answer generation.
llm = ChatGroq(model="gemma-7b-it")
# Prompt contract: retrieved documents are injected as {context};
# the user's question is injected as {input}.
prompt_template = """
You are an assistant for question-answering tasks.
Answer the given questions.
<context>
{context}
</context>
Question: {input}
"""
# Build the chat prompt from the raw template string.
prompt = ChatPromptTemplate.from_template(prompt_template)
# "Stuff" chain: concatenates all retrieved docs into the {context} slot.
doc_chain = create_stuff_documents_chain(llm, prompt)
# Full RAG pipeline: retrieve -> stuff -> generate.
# Invoke with {"input": question}; the result dict contains an "answer" key.
chain = create_retrieval_chain(retriever, doc_chain)
| # Chatbot response function | |
def answer_query(message, history):
    """Answer a user question via the RAG chain and append it to chat history.

    Parameters
    ----------
    message : str | list
        The user's question. Some Gradio input configurations deliver a
        list; in that case only the first element is used.
    history : list[tuple[str, str]]
        Running (question, answer) pairs shown by the Chatbot widget.

    Returns
    -------
    tuple[str, list]
        An empty string (clears the input textbox) and the updated history.
    """
    # Normalise list-shaped input to a plain string.
    if isinstance(message, list):
        message = str(message[0])
    # Retrieve relevant context and generate an answer with the RAG chain.
    response = chain.invoke({"input": message})
    # str() already guarantees a string here, so the original list/tuple
    # re-joining branches were unreachable and have been removed.
    answer = str(response["answer"])
    history.append((message, answer))
    return "", history
# Gradio chat interface.
with gr.Blocks() as demo:
    gr.HTML("<h1 align = 'center'>Smart Assistant</h1>")
    # Single Chatbot widget. The placeholder previously lived on a throwaway
    # widget created outside the Blocks context and was immediately shadowed
    # by a bare gr.Chatbot(), so it never rendered; it now belongs here.
    chatbot = gr.Chatbot(placeholder="<strong>Chatbot that answers questions on agile processes</strong><br>Ask Me Anything")
    msg = gr.Textbox(label = "Enter your question here")
    # Submitting the textbox runs the RAG query and clears the textbox.
    msg.submit(answer_query,[msg,chatbot],[msg,chatbot])

if __name__ == "__main__":
    demo.launch()