File size: 2,843 Bytes
0d646fe
ee0fd46
 
 
 
 
 
 
 
 
 
18e2ce9
 
 
 
 
 
 
 
0d646fe
ee0fd46
c954281
0d646fe
c954281
23caabd
ee0fd46
a8cc11d
ee0fd46
 
 
18e2ce9
 
ee0fd46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0d646fe
ee0fd46
e416abe
0d646fe
5fa9406
0d646fe
c0cbbc4
 
 
 
 
ee0fd46
 
0d646fe
ee0fd46
c0cbbc4
451de25
 
 
 
 
 
ee0fd46
 
0d646fe
ee0fd46
 
0d646fe
5fa9406
ee0fd46
e416abe
 
 
 
 
 
 
0d646fe
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import gradio as gr
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains import create_retrieval_chain
import getpass
import os
from langchain_groq import ChatGroq

# --- API credentials --------------------------------------------------------
# SECURITY: API keys were previously hard-coded in this file. Secrets committed
# to source control must be treated as compromised and revoked. Read them from
# the environment instead, prompting interactively only when missing.
os.environ["LANGCHAIN_TRACING_V2"] = "true"  # enable LangSmith tracing
if not os.environ.get("LANGCHAIN_API_KEY"):
    os.environ["LANGCHAIN_API_KEY"] = getpass.getpass("LangChain API key: ")
if not os.environ.get("GROQ_API_KEY"):
    os.environ["GROQ_API_KEY"] = getpass.getpass("Groq API key: ")


# Embedding model: HuggingFaceEmbeddings with its default
# sentence-transformers model (no model name is passed here).
embeddings = HuggingFaceEmbeddings()

# Load the pre-built FAISS index from "vectorstore.db" on disk.
# allow_dangerous_deserialization=True is required because load_local
# unpickles data — only safe for an index we built ourselves; never enable
# this for files from untrusted sources.
vectorstore = FAISS.load_local("vectorstore.db", embeddings=embeddings,allow_dangerous_deserialization=True)
# Expose the vector store as a retriever for the RAG chain built below.

retriever = vectorstore.as_retriever()

# Groq-hosted chat model used to generate the final answer.
llm = ChatGroq(model="gemma-7b-it")  

# Prompt template for the QA chain. {context} is filled with the retrieved
# documents by create_stuff_documents_chain; {input} is the user's question
# (the key create_retrieval_chain expects).
prompt_template = """
You are an assistant for question-answering tasks.
Answer the given questions.

<context>
{context}
</context>

Question: {input}
"""

# Build the RAG pipeline: stuff retrieved docs into the prompt and send it to
# the LLM, wrapped in a retrieval chain that queries the FAISS retriever first.
prompt = ChatPromptTemplate.from_template(prompt_template)
doc_chain = create_stuff_documents_chain(llm, prompt)
chain = create_retrieval_chain(retriever, doc_chain)

# Chatbot response function (Gradio event handler)
def answer_query(message, history):
    """Answer a user question via the RAG chain and append it to the history.

    Gradio handler wired to the textbox's submit event: receives the textbox
    value and the Chatbot history, and returns ("", updated_history) so the
    textbox is cleared and the chat window refreshed.
    """
    # Gradio may deliver the message as a single-element list; normalise it.
    if isinstance(message, list):
        message = str(message[0])

    # Retrieve relevant context and generate an answer with the RAG chain.
    response = chain.invoke({"input": message})

    # create_retrieval_chain returns a dict whose 'answer' key holds the
    # generated text. str() already guarantees a string here, so the former
    # unreachable list/tuple coercion branches have been removed.
    answer = str(response['answer'])

    # Record the turn as a (user, bot) pair, the format gr.Chatbot expects.
    history.append((message, answer))

    # Empty string clears the input box; history updates the chatbot widget.
    return "", history

# Gradio UI: a heading, the chat window, and a textbox wired to answer_query.
# NOTE: previously a second gr.Chatbot was created at module level and then
# immediately shadowed by a bare gr.Chatbot() inside the Blocks context, so
# its placeholder text never appeared; the two are merged into one widget.
with gr.Blocks() as demo:
    gr.HTML("<h1 align = 'center'>Smart Assistant</h1>")

    chatbot = gr.Chatbot(placeholder="<strong>Chatbot that answers questions on agile processes</strong><br>Ask Me Anything")
    msg = gr.Textbox(label = "Enter your question here")
    # Submit sends (msg, chatbot) in and receives ("", updated_history) back,
    # clearing the input box and refreshing the chat window.
    msg.submit(answer_query,[msg,chatbot],[msg,chatbot])

if __name__ == "__main__":
    demo.launch()