# Streamlit RAG app: answers questions about the sample_mflix "movies" collection
# using a Groq-hosted Llama 3 model, HuggingFace embeddings, and a Chroma vector store.

import os

import streamlit as st
from dotenv import load_dotenv

from langchain_groq import ChatGroq
from langchain_chroma import Chroma
from langchain_community.document_loaders import MongodbLoader
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain.chains import create_retrieval_chain, create_history_aware_retriever
from langchain.chains.combine_documents import create_stuff_documents_chain

# Load API keys from the .env file
load_dotenv()
groq_api_key = os.getenv('GROQ_API_KEY')
hf_token = os.getenv('HF_TOKEN')  # read for HuggingFace access; not referenced directly below

# Groq-hosted Llama 3 8B as the chat model
llm = ChatGroq(groq_api_key=groq_api_key, model_name="llama3-8b-8192")

# Local sentence-transformers model for embedding the movie documents
embeddings = HuggingFaceEmbeddings(model_name='all-MiniLM-L6-v2')

# Load every record from the sample_mflix "movies" collection; each record becomes one Document.
# NOTE: credentials are hardcoded in this connection string; move them into the .env file
# loaded above before sharing this code.
loader = MongodbLoader(
    connection_string="mongodb+srv://deshcode0:helloworld@deshcode0.ftigm.mongodb.net/?retryWrites=true&w=majority&appName=deshcode0",
    db_name="sample_mflix",
    collection_name="movies",
    field_names=["_id", "plot", "genres", "runtime", "cast", "poster", "title", "fullplot", "languages", "released", "directors", "rated", "awards", "lastupdated", "year", "imdb", "countries", "type", "tomatoes", "num_mflix_comments"],
)
docs = loader.load()

# Chunk the documents, embed them into an in-memory Chroma store, and expose a retriever
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
retriever = vectorstore.as_retriever()
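
# Streamlit reruns this whole script on every interaction, so the documents are reloaded
# and re-embedded each time. A minimal sketch of one way to avoid that (the function name,
# cache decorator, and persist_directory path below are assumptions, not part of the original):
#
# @st.cache_resource
# def build_retriever():
#     docs = loader.load()
#     splits = text_splitter.split_documents(docs)
#     store = Chroma.from_documents(documents=splits, embedding=embeddings,
#                                   persist_directory="./chroma_db")
#     return store.as_retriever()
#
# retriever = build_retriever()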

system_prompt = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer "
    "the question. If you don't know the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise.\n\n{context}"
)

qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)

# Stuff the retrieved documents into the prompt and wrap everything in a retrieval chain
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(retriever, question_answer_chain)
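
# create_history_aware_retriever is imported above but never wired in. A minimal sketch of
# how it could be used so follow-up questions are rewritten with the chat history before
# retrieval (the prompt wording and variable names here are assumptions, not the original code):
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system",
         "Given the chat history and the latest user question, rewrite the question so it "
         "can be understood on its own. Do not answer it; just rewrite it if needed."),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
# Swap this chain in for rag_chain below if you want history-aware retrieval:
history_aware_rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)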

# --- Streamlit UI ---
st.title("LLM-Powered Question Answering with Memory")

# Keep the conversation across Streamlit reruns
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

st.sidebar.title("Ask a Question")
question = st.sidebar.text_input("Enter your question:")

if question:
    # Record the user's message, run the RAG chain with the accumulated history,
    # and display the answer
    st.session_state.chat_history.append(HumanMessage(content=question))

    response = rag_chain.invoke({"input": question, "chat_history": st.session_state.chat_history})

    st.write("**Answer:**")
    st.write(response['answer'])
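
    # create_retrieval_chain also returns the retrieved documents under the "context" key.
    # A small optional sketch for surfacing them (the expander label is an assumption):
    #
    # with st.expander("Retrieved context"):
    #     for doc in response["context"]:
    #         st.write(doc.page_content[:300])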

    # Store the model's reply so the next turn sees the full conversation
    st.session_state.chat_history.append(AIMessage(content=response['answer']))

# Render the full conversation
st.write("## Chat History")
for message in st.session_state.chat_history:
    if isinstance(message, HumanMessage):
        st.write(f"**You:** {message.content}")
    elif isinstance(message, AIMessage):
        st.write(f"**Bot:** {message.content}")