import os

import gradio as gr
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain_groq import ChatGroq
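
# Load the source document that the chatbot will answer questions about.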
loader = TextLoader("sample_readme.txt")
documents = loader.load()
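
# Split the text into overlapping chunks (500 characters, 50-character
# overlap) so retrieval stays fine-grained and no context is lost at
# chunk boundaries.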
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(documents)
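
# Embed each chunk with a small, fast sentence-transformers model.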
embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
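
# Index the chunks in a persistent Chroma store and expose it as a retriever.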
vectorstore = Chroma.from_documents(docs, embedding, persist_directory="rag_chroma_groq")
retriever = vectorstore.as_retriever()
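
# Groq-hosted Llama 3 70B; expects GROQ_API_KEY to be set in the environment.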
groq_llm = ChatGroq(api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-70b-8192")
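
# Build a RetrievalQA chain that stuffs the retrieved chunks into the
# prompt (the default chain_type is "stuff") and asks the LLM to answer.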
qa_chain = RetrievalQA.from_chain_type(
    llm=groq_llm,
    retriever=retriever,
    return_source_documents=False,
)
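
# Answer a question by retrieving relevant chunks and querying the LLM.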
def chatbot_interface(user_query):
    # .invoke() is the current chain entry point; calling the chain
    # object directly is deprecated in recent LangChain releases.
    result = qa_chain.invoke({"query": user_query})
    return result["result"]
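
# Wire the QA chain into a simple text-in/text-out Gradio interface.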
iface = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(label="Ask a question about the document"),
    outputs=gr.Textbox(label="Answer"),
    title="RAG Chatbot with Groq + LangChain",
    description="Ask questions about sample_readme.txt using Groq LLM",
)

if __name__ == "__main__":
    iface.launch()