"""RAG chatbot over sample_readme.txt using Groq LLM, LangChain, and Gradio."""
import os
import gradio as gr
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain_groq import ChatGroq
# --- Build the RAG pipeline at import time ---

# Load the source document to be indexed.
loader = TextLoader("sample_readme.txt")
documents = loader.load()

# Split into overlapping chunks so retrieval returns focused passages.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(documents)

# Sentence-transformer embeddings (computed locally, no API key required).
embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# Vector DB persisted on disk so repeated runs can reuse the index directory.
vectorstore = Chroma.from_documents(docs, embedding, persist_directory="rag_chroma_goq".replace("goq", "groq"))
retriever = vectorstore.as_retriever()

# Groq LLM — fail fast with a clear message instead of an opaque
# authentication error on the first query.
groq_api_key = os.getenv("GROQ_API_KEY")
if not groq_api_key:
    raise RuntimeError("GROQ_API_KEY environment variable is not set")
groq_llm = ChatGroq(api_key=groq_api_key, model_name="llama3-70b-8192")

# RAG chain using the default "stuff" strategy: retrieved chunks are
# concatenated into a single prompt for the LLM.
qa_chain = RetrievalQA.from_chain_type(
    llm=groq_llm,
    retriever=retriever,
    return_source_documents=False
)
# Chat function
def chatbot_interface(user_query):
    """Answer *user_query* against the indexed document via the RAG chain.

    Args:
        user_query: Question text from the Gradio input box.

    Returns:
        The LLM's answer string (source documents are not included,
        per ``return_source_documents=False``).
    """
    # .invoke() is the supported entry point; calling the chain object
    # directly (qa_chain({...})) is deprecated in modern LangChain.
    result = qa_chain.invoke({"query": user_query})
    return result["result"]
# Gradio UI
# --- Gradio front-end ---
# One-box-in / one-box-out web UI wrapped around the RAG chain.
question_box = gr.Textbox(label="Ask a question about the document")
answer_box = gr.Textbox(label="Answer")

iface = gr.Interface(
    fn=chatbot_interface,
    inputs=question_box,
    outputs=answer_box,
    title="RAG Chatbot with Groq + LangChain",
    description="Ask questions about sample_readme.txt using Groq LLM",
)
# Start the local web server only when run as a script, not on import.
# (Removed a stray trailing "|" artifact that was a SyntaxError.)
if __name__ == "__main__":
    iface.launch()