# RAG Chatbot Space — Groq + LangChain demo
| import os | |
| import gradio as gr | |
| from langchain_community.document_loaders import TextLoader | |
| from langchain.text_splitter import RecursiveCharacterTextSplitter | |
| from langchain_huggingface import HuggingFaceEmbeddings | |
| from langchain_community.vectorstores import Chroma | |
| from langchain.chains import RetrievalQA | |
| from langchain_groq import ChatGroq | |
# --- Document ingestion ---
# Fail fast with a clear message if the source file is missing; otherwise
# the loader fails at startup with a less obvious wrapped error.
if not os.path.exists("sample_readme.txt"):
    raise FileNotFoundError(
        "sample_readme.txt not found - the RAG index cannot be built without it"
    )

# Load the raw text as LangChain Document objects.
loader = TextLoader("sample_readme.txt")
documents = loader.load()

# Split into overlapping chunks so retrieval returns focused passages;
# the 50-character overlap preserves context across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
docs = text_splitter.split_documents(documents)
# --- Embeddings and vector store ---
# MiniLM sentence-transformer encodes each chunk into a dense vector.
embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

# Build a Chroma index over the chunks; persist_directory keeps the
# index on disk under "rag_chroma_groq".
vectorstore = Chroma.from_documents(
    docs,
    embedding,
    persist_directory="rag_chroma_groq",
)

# Default retriever over the index, used by the QA chain below.
retriever = vectorstore.as_retriever()
# --- LLM and RAG chain ---
# Validate the API key up front: passing None to ChatGroq defers the
# failure to query time with a far less obvious authentication error.
groq_api_key = os.getenv("GROQ_API_KEY")
if not groq_api_key:
    raise EnvironmentError(
        "GROQ_API_KEY environment variable is not set; "
        "export it before starting the app."
    )

# Groq-hosted Llama 3 70B chat model.
groq_llm = ChatGroq(api_key=groq_api_key, model_name="llama3-70b-8192")

# Retrieval-augmented QA: fetch relevant chunks, then answer with the LLM.
# Source documents are omitted from the result (UI shows only the answer).
qa_chain = RetrievalQA.from_chain_type(
    llm=groq_llm,
    retriever=retriever,
    return_source_documents=False,
)
| # Chat function | |
| def chatbot_interface(user_query): | |
| result = qa_chain({"query": user_query}) | |
| return result["result"] | |
# --- Gradio UI ---
# Build the input/output components first, then assemble the interface.
question_box = gr.Textbox(label="Ask a question about the document")
answer_box = gr.Textbox(label="Answer")

iface = gr.Interface(
    fn=chatbot_interface,
    inputs=question_box,
    outputs=answer_box,
    title="RAG Chatbot with Groq + LangChain",
    description="Ask questions about sample_readme.txt using Groq LLM",
)

# Launch only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    iface.launch()