# RAG chatbot demo (Hugging Face Space).
import pandas as pd
import torch
import gradio as gr
from langchain_core.documents import Document
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.llms import HuggingFacePipeline
from langchain_community.embeddings import HuggingFaceEmbeddings
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Load the Q/A dataset; assumes "question" and "answer" columns — TODO confirm
# against the actual dataset.csv shipped with the Space.
dataset = pd.read_csv("dataset.csv")

# One LangChain Document per row: the answer text is what gets indexed and
# searched; the originating question is carried along as metadata.
documents = [
    Document(
        page_content=str(row.answer),
        metadata={"question": str(row.question)},
    )
    for row in dataset.itertuples(index=False)
]
# Sentence-embedding model used to vectorize the documents.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

# Seq2seq LLM that generates the final answers.
llm_model_id = "google/flan-t5-base"
llm_tokenizer = AutoTokenizer.from_pretrained(llm_model_id)
llm_model = AutoModelForSeq2SeqLM.from_pretrained(llm_model_id)
text2text_pipe = pipeline(
    "text2text-generation",
    model=llm_model,
    tokenizer=llm_tokenizer,
    max_length=512,
    # Use the first GPU when available, otherwise CPU (-1).
    device=0 if torch.cuda.is_available() else -1,
)
llm = HuggingFacePipeline(pipeline=text2text_pipe)

# FAISS index over the documents; the retriever returns the top 3 matches.
vector_store = FAISS.from_documents(documents, embeddings)
retriever = vector_store.as_retriever(search_kwargs={"k": 3})

# Retrieval-augmented QA chain; source documents are returned so the UI
# can show where each answer came from.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    return_source_documents=True,
)
# Chatbot function
def chatbot_interface(question: str) -> str:
    """Answer *question* via the RAG chain and list the retrieved sources.

    Args:
        question: The user's free-text question from the Gradio textbox.

    Returns:
        A displayable string: the answer followed by the retrieved source
        snippets (omitted when the chain returned none). Never raises —
        errors are reported in the returned text so the UI stays responsive.
    """
    # Guard: don't send an empty/whitespace-only query into the chain.
    if not question or not question.strip():
        return "Please enter a question."
    if not qa_chain:
        return "Chatbot backend not initialized properly."
    try:
        # Only the chain invocation can realistically fail; keep the
        # try body minimal so formatting bugs aren't masked as "Error: ...".
        response = qa_chain.invoke({"query": question})
    except Exception as e:  # UI boundary: surface the error to the user
        return f"Error: {e}"
    answer = response.get("result", "No answer found.")
    sources = response.get("source_documents", [])
    if not sources:
        # Original emitted a dangling "Sources:" header with no entries.
        return f"Answer: {answer}"
    source_lines = "\n".join(f"- {doc.page_content}" for doc in sources)
    return f"Answer: {answer}\n\nSources:\n{source_lines}"
# Gradio UI: a single question textbox in, a single response textbox out.
interface = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=gr.Textbox(label="Response"),
    title="RAG Chatbot",
    description="Ask questions about AI, ChatBots, NLP, and more.",
    theme="default",
)

# Start the web app (blocks until the server is stopped).
interface.launch()