# Swap the stdlib sqlite3 for pysqlite3 before Chroma is imported:
# chromadb requires a newer SQLite than many hosted environments ship with.
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')

import gradio as gr
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.vectorstores import Chroma
from sentence_transformers import SentenceTransformer
from transformers import pipeline

# Function to load, split, embed, and index documents from a URL
def load_and_retrieve_docs(url):
    loader = WebBaseLoader(web_paths=(url,))
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(docs)
    embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

    # Custom embedding wrapper compatible with Chroma's interface; it must
    # provide both embed_documents (used at indexing time) and embed_query
    # (used at retrieval time).
    class CustomEmbeddings:
        def __init__(self, model):
            self.model = model

        def embed_documents(self, texts):
            return self.model.encode(texts).tolist()

        def embed_query(self, text):
            return self.model.encode(text).tolist()

    embeddings = CustomEmbeddings(embedding_model)
    vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
    return vectorstore.as_retriever()
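
# Example usage (hypothetical URL):
#   retriever = load_and_retrieve_docs("https://example.com/article")
#   docs = retriever.invoke("What is the article about?")  # top-matching chunks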

# Function to format documents
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

# Function that defines the RAG chain: retrieve context, build a prompt, generate
def rag_chain(url, question):
    retriever = load_and_retrieve_docs(url)
    retrieved_docs = retriever.invoke(question)
    formatted_context = format_docs(retrieved_docs)
    formatted_prompt = f"Question: {question}\n\nContext: {formatted_context}"
    # Hugging Face text-generation pipeline ("gpt2" is the Hub model id);
    # max_new_tokens caps the generated answer length without counting the
    # prompt, and return_full_text=False drops the echoed prompt.
    qa_pipeline = pipeline("text-generation", model="gpt2")
    response = qa_pipeline(formatted_prompt, max_new_tokens=200, return_full_text=False)
    return response[0]['generated_text']
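
# Note: gpt2 has a 1024-token context window, so a long retrieved context can
# overflow it; a larger or instruction-tuned model would likely answer more
# reliably. Rebuilding the pipeline on every call also reloads the model;
# constructing it once at module level would avoid that cost.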

# Gradio interface
iface = gr.Interface(
    fn=rag_chain,
    inputs=["text", "text"],
    outputs="text",
    title="RAG Chain Question Answering",
    description="Enter a URL and a query to get answers from the RAG chain.",
)

# Launch the app
iface.launch()
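
# Running this script (e.g. `python app.py`; the filename is an assumption)
# starts a local Gradio server, by default at http://127.0.0.1:7860.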