import os

import gradio as gr
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceHub
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
|
|
| |
# --- RAG pipeline setup (runs once at import time) ---

# Fail fast with a clear message instead of an opaque HTTP/auth error later:
# HuggingFaceHub reads this token from the environment to authenticate.
if not os.environ.get("HUGGINGFACEHUB_API_TOKEN"):
    raise EnvironmentError(
        "HUGGINGFACEHUB_API_TOKEN is not set; the HuggingFaceHub LLM cannot authenticate."
    )

# Sentence-transformer model used to embed both the documents and the queries.
embedding_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2"
)

# Load the knowledge base and split it into overlapping chunks so retrieved
# passages fit comfortably in the LLM's context window.
if not os.path.exists("docs.txt"):
    raise FileNotFoundError(
        "docs.txt not found: the app needs this knowledge-base file next to the script."
    )
loader = TextLoader("docs.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=50)
docs = text_splitter.split_documents(documents)

# In-memory FAISS vector index over the chunk embeddings.
db = FAISS.from_documents(docs, embedding_model)

# Remote Hugging Face Hub LLM; moderate temperature keeps answers focused,
# max_length bounds the generated response.
llm = HuggingFaceHub(
    repo_id="google/flan-t5-small",
    model_kwargs={"temperature": 0.5, "max_length": 100},
)

# Retrieval-augmented QA chain: retrieve the top chunks from FAISS,
# then have the LLM answer from them.
qa = RetrievalQA.from_chain_type(llm=llm, retriever=db.as_retriever())
|
|
def answer_question(query):
    """Answer *query* by running it through the retrieval-QA chain."""
    response = qa.run(query)
    return response
|
|
# Minimal Gradio UI: a single text box in, answer text out.
iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="Simple RAG App",
)

if __name__ == "__main__":
    # Guarding the launch lets this module be imported (e.g. by tests or
    # other apps) without starting a web server as a side effect.
    iface.launch()