# shora_llm / app.py
# Uploaded by arifshora ("Upload 3 files", commit 8454979, verified)
import gradio as gr
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFaceHub
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader
import os
# --- RAG pipeline setup (runs once, at import time) ---
# Initialize embedding model: a sentence-transformers MiniLM model used to
# embed both the document chunks and incoming queries.
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Load and split documents.
# NOTE(review): "docs.txt" must exist in the working directory, or TextLoader
# raises at startup and the Space fails to boot — confirm it is uploaded.
loader = TextLoader("docs.txt")
documents = loader.load()
# Character-based splitting: 300-character chunks with 50 characters of overlap
# so context is not lost at chunk boundaries.
text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=50)
docs = text_splitter.split_documents(documents)
# Create FAISS vector store: an in-memory similarity index over the chunk embeddings.
db = FAISS.from_documents(docs, embedding_model)
# Load LLM from Hugging Face Hub (small flan-t5 model, temperature 0.5,
# generations capped at 100 tokens via max_length).
# NOTE(review): HuggingFaceHub presumably needs HUGGINGFACEHUB_API_TOKEN set in
# the environment/Space secrets — verify, it is not set anywhere in this file.
llm = HuggingFaceHub(repo_id="google/flan-t5-small", model_kwargs={"temperature": 0.5, "max_length": 100})
# Set up RetrievalQA chain: retrieve the most similar chunks from FAISS and
# feed them to the LLM to answer the user's question.
qa = RetrievalQA.from_chain_type(llm=llm, retriever=db.as_retriever())
def answer_question(query):
    """Answer *query* by running it through the module-level RetrievalQA chain."""
    response = qa.run(query)
    return response
# Wire the QA function into a minimal Gradio UI: one text box in, one text box out.
iface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="Simple RAG App",
)
# Start the web server (blocks until the app is shut down).
iface.launch()