# NOTE(review): the three lines that were here ("Spaces:" / "Sleeping" / "Sleeping")
# were Hugging Face Spaces page-status residue from extraction, not source code.
import faiss
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import pipeline
# Sample corpus for the RAG demo (customize these).
# Each string is treated as one retrievable "chunk".
documents = [
    "Hugging Face provides a platform called Spaces for deploying ML apps.",
    "RAG combines document retrieval with generative models.",
    "FAISS enables efficient similarity search in dense vectors.",
    "Gradio allows quick creation of ML UIs.",
]
# Initialize the two models:
#  - `embedder` maps text to dense vectors for similarity search
#  - `generator` is a seq2seq model that writes the final answer
embedder = SentenceTransformer('all-MiniLM-L6-v2')
generator = pipeline('text2text-generation', model='google/flan-t5-base')

# Embed the corpus once at startup and build an exact (brute-force) L2 index.
# IndexFlatL2 is fine at this scale; swap for an IVF/HNSW index for large corpora.
chunk_embeddings = embedder.encode(documents)
dimension = chunk_embeddings.shape[1]
index = faiss.IndexFlatL2(dimension)
index.add(chunk_embeddings)
def answer_question(question, top_k=2):
    """Answer *question* via retrieval-augmented generation.

    Embeds the question, retrieves the ``top_k`` nearest documents from the
    FAISS index, and prompts the generator to answer from that context.

    Args:
        question: The user's natural-language question.
        top_k: How many documents to retrieve as context (default 2,
            matching the original behavior).

    Returns:
        The generated answer string.
    """
    question_embed = embedder.encode([question])
    # Clamp k to the corpus size: asking FAISS for more neighbors than
    # stored vectors yields -1 padding indices, which would crash the
    # documents[i] lookup below.
    k = min(top_k, len(documents))
    distances, indices = index.search(question_embed, k=k)
    context = "\n".join(documents[i] for i in indices[0])

    # Generate the answer grounded in the retrieved context.
    prompt = f"Answer based on context: {context}\nQuestion: {question}\nAnswer:"
    return generator(prompt, max_length=1000)[0]['generated_text']
# Gradio UI: a single question textbox in, a single answer textbox out.
interface = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(label="Ask a question"),
    outputs=gr.Textbox(label="Answer"),
    title="RAG Demo",
)

# launch() blocks and serves the app (Spaces picks this up as the entry point).
interface.launch()