# LLMdemo / app.py
### Set Up the Language Model
from transformers import pipeline
# Load a small pre-trained causal LM (GPT-2 has a 1024-token context window,
# so the retrieved context plus the question must fit within it)
language_model = pipeline("text-generation", model="gpt2", clean_up_tokenization_spaces=True)
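
# A quick sanity check of the pipeline's output format (a list of dicts with a
# "generated_text" key), which the RAG function below relies on:
# print(language_model("Hello, world", max_new_tokens=5)[0]["generated_text"])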

### Index with LlamaIndex
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex

# Load the demo PDF and build an in-memory vector index over it.
# Note: LlamaIndex embeds with OpenAI by default, which requires an
# OPENAI_API_KEY; a local alternative is sketched below.
documents = SimpleDirectoryReader(input_files=["demo_data_for_RAG.pdf"]).load_data()
index = VectorStoreIndex.from_documents(documents)
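
# Optional: a minimal sketch of running the embeddings locally instead of via
# the OpenAI default. This assumes the llama-index-embeddings-huggingface
# package is installed; the model name is an illustrative choice, not a
# requirement.
# from llama_index.core import Settings
# from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")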

### Implement RAG Logic
retriever = index.as_retriever(similarity_top_k=2)

def retrieve_and_generate_answer(question):
    # Retrieve the chunks most relevant to the question
    retrieved_docs = retriever.retrieve(question)
    # Concatenate the retrieved text into a single context string
    context = " ".join(node.get_content() for node in retrieved_docs)
    # Generate an answer conditioned on the retrieved context;
    # max_new_tokens bounds the completion length, and return_full_text=False
    # drops the prompt from the pipeline's output
    prompt = context + "\n\nQuestion: " + question + "\nAnswer:"
    answer = language_model(prompt, max_new_tokens=100, return_full_text=False)
    return answer[0]["generated_text"]
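
# Quick smoke test (the question is a hypothetical placeholder; pick one that
# matches the demo PDF's contents):
# print(retrieve_and_generate_answer("What is this document about?"))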

### Gradio Interface
import gradio as gr

def answer_question(question):
    return retrieve_and_generate_answer(question)

# Create the Gradio interface
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text", title="Contextual QA System")
# Launch the interface
iface.launch()
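
# share=True (a standard Gradio option) exposes a temporary public URL when
# running locally; on Hugging Face Spaces, plain launch() is enough:
# iface.launch(share=True)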