# app.py — Minimal RAG over ./data/insurance.pdf with LlamaIndex + Pinecone
import os
import logging
import gradio as gr
# ---- Vector + LLM stack ----
from pinecone import Pinecone, ServerlessSpec
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, Settings
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
# ========== CONFIG ==========
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Optional overrides via Space Variables
PINECONE_INDEX_NAME = os.getenv("PINECONE_INDEX_NAME", "dds-insurance-index")
PINECONE_REGION = os.getenv("PINECONE_REGION", "us-east-1")
PINECONE_CLOUD = os.getenv("PINECONE_CLOUD", "aws")
EMBED_MODEL = os.getenv("EMBED_MODEL", "text-embedding-3-small") # 1536 dims
LLM_MODEL = os.getenv("LLM_MODEL", "gpt-4o-mini")
DATA_DIR = "data" # place insurance.pdf inside this folder
if not PINECONE_API_KEY:
    raise RuntimeError("Missing PINECONE_API_KEY (set it in your Space → Settings → Variables).")
if not OPENAI_API_KEY:
    raise RuntimeError("Missing OPENAI_API_KEY (set it in your Space → Settings → Variables).")
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dds-space")
# ========== CLIENTS / GLOBALS ==========
# LlamaIndex global settings
Settings.embed_model = OpenAIEmbedding(model=EMBED_MODEL, api_key=OPENAI_API_KEY)
Settings.llm = OpenAI(model=LLM_MODEL, api_key=OPENAI_API_KEY)
# Pinecone
pc = Pinecone(api_key=PINECONE_API_KEY)
def ensure_index(name: str, dim: int = 1536):
    """Create the Pinecone serverless index if it doesn't exist, then return a handle to it."""
    names = [i["name"] for i in pc.list_indexes()]
    if name not in names:
        log.info(f"Creating Pinecone index '{name}' (dim={dim})...")
        pc.create_index(
            name=name,
            dimension=dim,
            metric="cosine",
            spec=ServerlessSpec(cloud=PINECONE_CLOUD, region=PINECONE_REGION),
        )
    return pc.Index(name)
pinecone_index = ensure_index(PINECONE_INDEX_NAME, dim=1536)
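# Note: the index dimension must match the embedding size. text-embedding-3-small
# emits 1536-dim vectors; if you switch EMBED_MODEL (e.g., text-embedding-3-large
# is 3072-dim), change the dim here to match.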
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# Build once on startup (safe to re-run, though Pinecone only dedupes when you supply stable ids)
def bootstrap_index():
    # A quick "is empty" check is optional; many setups just upsert blindly.
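    # Minimal early-exit sketch (assumes the Pinecone v3+ client, whose
    # describe_index_stats() reports a total_vector_count; drop this block
    # if you prefer to re-upsert on every start):
    stats = pinecone_index.describe_index_stats()
    if getattr(stats, "total_vector_count", 0) > 0:
        log.info("Pinecone index already populated; skipping re-ingest.")
        return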
    log.info("Loading documents from ./data ...")
    if not os.path.isdir(DATA_DIR):
        raise RuntimeError("No 'data/' directory found. Create it and add insurance.pdf.")
    # Read everything in ./data (PDF/TXT/DOCX supported by LlamaIndex readers)
    docs = SimpleDirectoryReader(DATA_DIR).load_data()
    log.info(f"Docs loaded: {len(docs)}. Upserting into Pinecone…")
    storage_ctx = StorageContext.from_defaults(vector_store=vector_store)
    # Creates a VectorStoreIndex that writes embeddings directly to Pinecone
    VectorStoreIndex.from_documents(docs, storage_context=storage_ctx, show_progress=True)
    log.info("Index upsert complete.")
# Initialize the index once at app start
bootstrap_index()
# Lightweight query function (wraps the existing vector store)
def answer(query: str, top_k: int = 4) -> str:
    """Retrieve the top_k most similar chunks from Pinecone and synthesize an answer."""
    if not query.strip():
        return "Please enter a question about the insurance document."
    index = VectorStoreIndex.from_vector_store(vector_store)
    engine = index.as_query_engine(similarity_top_k=top_k)
    resp = engine.query(query)
    return str(resp)
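# If you also want citations, a LlamaIndex response exposes `source_nodes`
# (the retrieved chunks with their similarity scores). A sketch of a variant
# that appends them; the function name and formatting are illustrative:
def answer_with_sources(query: str, top_k: int = 4) -> str:
    if not query.strip():
        return "Please enter a question about the insurance document."
    index = VectorStoreIndex.from_vector_store(vector_store)
    resp = index.as_query_engine(similarity_top_k=top_k).query(query)
    cites = "\n".join(
        f"- (score={sn.score}) {sn.node.get_content()[:120]}"
        for sn in resp.source_nodes
    )
    return f"{resp}\n\n**Sources**\n{cites}"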
# ========== UI ==========
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("<h1 style='text-align:center;'>Insurance Q&A (RAG)</h1>")
    gr.Markdown(
        "This app indexes the file(s) in <code>./data</code> (e.g., <b>insurance.pdf</b>) "
        "into Pinecone, then answers questions using LlamaIndex + OpenAI."
    )
    q = gr.Textbox(label="Ask a question", placeholder="e.g., What is covered under outpatient benefits?")
    topk = gr.Slider(1, 10, value=4, step=1, label="Top-K matches")
    btn = gr.Button("Ask")
    out = gr.Markdown()
    btn.click(answer, inputs=[q, topk], outputs=[out])
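    # Optional: also trigger on Enter (gr.Textbox exposes a .submit event
    # with the same signature as Button.click):
    q.submit(answer, inputs=[q, topk], outputs=[out])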
if __name__ == "__main__":
    demo.launch()