# Source provenance: Hugging Face Space by decodingdatascience, commit 577c788 (verified), 6.16 kB.
# app.py — Insurance Q&A (RAG) with Omantel branding, FAQ dropdown, no Top-K control
# Hugging Face Spaces (Gradio) – uses Pinecone + LlamaIndex + OpenAI
import logging
import os
import time

import gradio as gr

# ---- Vector + LLM stack ----
from pinecone import Pinecone, ServerlessSpec
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, Settings
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
# ========== CONFIG ==========
# Required secrets — the app refuses to start without them (checked below).
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Optional overrides via Space Variables (each has a working default).
PINECONE_INDEX_NAME = os.getenv("PINECONE_INDEX_NAME", "dds-insurance-index")
PINECONE_REGION = os.getenv("PINECONE_REGION", "us-east-1")
PINECONE_CLOUD = os.getenv("PINECONE_CLOUD", "aws")
# NOTE(review): if EMBED_MODEL is overridden to a model with a different output
# dimensionality, the hard-coded dim=1536 passed to ensure_index() below will
# mismatch — confirm before overriding.
EMBED_MODEL = os.getenv("EMBED_MODEL", "text-embedding-3-small") # 1536-dim
LLM_MODEL = os.getenv("LLM_MODEL", "gpt-4o-mini")
DATA_DIR = "data" # Place documents (e.g., insurance.pdf) here
DEFAULT_TOP_K = 4 # Internal similarity_top_k (no UI slider)
# Omantel branding — using the exact logo you provided (raw URL to ensure it displays)
LOGO_URL = "https://raw.githubusercontent.com/Decoding-Data-Science/Omantel/main/Omantel_Logo%20(1).png"
# Fail fast at import time with an actionable message rather than erroring
# later inside the Pinecone/OpenAI clients.
if not PINECONE_API_KEY:
    raise RuntimeError("Missing PINECONE_API_KEY (set it in your Space → Settings → Variables).")
if not OPENAI_API_KEY:
    raise RuntimeError("Missing OPENAI_API_KEY (set it in your Space → Settings → Variables).")
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dds-space")
# ========== CLIENTS / GLOBALS ==========
# LlamaIndex global settings: every index/query built later inherits these
# embedding and LLM clients implicitly.
Settings.embed_model = OpenAIEmbedding(model=EMBED_MODEL, api_key=OPENAI_API_KEY)
Settings.llm = OpenAI(model=LLM_MODEL, api_key=OPENAI_API_KEY)
# Pinecone client (shared by ensure_index and the vector store below).
pc = Pinecone(api_key=PINECONE_API_KEY)
def ensure_index(name: str, dim: int = 1536):
    """Return a Pinecone Index handle, creating the index if it does not exist.

    Args:
        name: Pinecone index name.
        dim: Embedding dimensionality; must match EMBED_MODEL's output size
            (text-embedding-3-small emits 1536-dim vectors).

    Returns:
        A Pinecone ``Index`` client bound to ``name``, ready to accept
        upserts and queries.
    """
    existing = [i["name"] for i in pc.list_indexes()]
    if name not in existing:
        log.info(f"Creating Pinecone index '{name}' (dim={dim})...")
        pc.create_index(
            name=name,
            dimension=dim,
            metric="cosine",
            spec=ServerlessSpec(cloud=PINECONE_CLOUD, region=PINECONE_REGION),
        )
        # Serverless index creation is asynchronous. Without this wait, the
        # very first upsert in bootstrap_index() can race the provisioning
        # and fail on a cold start.
        while not pc.describe_index(name).status["ready"]:
            time.sleep(1)
    return pc.Index(name)
# Shared handles: the raw Pinecone index and its LlamaIndex vector-store
# wrapper. dim=1536 matches the default EMBED_MODEL (text-embedding-3-small).
pinecone_index = ensure_index(PINECONE_INDEX_NAME, dim=1536)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
def bootstrap_index():
    """Load every document under ./data and upsert it into Pinecone.

    Intended to be safe to re-run on each startup (idempotent in practice).

    Raises:
        RuntimeError: if the data directory is missing or contains no files.
    """
    if not os.path.isdir(DATA_DIR):
        raise RuntimeError("No 'data/' directory found. Commit your documents to data/ in the Space repo.")
    log.info("Loading documents from ./data ...")
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    if not documents:
        raise RuntimeError("No documents found in data/. Add e.g., data/insurance.pdf")
    log.info(f"Docs loaded: {len(documents)}. Upserting into Pinecone…")
    ctx = StorageContext.from_defaults(vector_store=vector_store)
    VectorStoreIndex.from_documents(documents, storage_context=ctx, show_progress=True)
    log.info("Index upsert complete.")
# Build the index once at process startup, before the Gradio UI is served.
bootstrap_index()
def answer(query: str) -> str:
    """Answer ``query`` against the Pinecone-backed index.

    A blank/missing query short-circuits with a friendly prompt instead of
    touching the retrieval stack.

    Args:
        query: Free-text question from the UI (may be empty or None).

    Returns:
        The LLM-generated answer as a string, or a usage hint for blank input.
    """
    normalized = (query or "").strip()
    if not normalized:
        return "Please enter a question (or select one from the FAQ list)."
    # Re-wrap the existing vector store (cheap — no re-embedding) and query it.
    engine = VectorStoreIndex.from_vector_store(vector_store).as_query_engine(
        similarity_top_k=DEFAULT_TOP_K
    )
    return str(engine.query(query))
# ---- Frequently Asked Questions (edit to your document) ----
# The leading empty string is the dropdown's "no selection" choice, letting
# users fall back to the free-text box instead.
FAQS = [
    "",
    "What benefits are covered under the policy?",
    "How do I file a claim and what documents are required?",
    "What are the exclusions and limitations?",
    "Is pre-authorization needed for hospitalization?",
    "What is the reimbursement timeline?",
    "How are outpatient vs inpatient services handled?",
    "How can I check my network hospitals/clinics?",
    "What is the co-pay or deductible policy?",
]
def use_faq(selected_faq: str, free_text: str):
    """Resolve which question to ask and answer it.

    A non-blank FAQ selection takes priority over the free-text box.

    Args:
        selected_faq: The dropdown value (may be "" or None).
        free_text: The textbox value (may be "" or None).

    Returns:
        A ``(prompt, answer)`` pair: the prompt actually used (echoed in the
        UI) and the model's answer, or a usage hint when both inputs are blank.
    """
    for candidate in (selected_faq, free_text):
        chosen = (candidate or "").strip()
        if chosen:
            return chosen, answer(chosen)
    return "", "Please select a FAQ or type your question."
# ========== UI ==========
CSS = """
.header {
display: flex;
align-items: center;
gap: 12px;
justify-content: center;
margin-top: 8px;
}
.header img {
height: 42px;
}
.header h1 {
margin: 0;
font-weight: 700;
font-size: 1.4rem;
}
.subnote {
text-align: center;
margin-top: -6px;
opacity: 0.8;
}
"""
# Declarative Gradio UI: branded header, FAQ dropdown + free-text input on
# the left, echoed prompt + answer on the right.
with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    # Header with logo + centered title (raw HTML inside Markdown).
    with gr.Row():
        gr.Markdown(
            f"""
<div class="header">
<img src="{LOGO_URL}" alt="Omantel logo" />
<h1>Omantel Insurance Q&A — RAG Assistant</h1>
</div>
<p class="subnote">Ask about coverage, claims, exclusions, and more — powered by LlamaIndex + Pinecone</p>
""",
            elem_id="header_md"
        )
    with gr.Row():
        # Left column: inputs (FAQ dropdown takes priority over free text —
        # see use_faq).
        with gr.Column(scale=1):
            gr.Markdown("### Ask from Frequently Asked Questions")
            faq = gr.Dropdown(choices=FAQS, value=FAQS[0], label="Select a common question")
            gr.Markdown("### Or type your question")
            user_q = gr.Textbox(
                label="Your question",
                placeholder="e.g., What is covered under outpatient benefits?",
                lines=2
            )
            ask_btn = gr.Button("Ask", variant="primary")
        # Right column: outputs (read-only echo of the query + the answer).
        with gr.Column(scale=1):
            chosen_prompt = gr.Textbox(label="Query sent", interactive=False)
            answer_box = gr.Markdown()
    # Wire the button: (faq, free text) -> (echoed prompt, model answer).
    ask_btn.click(use_faq, inputs=[faq, user_q], outputs=[chosen_prompt, answer_box])
# Script entry point: launch the Gradio server when run directly
# (Spaces runs this module as the main program).
if __name__ == "__main__":
    demo.launch()