# policybot / app.py — Policy Bot (Hugging Face Space)
# Author: Balaprime — commit 584cd3e ("Update app.py", verified)
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import os
from pathlib import Path
import shutil
# --- Model and storage setup ------------------------------------------------
# NOTE(review): if HF_TOKEN is unset this writes the literal placeholder
# "your-huggingface-api-token" into the environment; the API call will then
# fail at query time. On HF Spaces, set HF_TOKEN as a repository secret.
os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN", "your-huggingface-api-token")

# Mistral 7B Instruct via the Hugging Face Inference API for answer generation.
llm = HuggingFaceInferenceAPI(
    model_name="mistralai/Mistral-7B-Instruct-v0.3",
    api_key=os.environ["HF_TOKEN"]
)

# Local sentence-transformers model used to embed document chunks for retrieval.
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Directory where the uploaded policy document is staged before indexing.
DOCS_DIR = "policy_docs"
# exist_ok avoids the check-then-create race of the original if/makedirs pair.
os.makedirs(DOCS_DIR, exist_ok=True)

# Module-level vector index; populated by process_document(), read by policy_chat().
index = None
def process_document(file):
    """Index an uploaded policy document so it can be queried.

    Parameters
    ----------
    file : gradio file object
        Exposes ``.name``, the absolute path of the uploaded temp file.

    Returns
    -------
    str
        Human-readable status message shown in the "Upload Status" box.
    """
    global index
    try:
        # Start from a clean slate so stale documents never leak into answers.
        if os.path.exists(DOCS_DIR):
            shutil.rmtree(DOCS_DIR)
        os.makedirs(DOCS_DIR)
        # BUG FIX: ``file.name`` is an absolute temp path. os.path.join()
        # discards DOCS_DIR when its second argument is absolute, so the old
        # code computed file_path == file.name and copied the file onto
        # itself. Join with the basename only.
        dest_path = os.path.join(DOCS_DIR, os.path.basename(file.name))
        shutil.copy(file.name, dest_path)
        # Load everything in DOCS_DIR (one file after the reset above).
        documents = SimpleDirectoryReader(DOCS_DIR).load_data()
        # Build the retrieval index with the module-level embedding model.
        index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
        return "Document processed successfully! You can now ask questions about the policy."
    except Exception as e:
        # Surface the failure to the UI rather than crashing the app.
        return f"Error processing document: {str(e)}"
def policy_chat(message, history):
    """Answer a user question about the indexed policy document.

    Parameters
    ----------
    message : str
        The user's question.
    history : list[dict] | None
        Prior chat turns as ``{"role": ..., "content": ...}`` dicts
        (the ``gr.Chatbot(type="messages")`` format).

    Returns
    -------
    list[dict]
        The updated history, which is what the Chatbot component renders.
    """
    global index
    # Defensive copy; also tolerates gradio passing None on the first turn.
    history = list(history) if history else []
    # BUG FIX: the original returned a bare string on the no-index and error
    # paths, but the wired output is a messages-type Chatbot, which can only
    # render the history list. Always append a reply and return the history.
    history.append({"role": "user", "content": message})
    if index is None:
        history.append({"role": "assistant",
                        "content": "Please upload a policy document first."})
        return history
    try:
        # Retrieve the 3 most relevant chunks and answer with the shared LLM.
        query_engine = index.as_query_engine(llm=llm, similarity_top_k=3)
        # Prompt steers the model toward concise, multi-perspective answers.
        prompt = (
            f"You are a policy expert. A user has asked: '{message}'. "
            "Based on the uploaded policy document, provide a concise response addressing the user's query. "
            "Consider financial, practical, and other relevant perspectives. "
            "If the query is about whether the policy works for the user, evaluate eligibility and benefits clearly. "
            "Keep the response brief and clear."
        )
        response = query_engine.query(prompt)
        history.append({"role": "assistant", "content": str(response)})
    except Exception as e:
        history.append({"role": "assistant",
                        "content": f"Error processing query: {str(e)}"})
    return history
# --- Gradio interface -------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Policy Bot")
    gr.Markdown("Upload a policy document (PDF, text, etc.) and ask questions about it. The bot will analyze the policy and respond from financial, practical, and other perspectives.")
    # Document upload controls.
    file_input = gr.File(label="Upload Policy Document")
    upload_button = gr.Button("Process Document")
    upload_output = gr.Textbox(label="Upload Status")
    # Chat area; "messages" type renders a list of {"role", "content"} dicts.
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Ask about the policy (e.g., 'Will this policy cover my medical expenses?')")
    clear = gr.ClearButton([msg, chatbot])
    # Event wiring.
    upload_button.click(process_document, inputs=file_input, outputs=upload_output)
    # FIX: clear the input box after each submission so the user does not have
    # to delete the previous question by hand before asking the next one.
    msg.submit(policy_chat, inputs=[msg, chatbot], outputs=chatbot).then(
        lambda: "", None, msg
    )

if __name__ == "__main__":
    demo.launch()