|
|
import gradio as gr |
|
|
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader |
|
|
from llama_index.llms.huggingface import HuggingFaceInferenceAPI |
|
|
from llama_index.embeddings.huggingface import HuggingFaceEmbedding |
|
|
import os |
|
|
from pathlib import Path |
|
|
import shutil |
|
|
|
|
|
|
|
|
# Hugging Face API token. Falls back to a placeholder so the module imports
# cleanly without configuration; actual inference calls will fail until a
# real token is supplied via the HF_TOKEN environment variable.
os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN", "your-huggingface-api-token")

# Remote LLM used to answer policy questions through the HF Inference API.
llm = HuggingFaceInferenceAPI(
    model_name="mistralai/Mistral-7B-Instruct-v0.3",
    api_key=os.environ["HF_TOKEN"]
)

# Local embedding model used to vectorize the uploaded document.
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Directory where the uploaded policy document is staged for indexing.
DOCS_DIR = "policy_docs"
# exist_ok=True is idempotent and avoids the check-then-create race of the
# previous `if not os.path.exists(...)` guard.
os.makedirs(DOCS_DIR, exist_ok=True)

# Vector index over the current document; None until one has been processed.
index = None
|
|
|
|
|
def process_document(file):
    """Process an uploaded policy document and build a vector index over it.

    Args:
        file: Gradio file object whose ``name`` attribute is the path of the
            uploaded temp file (per gr.File's default behavior), or None if
            nothing was selected.

    Returns:
        A human-readable status string for the upload-status textbox.
    """
    global index
    if file is None:
        return "Please select a document to upload."
    try:
        # Start from a clean directory so only the latest document is indexed.
        if os.path.exists(DOCS_DIR):
            shutil.rmtree(DOCS_DIR)
        os.makedirs(DOCS_DIR)

        # Bug fix: ``file.name`` is a full temp-file path. ``os.path.join``
        # discards the left component when the right one is absolute, so the
        # original join produced the source path itself and shutil.copy then
        # failed with SameFileError. Join only the base name.
        file_path = os.path.join(DOCS_DIR, os.path.basename(file.name))
        shutil.copy(file.name, file_path)

        # Load the staged document and embed it into an in-memory vector index.
        documents = SimpleDirectoryReader(DOCS_DIR).load_data()
        index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

        return "Document processed successfully! You can now ask questions about the policy."
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error processing document: {str(e)}"
|
|
|
|
|
def policy_chat(message, history):
    """Answer a user question about the uploaded policy document.

    Args:
        message: The user's question.
        history: Chat history in Gradio "messages" format — a list of
            ``{"role": ..., "content": ...}`` dicts (may be None/empty).

    Returns:
        The updated history list, always in "messages" format. The original
        returned a bare string in the no-index and error branches, which a
        ``gr.Chatbot(type="messages")`` output cannot render; every branch
        now appends to history so the component stays consistent.
    """
    global index
    history = history or []

    def _reply(text):
        # Record the exchange in the messages format the Chatbot expects.
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": text})
        return history

    if index is None:
        return _reply("Please upload a policy document first.")
    try:
        # Retrieve the 3 most similar chunks and let the LLM synthesize.
        query_engine = index.as_query_engine(llm=llm, similarity_top_k=3)

        prompt = (
            f"You are a policy expert. A user has asked: '{message}'. "
            "Based on the uploaded policy document, provide a concise response addressing the user's query. "
            "Consider financial, practical, and other relevant perspectives. "
            "If the query is about whether the policy works for the user, evaluate eligibility and benefits clearly. "
            "Keep the response brief and clear."
        )

        response = query_engine.query(prompt)
        return _reply(str(response))
    except Exception as e:
        return _reply(f"Error processing query: {str(e)}")
|
|
|
|
|
|
|
|
# Gradio UI: a single-page app with an upload section on top and a chat
# section below. Component creation order determines on-screen layout.
with gr.Blocks() as demo:
    gr.Markdown("# Policy Bot")
    gr.Markdown("Upload a policy document (PDF, text, etc.) and ask questions about it. The bot will analyze the policy and respond from financial, practical, and other perspectives.")

    # Upload section: pick a file, click to index it, see the status message.
    file_input = gr.File(label="Upload Policy Document")
    upload_button = gr.Button("Process Document")
    upload_output = gr.Textbox(label="Upload Status")

    # Chat section: messages-format chatbot plus a free-text question box.
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Ask about the policy (e.g., 'Will this policy cover my medical expenses?')")
    # ClearButton resets both the input box and the chat history.
    clear = gr.ClearButton([msg, chatbot])

    # Wire events: the button (re)builds the index; submitting the textbox
    # sends the question and current history to the chat handler.
    upload_button.click(process_document, inputs=file_input, outputs=upload_output)
    msg.submit(policy_chat, inputs=[msg, chatbot], outputs=chatbot)
|
|
|
|
|
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()