File size: 3,768 Bytes
ff92e62
584cd3e
 
 
 
 
 
 
 
 
 
 
 
 
 
ff92e62
 
584cd3e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ff92e62
 
584cd3e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
import os
from pathlib import Path
import shutil

# Hugging Face API token: prefer the real env var / HF Spaces secret; the
# placeholder default only keeps local runs from failing at import time.
os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN", "your-huggingface-api-token")

# Hosted LLM used to answer policy questions (no local weights are loaded).
llm = HuggingFaceInferenceAPI(
    model_name="mistralai/Mistral-7B-Instruct-v0.3",
    api_key=os.environ["HF_TOKEN"]
)

# Embedding model used to vectorize uploaded documents for retrieval.
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Directory holding the currently-active policy document.
DOCS_DIR = "policy_docs"
# Idiomatic replacement for the exists()/makedirs() LBYL pair.
os.makedirs(DOCS_DIR, exist_ok=True)

# Module-level handle to the active VectorStoreIndex; None until a document
# has been processed via process_document().
index = None

def process_document(file):
    """Index an uploaded policy document, replacing any previous one.

    Args:
        file: Gradio file object; ``file.name`` is the absolute path of the
            uploaded temporary file.

    Returns:
        str: Human-readable status message shown in the upload textbox.
    """
    global index
    try:
        # Start from a clean slate so the index reflects only the new document.
        if os.path.exists(DOCS_DIR):
            shutil.rmtree(DOCS_DIR)
        os.makedirs(DOCS_DIR)

        # BUG FIX: file.name is an absolute temp path. os.path.join discards
        # DOCS_DIR when its second argument is absolute, so the original code
        # tried to copy the file onto itself. Join only the basename so the
        # copy actually lands inside DOCS_DIR.
        file_path = os.path.join(DOCS_DIR, os.path.basename(file.name))
        shutil.copy(file.name, file_path)

        # Load the saved document and build the vector index with the
        # module-level embedding model.
        documents = SimpleDirectoryReader(DOCS_DIR).load_data()
        index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

        return "Document processed successfully! You can now ask questions about the policy."
    except Exception as e:
        # Surface the failure in the UI status box rather than crashing the app.
        return f"Error processing document: {str(e)}"

def policy_chat(message, history):
    """Answer a user query against the indexed policy document.

    Args:
        message: The user's question as plain text.
        history: Chat history as a list of ``{"role", "content"}`` dicts
            (Gradio ``type="messages"`` format).

    Returns:
        list: Updated history including the new user and assistant turns.
    """
    global index
    # Defensive default: Gradio may pass None before the first turn.
    history = history or []

    # BUG FIX: this function feeds a gr.Chatbot(type="messages") output, so
    # EVERY code path must return a messages-format history list. The
    # original returned bare strings on the no-index and error paths, which
    # the Chatbot component cannot render.
    if index is None:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "Please upload a policy document first."})
        return history

    try:
        # Retrieve the 3 most relevant chunks and let the LLM synthesize.
        query_engine = index.as_query_engine(llm=llm, similarity_top_k=3)

        # Prompt steering: answer from financial/practical perspectives and
        # evaluate eligibility when the user asks "does this work for me?".
        prompt = (
            f"You are a policy expert. A user has asked: '{message}'. "
            "Based on the uploaded policy document, provide a concise response addressing the user's query. "
            "Consider financial, practical, and other relevant perspectives. "
            "If the query is about whether the policy works for the user, evaluate eligibility and benefits clearly. "
            "Keep the response brief and clear."
        )

        response = query_engine.query(prompt)

        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": str(response)})
        return history
    except Exception as e:
        # Report the error inside the chat transcript so the UI stays usable.
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": f"Error processing query: {str(e)}"})
        return history

# Gradio interface: single-page app with a document-upload section on top of
# a chat section. Components are wired declaratively inside the Blocks scope.
with gr.Blocks() as demo:
    gr.Markdown("# Policy Bot")
    gr.Markdown("Upload a policy document (PDF, text, etc.) and ask questions about it. The bot will analyze the policy and respond from financial, practical, and other perspectives.")

    # File upload for policy documents; processing is triggered by the button,
    # not by the upload itself.
    file_input = gr.File(label="Upload Policy Document")
    upload_button = gr.Button("Process Document")
    upload_output = gr.Textbox(label="Upload Status")

    # Chat interface. type="messages" means handlers exchange lists of
    # {"role", "content"} dicts with the Chatbot component.
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(placeholder="Ask about the policy (e.g., 'Will this policy cover my medical expenses?')")
    clear = gr.ClearButton([msg, chatbot])

    # Event handlers: button click rebuilds the index; Enter in the textbox
    # submits a chat turn. NOTE(review): msg is not cleared after submit —
    # confirm whether that is intentional.
    upload_button.click(process_document, inputs=file_input, outputs=upload_output)
    msg.submit(policy_chat, inputs=[msg, chatbot], outputs=chatbot)

# Standard script entry guard; HF Spaces also invokes demo.launch() this way.
if __name__ == "__main__":
    demo.launch()