# LMACMY-Affairs / app.py
# Last update by ageraustine (commit dc37fde, verified)
import gradio as gr
import os
import json
from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic
load_dotenv()
class LegionMariaAssistant:
    """Two-tier LLM assistant for the Legion Maria Youth Affairs chatbot.

    A lightweight "router" model first classifies each user query into one
    section of the local JSON knowledge base; a "response" model then
    answers using only that section's data, which keeps prompts small and
    answers grounded.
    """

    def __init__(self):
        # Both tiers currently use the same lightweight model and low
        # temperature; construction is factored into _build_llm to avoid
        # the duplicated ChatAnthropic(...) blocks.
        # Router LLM - lightweight for section classification
        self.router_llm = self._build_llm()
        # Response LLM - for generating final answers (low temperature for
        # more consistent, direct responses)
        self.response_llm = self._build_llm()
        self.data_content = {}
        self.load_data()

    @staticmethod
    def _build_llm(temperature=0.1):
        """Create a ChatAnthropic client with the shared app configuration.

        temperature defaults to 0.1, matching both original constructions.
        """
        return ChatAnthropic(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            model="claude-3-5-haiku-20241022",
            temperature=temperature,
        )

    def load_data(self):
        """Load structured JSON data from ./data.json into self.data_content.

        On any failure (missing file, bad JSON) falls back to an empty dict
        so the rest of the app degrades gracefully instead of crashing.
        """
        data_file = "./data.json"
        if os.path.exists(data_file):
            try:
                with open(data_file, 'r', encoding='utf-8') as f:
                    self.data_content = json.load(f)
                print("Loaded Legion Maria JSON data successfully")
                print(f"Available sections: {list(self.data_content.keys())}")
            except Exception as e:
                # Broad catch is deliberate: a corrupt data file must not
                # prevent the app from starting.
                print(f"Error loading data: {str(e)}")
                self.data_content = {}
        else:
            print("data.json not found")
            self.data_content = {}

    def route_query(self, message):
        """Router LLM decides which section of data to use.

        Returns a key of self.data_content, the literal "general" for
        cross-section queries, or "about" as the fallback on any error or
        unrecognized model output.
        """
        available_sections = list(self.data_content.keys())
        router_prompt = f"""You are a query router for the Legion Maria Youth Affairs system.
Available data sections: {available_sections}
DETAILED SECTION CONTENTS:
ABOUT section contains:
- organization, headquarters_location, head_office, contact info (phone, email, websites)
- mission, vision, core_values (faithfulness, inclusivity, service, formation, community, integrity)
- church_beliefs (core_belief, reach, membership, dress_code, sacred_practice)
- worship (practices, prohibited, prayer_schedule)
OFFICE section contains:
- departments (30+ departments with officers and roles like administration, treasury, education, procurement, events, medical, auditing, communications, projects, ICT, diaspora, facilities, music, logistics, grants, data engineering, coordination, security, etc.)
- completed_projects, pending_projects (washrooms, radio/TV station, hospital, mausoleum, refurbishments)
- current_activities (festivals, sports, workshops, conventions, camps)
- support_donations (mpesa, bank_account details)
LEADERSHIP section contains:
- supreme_leadership (patron_pope, matron_mother_superior, dean_of_cardinals)
- youth_affairs_director (title, contact)
- organizational_structure
User query: "{message}"
Respond with ONLY the most relevant section name. If the query spans multiple sections or is general, respond with "general".
ROUTING EXAMPLES:
- "Who is the director/pope/patron?" -> leadership
- "What is your mission/vision/values?" -> about
- "Where is headquarters/location/office?" -> about
- "Contact info/phone/email/website?" -> about
- "Tell me about projects/departments?" -> office
- "How to donate/support/contribute?" -> office
- "What activities do you do?" -> office
- "Church beliefs/worship/practices?" -> about
- "What do you do?" -> general
Section:"""
        try:
            response = self.router_llm.invoke([{"role": "user", "content": router_prompt}])
            section = response.content.strip().lower()
            # Validate that the model returned a real section name.
            if section in available_sections:
                return section
            elif section == "general":
                return "general"
            else:
                return "about"  # Default fallback
        except Exception as e:
            print(f"Router error: {str(e)}")
            return "about"  # Default fallback

    @staticmethod
    def _build_context(history):
        """Render the last few chat exchanges as a prompt preamble.

        history is a Gradio-style list of [user_msg, assistant_msg] pairs.
        Pairs whose user turn is None (e.g. the bot-only initial greeting
        kept in the chatbot value) are skipped — the previous version
        rendered them as a literal "User: None" line in the prompt.
        Returns "" when there is no history.
        """
        if not history:
            return ""
        context = "Previous conversation:\n"
        for user_msg, assistant_msg in history[-3:]:  # Keep last 3 exchanges
            if user_msg is None:
                # Bot-only turn (initial greeting); no user text to quote.
                continue
            context += f"User: {user_msg}\nAssistant: {assistant_msg}\n\n"
        context += "Current conversation:\n"
        return context

    def chat_response(self, message, history):
        """Two-tier LLM system: Router + Specialist response.

        Returns the assistant's reply string; never raises (errors are
        reported to the user as a friendly fallback message).
        """
        if not message.strip():
            return "Please ask me something about our Legion Maria Youth Affairs!"
        try:
            if not self.data_content:
                return "I don't have that information available right now."
            # Step 1: Router LLM decides which section to use
            selected_section = self.route_query(message)
            print(f"Router selected section: {selected_section}")
            # Step 2: Get relevant data based on routing decision
            if selected_section == "general":
                # Use all data for general queries
                relevant_data = self.data_content
            else:
                # Use only the specific section
                relevant_data = {selected_section: self.data_content.get(selected_section, {})}
            # Build conversation context from recent history
            conversation_context = self._build_context(history)
            # Step 3: Response LLM generates answer using only relevant data
            response_prompt = f"""You are Santa Legion from the Legion Maria Directorate of Youth Affairs. IMPORTANT: You must ONLY use the information provided below. Do not use any external knowledge or make assumptions.
ONLY USE THIS INFORMATION:
{json.dumps(relevant_data, indent=2)}
{conversation_context}User: {message}
STRICT RULES:
- You are Santa Legion, an assistant for the Legion Maria Youth Affairs (NOT the director yourself)
- Speak as "I" when referring to yourself, "we/our" for the organization
- ONLY answer using the information provided above
- When asked about leadership, refer to them in third person (e.g., "Our director is...")
- Keep responses SHORT (1-3 sentences maximum)
- Do not invent or assume any information not in the provided data
- Never mention being provided documents or data
- If asked about something not in your data, say "I don't have that information"
Answer based ONLY on the provided information:"""
            # Get response from specialist LLM
            response = self.response_llm.invoke([{"role": "user", "content": response_prompt}])
            return response.content
        except Exception as e:
            print(f"Error generating response: {str(e)}")
            return "I'm sorry, I'm having trouble right now. Please try again."
def main():
    """Build and launch the mobile-optimized Gradio chat UI."""
    assistant = LegionMariaAssistant()
    # Initial greeting message shown before the user types anything.
    initial_greeting = [
        [None, "👋 Hello! I'm Santa Legion from the Legion Maria Youth Affairs. I'm here to help you learn about our mission, leadership, projects, and activities. What would you like to know?"]
    ]
    # Create mobile-optimized Gradio chat interface
    with gr.Blocks(title="Legion Maria Chat", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 💬 Legion Maria YA")
        # Mobile-optimized chat interface
        chatbot = gr.Chatbot(
            value=initial_greeting,
            height=400,  # Reduced for mobile
            show_label=False,
            container=True,
            bubble_full_width=True,  # Better for mobile
            show_share_button=False
        )
        # Mobile-friendly input layout
        with gr.Row():
            msg = gr.Textbox(
                placeholder="Ask me anything...",
                show_label=False,
                scale=5,
                container=False,
                lines=1
            )
            send_btn = gr.Button("📤", variant="primary", scale=1, size="sm")
        clear = gr.Button("🔄 New Chat", variant="secondary", size="sm")

        def respond(message, history):
            """Append the bot's reply to history and clear the textbox."""
            if message.strip():
                bot_response = assistant.chat_response(message, history)
                history.append([message, bot_response])
            # Always return both outputs: the original implicitly returned
            # None on blank input, which makes Gradio raise a "returned
            # too few output values" error for the [chatbot, msg] outputs.
            return history, ""

        # Event handlers: Enter key and send button both submit.
        msg.submit(respond, [msg, chatbot], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot], [chatbot, msg])
        # Reset the chat back to just the greeting.
        clear.click(lambda: initial_greeting, None, chatbot)
    # Launch with better settings for chat app
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_api=False
    )
# Script entry point: start the Gradio app only when run directly.
if __name__ == "__main__":
    main()