Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
from dotenv import load_dotenv
|
| 5 |
+
from langchain_anthropic import ChatAnthropic
|
| 6 |
+
|
| 7 |
+
# Pull ANTHROPIC_API_KEY (and any other settings) from a local .env file.
load_dotenv()
|
| 8 |
+
|
| 9 |
+
class LegionMariaAssistant:
    """Two-tier LLM assistant for the Legion Maria Directorate of Youth Affairs.

    A lightweight "router" model first classifies each query into one of the
    JSON data sections; a "response" model then answers using only that
    section's content (or all sections for general queries).
    """

    def __init__(self, data_file="./data.json"):
        """Create the two LLM clients and load the knowledge base.

        Args:
            data_file: Path to the structured JSON knowledge base. Defaults
                to "./data.json" so existing callers are unaffected.
        """
        # Router LLM - lightweight, near-deterministic section classification
        self.router_llm = ChatAnthropic(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            model="claude-3-5-haiku-20241022",
            temperature=0.1,
        )

        # Response LLM - slightly higher temperature for conversational answers
        self.response_llm = ChatAnthropic(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            model="claude-3-5-haiku-20241022",
            temperature=0.3,
        )

        self.data_file = data_file
        self.data_content = {}
        self.load_data()

    def load_data(self):
        """Load the structured JSON knowledge base into ``self.data_content``.

        Any failure leaves ``self.data_content`` as an empty dict (rather
        than raising) so the app still starts and degrades gracefully.
        """
        self.data_content = {}
        try:
            # EAFP: open directly instead of the old os.path.exists()
            # pre-check, which was race-prone and duplicated the error path.
            with open(self.data_file, 'r', encoding='utf-8') as f:
                loaded = json.load(f)
        except FileNotFoundError:
            print("data.json not found")
            return
        except Exception as e:
            print(f"Error loading data: {str(e)}")
            return

        # Guard: valid JSON may still be a list/scalar, but the rest of the
        # class assumes a dict of sections (.keys(), .get()).
        if isinstance(loaded, dict):
            self.data_content = loaded
            print("Loaded Legion Maria JSON data successfully")
            print(f"Available sections: {list(self.data_content.keys())}")
        else:
            print("Error loading data: expected a JSON object of sections")

    def route_query(self, message):
        """Ask the router LLM which data section best answers *message*.

        Returns:
            An actual key of ``self.data_content``, or ``"general"`` when
            the query spans sections, is unrecognized, or routing fails.
        """
        available_sections = list(self.data_content.keys())
        # Case-insensitive lookup table. Bug fix: the model's output is
        # lowercased, so an exact-match check silently failed for any
        # non-lowercase JSON key (e.g. "About").
        sections_by_lower = {s.lower(): s for s in available_sections}

        router_prompt = f"""You are a query router for the Legion Maria Youth Affairs system.

Available data sections: {available_sections}

Each section contains:
- about: mission, vision, core values, organizational information
- office: projects, community outreach, operational details
- leadership: organizational structure, leadership team, roles

User query: "{message}"

Respond with ONLY the most relevant section name from the available sections. If the query spans multiple sections or is general, respond with "general".

Examples:
- "Who is the director?" -> leadership
- "What is your mission?" -> about
- "Tell me about your projects" -> office
- "What do you do?" -> general

Section:"""

        try:
            response = self.router_llm.invoke([{"role": "user", "content": router_prompt}])
            section = response.content.strip().lower()

            if section in sections_by_lower:
                return sections_by_lower[section]
            # Bug fix: the old fallback returned a hard-coded "about", which
            # produced an EMPTY context downstream whenever that key was
            # missing. "general" (all data) is always a safe answer source.
            return "general"
        except Exception as e:
            print(f"Router error: {str(e)}")
            return "general"

    def chat_response(self, message, history):
        """Two-tier LLM system: router picks a section, specialist answers.

        Args:
            message: The user's current input.
            history: Gradio-style list of ``[user, assistant]`` pairs.

        Returns:
            The assistant's reply as a plain string (never raises).
        """
        if not message.strip():
            return "Please ask me something about Legion Maria Youth Affairs!"

        try:
            if not self.data_content:
                return "I don't have information available to answer that question."

            # Step 1: Router LLM decides which section to use
            selected_section = self.route_query(message)
            print(f"Router selected section: {selected_section}")

            # Step 2: narrow the knowledge base to the routed section
            if selected_section == "general":
                relevant_data = self.data_content
            else:
                relevant_data = {selected_section: self.data_content.get(selected_section, {})}

            conversation_context = self._build_history_context(history)

            # Step 3: Response LLM generates the answer from the narrowed data
            response_prompt = f"""You are a friendly chatbot assistant for the Legion Maria Directorate of Youth Affairs. You help people learn about Legion Maria Youth Affairs through natural conversation.

Relevant Legion Maria Youth Affairs Information:
{json.dumps(relevant_data, indent=2)}

{conversation_context}User: {message}

Respond in a conversational, helpful way. Reference previous parts of our conversation when relevant. If the user's question isn't fully covered by the information provided, acknowledge this clearly but still try to be helpful. Focus on the information that's most relevant to their specific question."""

            response = self.response_llm.invoke([{"role": "user", "content": response_prompt}])
            return response.content

        except Exception as e:
            print(f"Error generating response: {str(e)}")
            return "I'm sorry, I encountered an error while processing your request. Please try again."

    @staticmethod
    def _build_history_context(history):
        """Render the last 3 chat exchanges as a prompt preamble ('' if none)."""
        if not history:
            return ""
        parts = ["Previous conversation:\n"]
        for user_msg, assistant_msg in history[-3:]:  # Keep last 3 exchanges
            parts.append(f"User: {user_msg}\nAssistant: {assistant_msg}\n\n")
        parts.append("Current conversation:\n")
        return "".join(parts)
| 133 |
+
|
| 134 |
+
def main():
    """Build and launch the Gradio chat UI around LegionMariaAssistant."""
    assistant = LegionMariaAssistant()

    # Create enhanced Gradio chat interface
    with gr.Blocks(title="Legion Maria Youth Affairs Chat", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 💬 Legion Maria Youth Affairs Chat Assistant")
        gr.Markdown("Welcome! I'm here to help you learn about Legion Maria Youth Affairs. Start a conversation below!")

        # Chat interface with better styling
        chatbot = gr.Chatbot(
            height=500,
            show_label=False,
            container=True,
            bubble_full_width=False,
        )

        with gr.Row():
            msg = gr.Textbox(
                placeholder="Type your message here and press Enter...",
                show_label=False,
                scale=4,
                container=False,
            )
            send_btn = gr.Button("Send", variant="primary", scale=1)

        with gr.Row():
            clear = gr.Button("Clear Chat", variant="secondary")

        def respond(message, history):
            """Handle one chat turn; returns (updated history, cleared textbox)."""
            if message.strip():
                bot_response = assistant.chat_response(message, history)
                history.append([message, bot_response])
            # Bug fix: always return the (history, textbox) pair. The original
            # implicitly returned None on blank input, which breaks Gradio's
            # unpacking of the two declared outputs [chatbot, msg].
            return history, ""

        # Event handlers: Enter key and the Send button share one callback.
        msg.submit(respond, [msg, chatbot], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot], [chatbot, msg])
        clear.click(lambda: [], None, chatbot)

    # Bind on all interfaces for container/Spaces hosting.
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_api=False,
    )
|
| 181 |
+
|
| 182 |
+
# Script entry point: start the Gradio app only when run directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
|