ageraustine committed on
Commit
590d9bb
·
verified ·
1 Parent(s): 5133f95

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +197 -0
app.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ import json
4
+ from dotenv import load_dotenv
5
+ from langchain_anthropic import ChatAnthropic
6
+
7
+ load_dotenv()
8
+
9
class LegionMariaAssistant:
    """Two-tier LLM assistant for the Legion Maria Directorate of Youth Affairs.

    A lightweight "router" LLM classifies each user query into one section of
    a local JSON knowledge base (./data.json); a second "response" LLM then
    answers using only that section's data, keeping prompts small and focused.
    """

    def __init__(self):
        # Router LLM - lightweight for section classification
        self.router_llm = self._make_llm(0.1)

        # Response LLM - for generating final answers
        # (low temperature for more consistent, direct responses)
        self.response_llm = self._make_llm(0.1)

        self.data_content = {}
        self.load_data()

    @staticmethod
    def _make_llm(temperature):
        """Build a ChatAnthropic client with the shared model settings.

        Both tiers use the same model; only the temperature is a parameter so
        the two constructions cannot drift apart accidentally.
        """
        return ChatAnthropic(
            anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"),
            model="claude-3-5-haiku-20241022",
            temperature=temperature,
        )

    def load_data(self):
        """Load structured JSON data from ./data.json into self.data_content.

        Best-effort: on a missing file or any load failure the assistant
        falls back to an empty knowledge base instead of crashing at startup.
        """
        data_file = "./data.json"
        if os.path.exists(data_file):
            try:
                with open(data_file, 'r', encoding='utf-8') as f:
                    self.data_content = json.load(f)

                print("Loaded Legion Maria JSON data successfully")
                print(f"Available sections: {list(self.data_content.keys())}")

            except Exception as e:
                # Broad catch is deliberate: startup must survive a bad file.
                print(f"Error loading data: {str(e)}")
                self.data_content = {}
        else:
            print("data.json not found")
            self.data_content = {}

    def route_query(self, message):
        """Router LLM decides which section of the data to use.

        Returns one of the keys of self.data_content, the literal string
        "general" (meaning: use all sections), or "about" as a safe fallback
        when the router's reply cannot be interpreted.
        """
        available_sections = list(self.data_content.keys())

        router_prompt = f"""You are a query router for the Legion Maria Youth Affairs system.

Available data sections: {available_sections}

Each section contains:
- about: mission, vision, core values, organizational information
- office: projects, community outreach, operational details
- leadership: organizational structure, leadership team, roles

User query: "{message}"

Respond with ONLY the most relevant section name from the available sections. If the query spans multiple sections or is general, respond with "general".

Examples:
- "Who is the director?" -> leadership
- "What is your mission?" -> about
- "Tell me about your projects" -> office
- "What do you do?" -> general

Section:"""

        try:
            response = self.router_llm.invoke([{"role": "user", "content": router_prompt}])
            section = response.content.strip().lower()

            # Exact match on the reply is the happy path.
            if section in available_sections or section == "general":
                return section

            # Models sometimes wrap the answer (e.g. "Section: leadership.").
            # Scan the reply tokens (last first, since the answer usually
            # ends the sentence) for any known section name before giving up.
            tokens = [tok.strip('".:,!') for tok in section.split()]
            for tok in reversed(tokens):
                if tok in available_sections or tok == "general":
                    return tok

            return "about"  # Default fallback

        except Exception as e:
            # Routing must never take the chat down; degrade to "about".
            print(f"Router error: {str(e)}")
            return "about"  # Default fallback

    def chat_response(self, message, history):
        """Two-tier LLM system: Router + Specialist response.

        Args:
            message: the user's current question (plain string).
            history: list of [user, assistant] pairs from the chat UI.

        Returns:
            The assistant's reply as a plain string; on any internal failure
            a user-friendly apology string rather than an exception.
        """
        if not message.strip():
            return "Please ask me something about Legion Maria Youth Affairs!"

        try:
            if not self.data_content:
                return "I don't have information available to answer that question."

            # Step 1: Router LLM decides which section to use
            selected_section = self.route_query(message)
            print(f"Router selected section: {selected_section}")

            # Step 2: Get relevant data based on routing decision
            if selected_section == "general":
                # Use all data for general queries
                relevant_data = self.data_content
            else:
                # Use only the specific section
                relevant_data = {selected_section: self.data_content.get(selected_section, {})}

            # Build conversation context from the last 3 exchanges only,
            # keeping the prompt short.
            conversation_context = ""
            if history:
                conversation_context = "Previous conversation:\n"
                for user_msg, assistant_msg in history[-3:]:
                    conversation_context += f"User: {user_msg}\nAssistant: {assistant_msg}\n\n"
                conversation_context += "Current conversation:\n"

            # Step 3: Response LLM generates answer using only relevant data
            response_prompt = f"""You are a concise assistant for the Legion Maria Directorate of Youth Affairs. Provide brief, direct answers.

Relevant Information:
{json.dumps(relevant_data, indent=2)}

{conversation_context}User: {message}

Guidelines:
- Keep responses SHORT (1-3 sentences maximum)
- Be direct and to the point
- Only include essential information
- No unnecessary elaboration
- If information is missing, briefly say so

Answer:"""

            # Get response from specialist LLM
            response = self.response_llm.invoke([{"role": "user", "content": response_prompt}])
            return response.content

        except Exception as e:
            print(f"Error generating response: {str(e)}")
            return "I'm sorry, I encountered an error while processing your request. Please try again."
140
+
141
def main():
    """Build and launch the mobile-optimized Gradio chat UI."""
    assistant = LegionMariaAssistant()

    # Initial greeting message shown in a fresh chat
    initial_greeting = [
        [None, "👋 Hello! I'm your Legion Maria Youth Affairs assistant. I can help you with information about our mission, leadership, projects, and activities. What would you like to know?"]
    ]

    # Create mobile-optimized Gradio chat interface
    with gr.Blocks(title="Legion Maria Chat", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 💬 Legion Maria YA")

        # Mobile-optimized chat interface
        chatbot = gr.Chatbot(
            value=initial_greeting,
            height=400,  # Reduced for mobile
            show_label=False,
            container=True,
            bubble_full_width=True,  # Better for mobile
            show_share_button=False
        )

        # Mobile-friendly input layout
        with gr.Row():
            msg = gr.Textbox(
                placeholder="Ask me anything...",
                show_label=False,
                scale=5,
                container=False,
                lines=1
            )
            send_btn = gr.Button("📤", variant="primary", scale=1, size="sm")

        clear = gr.Button("🔄 New Chat", variant="secondary", size="sm")

        # Chat functionality
        def respond(message, history):
            """Append the assistant's reply and clear the input box.

            Always returns (history, "") — the original implicitly returned
            None for blank/whitespace input, which Gradio cannot unpack into
            the two declared outputs [chatbot, msg].
            """
            if message.strip():
                bot_response = assistant.chat_response(message, history)
                history.append([message, bot_response])
            return history, ""

        # Event handlers
        msg.submit(respond, [msg, chatbot], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot], [chatbot, msg])
        # Hand Gradio a fresh copy each reset so in-place appends can never
        # pollute the shared greeting template.
        clear.click(lambda: [list(pair) for pair in initial_greeting], None, chatbot)

    # Launch with better settings for chat app
    demo.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_api=False
    )
195
+
196
# Script entry point: launch the Gradio app only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()