ckharche committed on
Commit
93419f5
·
verified ·
1 Parent(s): 3239b8a

Update llm_agent.py

Browse files
Files changed (1) hide show
  1. llm_agent.py +380 -377
llm_agent.py CHANGED
@@ -1,377 +1,380 @@
1
- """
2
- llm_agent.py
3
-
4
- FIXED VERSION:
5
- 1. Mistral 7B for Finance (NOT specialized finance model - good for conversations)
6
- 2. Phi-2 2.7B for Education (better than specialized - data quality > size)
7
- 3. 7B max per model (no huge models that fail)
8
- 4. Groq fallback with ONLY available models (mixtral deprecated, using llama)
9
- 5. No falling back immediately - models are ACTUALLY available
10
- ENHANCEMENTS:
11
- 1. Scenario-aware prompts for what-if queries
12
- 2. Proactive insights integration
13
- 3. Better formatting for comparisons
14
- 4. Unchanged: Multi-LLM strategy (HF + Groq)
15
- 5. Unchanged: All existing analytics integration
16
- """
17
-
18
- from groq import Groq
19
- from analytics import (
20
- analyze_exam_performance,
21
- calculate_budget_metrics,
22
- get_time_improvement_suggestions,
23
- generate_practice_questions,
24
- calculate_projected_improvement,
25
- parse_budget_query,
26
- apply_scenario_changes,
27
- compare_scenarios,
28
- generate_proactive_insights
29
- )
30
- import json
31
- import os
32
- from huggingface_hub import InferenceClient
33
-
34
- # HF Models - All 7B or smaller
35
- HF_MODELS = {
36
- "Finance": "mistralai/Mistral-7B-Instruct-v0.1",
37
- "Education": "microsoft/phi-2",
38
- "Family": "HuggingFaceH4/zephyr-7b-beta",
39
- "Friends": "HuggingFaceH4/zephyr-7b-beta",
40
- "Weekend/Vacation": "HuggingFaceH4/zephyr-7b-beta"
41
- }
42
-
43
- # Groq models - Available models only
44
- GROQ_MODELS = {
45
- "Finance": "llama-3.1-8b-instant",
46
- "Education": "llama-3.3-70b-versatile",
47
- "Family": "llama-3.1-8b-instant",
48
- "Friends": "llama-3.1-8b-instant",
49
- "Weekend/Vacation": "llama-3.1-8b-instant"
50
- }
51
-
52
- GROQ_API_KEY = os.getenv("GROQ_API_KEY", "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")
53
- HF_API_KEY = os.getenv("HF_API_KEY", "hf_kuaUsin8tkP44NrlwmsUWGdyb3FYb8ziZyHAU4")
54
-
55
- GENZ_PERSONA = r"""You are Aqua, a GenZ AI mentor who's like that super helpful older friend who's been through it all.
56
-
57
- PERSONALITY TRAITS:
58
- - Warm, friendly, and supportive (but not fake or overly enthusiastic)
59
- - Direct and honest (no sugarcoating, but always constructive)
60
- - Uses casual language naturally (occasional "fr", "ngl", "lowkey" - but don't overdo it)
61
- - Encouraging but realistic about challenges
62
- - Uses emojis purposefully (1-2 per response max)
63
- - Breaks down complex stuff into digestible chunks
64
-
65
- COMMUNICATION STYLE:
66
- - Keep responses concise (2-4 sentences for simple questions, more for complex ones)
67
- - Lead with the most actionable insight
68
- - Use bullet points for steps or lists (but format naturally)
69
- - Reference specific data from the user's profile when relevant
70
- - Ask follow-up questions when you need more context
71
- - Celebrate wins genuinely
72
-
73
- CRITICAL: CURRENCY FORMATTING
74
- - NEVER use bare dollar signs like $750
75
- - ALWAYS write currency as: "750 dollars" or "USD 750"
76
- - Example: "You've spent 750 dollars out of 2000 dollars"
77
-
78
- WHAT TO AVOID:
79
- - Being overly formal or corporate
80
- - Using too many emojis or exclamation marks
81
- - Giving vague advice like "try your best"
82
- - Long walls of text without structure
83
- - Ignoring the user's actual situation/data
84
-
85
- YOUR GOAL:
86
- Help this GenZ user make smart decisions about their life, finances, and education. Be the mentor they wish they had."""
87
-
88
-
89
- def get_system_prompt(profile, category, tool_results="", scenario_data=None):
90
- """Enhanced system prompt with scenario awareness"""
91
- exam_status = f"Recent exam: {profile['recent_exam']}/{profile['goal']}"
92
- budget_status = f"Spending: {profile['spend']} dollars/{profile['budget']} dollars"
93
-
94
- weak_areas = []
95
- if profile.get('math_weakness'):
96
- weak_areas.append("math")
97
- if profile.get('rushing'):
98
- weak_areas.append("time management")
99
-
100
- # Add scenario context if present
101
- scenario_context = ""
102
- if scenario_data:
103
- scenario_context = f"""
104
- SCENARIO ANALYSIS:
105
- The user is asking a 'what-if' question. Here's the comparison:
106
-
107
- Current Situation:
108
- - Budget: {profile['budget']} dollars
109
- - Spent: {profile['spend']} dollars
110
- - Remaining: {profile['budget'] - profile['spend']} dollars
111
-
112
- Scenario Result:
113
- - Budget: {scenario_data['scenario']['budget']} dollars
114
- - Spent: {scenario_data['scenario']['spent']} dollars
115
- - Remaining: {scenario_data['scenario']['remaining']} dollars
116
-
117
- Key Differences:
118
- {chr(10).join(f"β€’ {insight}" for insight in scenario_data['insights'])}
119
-
120
- IMPORTANT: Frame your response around this comparison. Be specific about the trade-offs.
121
- """
122
-
123
- context = f"""
124
- {GENZ_PERSONA}
125
-
126
- CURRENT USER CONTEXT:
127
- Name: {profile.get('name', 'User')}
128
- Category Focus: {category}
129
- {exam_status if category == 'Education' else ''}
130
- {budget_status if category == 'Finance' else ''}
131
- {'Areas needing work: ' + ', '.join(weak_areas) if weak_areas else ''}
132
- Today's goals progress: {profile.get('goals_today', {})}
133
-
134
- {scenario_context}
135
-
136
- {tool_results}
137
-
138
- IMPORTANT:
139
- 1. Reference specific numbers and data from the user's profile in your responses.
140
- 2. ALWAYS format currency as "X dollars" or "USD X" - NEVER use bare dollar signs like $X
141
- 3. If this is a scenario/what-if question, focus on the comparison and trade-offs.
142
- """
143
-
144
- return context
145
-
146
-
147
- def should_use_analytics(query, category):
148
- """Detect if analytics tools are needed"""
149
- analysis_keywords = [
150
- "analyze", "analysis", "break down", "breakdown", "how am i doing",
151
- "performance", "progress", "score", "exam", "test", "spending",
152
- "budget", "where", "what", "show me", "tell me about", "compare",
153
- "what if", "if i", "suppose", "imagine"
154
- ]
155
-
156
- return any(keyword in query.lower() for keyword in analysis_keywords)
157
-
158
-
159
- def get_tool_results(query, profile, category, scenario_profile=None):
160
- """Get analytics results, with scenario comparison if applicable"""
161
- results = ""
162
-
163
- # Check if this is a scenario query
164
- if scenario_profile:
165
- comparison = compare_scenarios(profile, scenario_profile)
166
-
167
- results += "\n\nπŸ“Š SCENARIO COMPARISON ANALYSIS\n\n"
168
- results += "Current vs Scenario:\n"
169
- results += f"β€’ Spending: {profile['spend']} dollars β†’ {scenario_profile['spend']} dollars\n"
170
- results += f"β€’ Change: {comparison['differences']['spend']:+.0f} dollars\n"
171
- results += f"β€’ Remaining: {comparison['current']['remaining']} dollars β†’ {comparison['scenario']['remaining']} dollars\n"
172
- results += f"β€’ Change: {comparison['differences']['remaining']:+.0f} dollars\n\n"
173
-
174
- results += "Key Insights:\n"
175
- for insight in comparison['insights']:
176
- results += f"β€’ {insight}\n"
177
- results += "\n"
178
-
179
- return results
180
-
181
- # Regular analytics (existing code)
182
- if category == "Education" and should_use_analytics(query, category):
183
- exam_data = analyze_exam_performance(profile)
184
- results += f"\n\nπŸ“š EDUCATION ANALYSIS\n\n"
185
- results += f"Total Score: {exam_data['total_score']}/{exam_data['goal_score']}\n"
186
- results += f"Progress: {exam_data['progress_pct']}%\n"
187
- results += f"Points Needed: {exam_data['points_needed']}\n"
188
- results += f"Weakest Subject: {exam_data['weakest_subject']} ({exam_data['score_breakdown'][exam_data['weakest_subject']]} points)\n"
189
- results += f"Strongest Subject: {exam_data['strongest_subject']} ({exam_data['score_breakdown'][exam_data['strongest_subject']]} points)\n"
190
- results += f"\nScore Breakdown:\n"
191
- for subject, score in exam_data['score_breakdown'].items():
192
- results += f" - {subject}: {score} ({exam_data['score_percentages'][subject]}%)\n"
193
- results += f"\nKey Insights:\n"
194
- for insight in exam_data['insights']:
195
- results += f" β€’ {insight}\n"
196
- results += f"\n"
197
-
198
- if any(word in query.lower() for word in ['improve', 'better', 'increase', 'boost']):
199
- suggestions = get_time_improvement_suggestions(profile)
200
- results += f"\n\nπŸ’‘ IMPROVEMENT SUGGESTIONS\n\n"
201
- for suggestion in suggestions:
202
- results += f" β€’ {suggestion}\n"
203
- results += f"\n"
204
-
205
- if any(word in query.lower() for word in ['practice', 'questions', 'quiz', 'drill']):
206
- practice_qs = generate_practice_questions(exam_data['weakest_subject'])
207
- results += f"\n\nπŸ“ PRACTICE QUESTIONS\n\n"
208
- for i, q in enumerate(practice_qs[:3], 1):
209
- results += f" {i}. {q}\n"
210
- results += f"\n"
211
-
212
- elif category == "Finance" and should_use_analytics(query, category):
213
- budget_data = calculate_budget_metrics(profile)
214
- results += f"\n\nπŸ’° FINANCE ANALYSIS\n\n"
215
- results += f"Budget: {budget_data['budget']} dollars\n"
216
- results += f"Spent: {budget_data['spent']} dollars ({budget_data['spend_percentage']}%)\n"
217
- results += f"Remaining: {budget_data['remaining']} dollars\n"
218
- results += f"Status: {'βœ… On track' if budget_data['on_track'] else '⚠️ Over target'}\n"
219
- results += f"\nSpending Breakdown:\n"
220
- for cat_name, amount in budget_data['categories'].items():
221
- pct = round((amount / budget_data['budget']) * 100, 1)
222
- results += f" - {cat_name}: {amount} dollars ({pct}%)\n"
223
- if budget_data['potential_savings']:
224
- results += f"\nPotential Savings:\n"
225
- for saving in budget_data['potential_savings']:
226
- results += f" β€’ {saving}\n"
227
- results += f"\n"
228
-
229
- return results
230
-
231
-
232
- def aqua_response_hf(query, profile, category, scenario_profile=None):
233
- """Try Hugging Face inference first"""
234
-
235
- try:
236
- hf_client = InferenceClient(api_key=HF_API_KEY)
237
-
238
- model_name = HF_MODELS.get(category, "mistralai/Mistral-7B-Instruct-v0.1")
239
- tool_results = get_tool_results(query, profile, category, scenario_profile)
240
-
241
- # Build scenario data if present
242
- scenario_data = None
243
- if scenario_profile:
244
- scenario_data = compare_scenarios(profile, scenario_profile)
245
-
246
- system_message = get_system_prompt(profile, category, tool_results, scenario_data)
247
-
248
- full_prompt = f"{system_message}\n\nUser Query: {query}"
249
-
250
- response = hf_client.text_generation(
251
- model=model_name,
252
- prompt=full_prompt,
253
- max_new_tokens=350,
254
- temperature=0.75,
255
- top_p=0.9
256
- )
257
-
258
- return response.strip()
259
-
260
- except Exception as e:
261
- print(f"HF Error: {str(e)[:100]}")
262
- return None
263
-
264
-
265
- def aqua_response_groq(query, profile, category, scenario_profile=None):
266
- """Fallback to Groq with available models only"""
267
-
268
- if not GROQ_API_KEY or GROQ_API_KEY == "gsk_xxx":
269
- return "⚠️ Groq API Key Missing! Add it to your environment."
270
-
271
- client = Groq(api_key=GROQ_API_KEY)
272
-
273
- tool_results = get_tool_results(query, profile, category, scenario_profile)
274
-
275
- # Build scenario data if present
276
- scenario_data = None
277
- if scenario_profile:
278
- scenario_data = compare_scenarios(profile, scenario_profile)
279
-
280
- system_message = get_system_prompt(profile, category, tool_results, scenario_data)
281
-
282
- model = GROQ_MODELS.get(category, "llama-3.3-70b-versatile")
283
-
284
- messages = [
285
- {"role": "system", "content": system_message},
286
- {"role": "user", "content": query}
287
- ]
288
-
289
- try:
290
- chat_completion = client.chat.completions.create(
291
- messages=messages,
292
- model=model,
293
- temperature=0.75,
294
- max_tokens=350,
295
- top_p=0.9,
296
- stop=None
297
- )
298
-
299
- response_content = chat_completion.choices[0].message.content
300
- response_content = response_content.replace('$', '\\$')
301
- return response_content.strip()
302
-
303
- except Exception as e:
304
- error_msg = str(e)[:100]
305
- print(f"Groq Error: {error_msg}")
306
- return f"😬 Something went wrong: {error_msg}... Try again?"
307
-
308
-
309
- def aqua_response(query, profile, category, scenario_profile=None):
310
- """
311
- Main response function with scenario awareness:
312
- 1. Try HF models first (area-specific, <= 7B)
313
- 2. Fallback to Groq (area-specific, available models)
314
- 3. Return error if both fail
315
-
316
- NEW: Accepts optional scenario_profile for what-if queries
317
- """
318
-
319
- # Try HF first
320
- print(f"[DEBUG] Using HF model for {category}: {HF_MODELS.get(category)}")
321
- hf_response = aqua_response_hf(query, profile, category, scenario_profile)
322
-
323
- if hf_response:
324
- return hf_response
325
-
326
- # If HF fails, use Groq with same focus-area model
327
- print(f"[DEBUG] HF failed, falling back to Groq: {GROQ_MODELS.get(category)}")
328
- groq_response = aqua_response_groq(query, profile, category, scenario_profile)
329
-
330
- return groq_response
331
-
332
-
333
- def get_contextual_welcome_message(category, profile):
334
- """Generate proactive welcome with insights"""
335
- # Get proactive insights
336
- insights = generate_proactive_insights(profile, category)
337
-
338
- base_messages = {
339
- "Finance": f"Hey! πŸ’° I see you're at {profile['spend']} dollars/{profile['budget']} dollars this month.",
340
- "Education": f"Hey! πŸ“š Last score was {profile['recent_exam']} - you need {profile['goal'] - profile['recent_exam']} more points to hit your goal.",
341
- "Family": f"Hey! πŸ‘¨πŸ‘©πŸ‘§ Family stuff can be tricky to balance. I'm here to help you stay connected while crushing your own goals.",
342
- "Friends": f"Hey! πŸ‘₯ Balancing friends and responsibilities is an art. Let's figure out how to stay social without breaking the bank.",
343
- "Weekend/Vacation": f"Hey! πŸ–οΈ Everyone needs a break. Let's plan something fun that won't wreck your budget."
344
- }
345
-
346
- base = base_messages.get(category, "Hey! πŸ‘‹ I'm Aqua, your personal mentor. What's on your mind?")
347
-
348
- # Add top insight if critical
349
- if insights and insights[0].get('type') == 'critical':
350
- base += f" {insights[0]['icon']} {insights[0]['text']}"
351
-
352
- return base
353
-
354
-
355
- def get_default_profile():
356
- return {
357
- "name": "Suzy",
358
- "recent_exam": 1200,
359
- "goal": 1600,
360
- "math_weakness": True,
361
- "rushing": True,
362
- "budget": 2000,
363
- "spend": 1576,
364
- "goals_today": {
365
- "Calories": 85,
366
- "Money": 75,
367
- "Steps": 54
368
- },
369
- "last_scores": {
370
- "Reading": 240,
371
- "Writing": 220,
372
- "Reasoning": 140,
373
- "Algebra": 100,
374
- "Geometry": 100
375
- },
376
- }
377
-
 
 
 
 
1
+ """
2
+ llm_agent.py
3
+
4
+ FIXED VERSION:
5
+ 1. Mistral 7B for Finance (NOT specialized finance model - good for conversations)
6
+ 2. Phi-2 2.7B for Education (better than specialized - data quality > size)
7
+ 3. 7B max per model (no huge models that fail)
8
+ 4. Groq fallback with ONLY available models (mixtral deprecated, using llama)
9
+ 5. No falling back immediately - models are ACTUALLY available
10
+ ENHANCEMENTS:
11
+ 1. Scenario-aware prompts for what-if queries
12
+ 2. Proactive insights integration
13
+ 3. Better formatting for comparisons
14
+ 4. Unchanged: Multi-LLM strategy (HF + Groq)
15
+ 5. Unchanged: All existing analytics integration
16
+ """
17
+
18
+ from groq import Groq
19
+ from analytics import (
20
+ analyze_exam_performance,
21
+ calculate_budget_metrics,
22
+ get_time_improvement_suggestions,
23
+ generate_practice_questions,
24
+ calculate_projected_improvement,
25
+ parse_budget_query,
26
+ apply_scenario_changes,
27
+ compare_scenarios,
28
+ generate_proactive_insights
29
+ )
30
+ import json
31
+ import os
32
+ from huggingface_hub import InferenceClient
33
+
34
# HF Models - All 7B or smaller
# One Hugging Face model per focus area: Mistral-7B for Finance,
# phi-2 for Education, zephyr-7b for the conversational categories.
HF_MODELS = {
    "Finance": "mistralai/Mistral-7B-Instruct-v0.1",
    "Education": "microsoft/phi-2",
    "Family": "HuggingFaceH4/zephyr-7b-beta",
    "Friends": "HuggingFaceH4/zephyr-7b-beta",
    "Weekend/Vacation": "HuggingFaceH4/zephyr-7b-beta"
}

# Groq models - Available models only
# Fallback models used when the HF call fails (see aqua_response).
GROQ_MODELS = {
    "Finance": "llama-3.1-8b-instant",
    "Education": "llama-3.3-70b-versatile",
    "Family": "llama-3.1-8b-instant",
    "Friends": "llama-3.1-8b-instant",
    "Weekend/Vacation": "llama-3.1-8b-instant"
}

# API keys come from the environment only (no hardcoded fallbacks);
# fail fast at import time so a misconfigured deployment is obvious.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HF_API_KEY = os.getenv("HF_API_KEY")

if not GROQ_API_KEY or not HF_API_KEY:
    raise ValueError("Missing API keys in environment")
57
+
58
+ GENZ_PERSONA = r"""You are Aqua, a GenZ AI mentor who's like that super helpful older friend who's been through it all.
59
+
60
+ PERSONALITY TRAITS:
61
+ - Warm, friendly, and supportive (but not fake or overly enthusiastic)
62
+ - Direct and honest (no sugarcoating, but always constructive)
63
+ - Uses casual language naturally (occasional "fr", "ngl", "lowkey" - but don't overdo it)
64
+ - Encouraging but realistic about challenges
65
+ - Uses emojis purposefully (1-2 per response max)
66
+ - Breaks down complex stuff into digestible chunks
67
+
68
+ COMMUNICATION STYLE:
69
+ - Keep responses concise (2-4 sentences for simple questions, more for complex ones)
70
+ - Lead with the most actionable insight
71
+ - Use bullet points for steps or lists (but format naturally)
72
+ - Reference specific data from the user's profile when relevant
73
+ - Ask follow-up questions when you need more context
74
+ - Celebrate wins genuinely
75
+
76
+ CRITICAL: CURRENCY FORMATTING
77
+ - NEVER use bare dollar signs like $750
78
+ - ALWAYS write currency as: "750 dollars" or "USD 750"
79
+ - Example: "You've spent 750 dollars out of 2000 dollars"
80
+
81
+ WHAT TO AVOID:
82
+ - Being overly formal or corporate
83
+ - Using too many emojis or exclamation marks
84
+ - Giving vague advice like "try your best"
85
+ - Long walls of text without structure
86
+ - Ignoring the user's actual situation/data
87
+
88
+ YOUR GOAL:
89
+ Help this GenZ user make smart decisions about their life, finances, and education. Be the mentor they wish they had."""
90
+
91
+
92
def get_system_prompt(profile, category, tool_results="", scenario_data=None):
    """Assemble the full system prompt sent to the LLM.

    Layers, in order: the GENZ_PERSONA base prompt, the user's current
    context (exam/budget status, weak areas, daily goals), an optional
    what-if scenario comparison, and any pre-formatted analytics text.

    Args:
        profile: user profile dict (reads 'recent_exam', 'goal', 'spend',
            'budget'; optionally 'math_weakness', 'rushing', 'name',
            'goals_today').
        category: focus area, e.g. 'Education' or 'Finance'.
        tool_results: pre-formatted analytics report embedded verbatim.
        scenario_data: result of compare_scenarios() for what-if
            queries, or None.

    Returns:
        str: the assembled system prompt.
    """
    exam_status = f"Recent exam: {profile['recent_exam']}/{profile['goal']}"
    budget_status = f"Spending: {profile['spend']} dollars/{profile['budget']} dollars"

    # Collect profile-flagged weaknesses so the model can target them.
    weak_areas = []
    if profile.get('math_weakness'):
        weak_areas.append("math")
    if profile.get('rushing'):
        weak_areas.append("time management")

    # Add scenario context if present
    scenario_context = ""
    if scenario_data:
        # assumes scenario_data carries 'scenario' (budget/spent/remaining)
        # and 'insights' keys as produced by compare_scenarios() — TODO confirm
        scenario_context = f"""
SCENARIO ANALYSIS:
The user is asking a 'what-if' question. Here's the comparison:

Current Situation:
- Budget: {profile['budget']} dollars
- Spent: {profile['spend']} dollars
- Remaining: {profile['budget'] - profile['spend']} dollars

Scenario Result:
- Budget: {scenario_data['scenario']['budget']} dollars
- Spent: {scenario_data['scenario']['spent']} dollars
- Remaining: {scenario_data['scenario']['remaining']} dollars

Key Differences:
{chr(10).join(f"β€’ {insight}" for insight in scenario_data['insights'])}

IMPORTANT: Frame your response around this comparison. Be specific about the trade-offs.
"""

    # Only the status line matching the active category is included;
    # the other branch interpolates to an empty string.
    context = f"""
{GENZ_PERSONA}

CURRENT USER CONTEXT:
Name: {profile.get('name', 'User')}
Category Focus: {category}
{exam_status if category == 'Education' else ''}
{budget_status if category == 'Finance' else ''}
{'Areas needing work: ' + ', '.join(weak_areas) if weak_areas else ''}
Today's goals progress: {profile.get('goals_today', {})}

{scenario_context}

{tool_results}

IMPORTANT:
1. Reference specific numbers and data from the user's profile in your responses.
2. ALWAYS format currency as "X dollars" or "USD X" - NEVER use bare dollar signs like $X
3. If this is a scenario/what-if question, focus on the comparison and trade-offs.
"""

    return context
148
+
149
+
150
def should_use_analytics(query, category):
    """Return True when the query looks like it needs analytics tooling.

    Performs a case-insensitive substring scan of the query against a
    fixed trigger-phrase list. `category` is currently unused but kept
    for interface compatibility with callers.
    """
    triggers = (
        "analyze", "analysis", "break down", "breakdown", "how am i doing",
        "performance", "progress", "score", "exam", "test", "spending",
        "budget", "where", "what", "show me", "tell me about", "compare",
        "what if", "if i", "suppose", "imagine",
    )

    lowered = query.lower()
    for phrase in triggers:
        if phrase in lowered:
            return True
    return False
160
+
161
+
162
def get_tool_results(query, profile, category, scenario_profile=None):
    """Build a pre-formatted analytics report for the system prompt.

    Returns a (possibly empty) text block that get_system_prompt()
    embeds verbatim. When scenario_profile is given, ONLY the scenario
    comparison is returned (short-circuits before regular analytics).

    Args:
        query: the raw user query (scanned for trigger keywords).
        profile: current user profile dict.
        category: focus area ('Education', 'Finance', ...).
        scenario_profile: modified profile for what-if queries, or None.

    Returns:
        str: formatted report text, or "" when no analytics apply.
    """
    results = ""

    # Check if this is a scenario query
    if scenario_profile:
        # assumes compare_scenarios returns 'differences', 'current',
        # 'scenario', 'insights' keys as used below — TODO confirm in analytics.py
        comparison = compare_scenarios(profile, scenario_profile)

        results += "\n\nπŸ“Š SCENARIO COMPARISON ANALYSIS\n\n"
        results += "Current vs Scenario:\n"
        results += f"β€’ Spending: {profile['spend']} dollars β†’ {scenario_profile['spend']} dollars\n"
        results += f"β€’ Change: {comparison['differences']['spend']:+.0f} dollars\n"
        results += f"β€’ Remaining: {comparison['current']['remaining']} dollars β†’ {comparison['scenario']['remaining']} dollars\n"
        results += f"β€’ Change: {comparison['differences']['remaining']:+.0f} dollars\n\n"

        results += "Key Insights:\n"
        for insight in comparison['insights']:
            results += f"β€’ {insight}\n"
        results += "\n"

        # Scenario queries never fall through to the regular analytics.
        return results

    # Regular analytics (existing code)
    if category == "Education" and should_use_analytics(query, category):
        exam_data = analyze_exam_performance(profile)
        results += f"\n\nπŸ“š EDUCATION ANALYSIS\n\n"
        results += f"Total Score: {exam_data['total_score']}/{exam_data['goal_score']}\n"
        results += f"Progress: {exam_data['progress_pct']}%\n"
        results += f"Points Needed: {exam_data['points_needed']}\n"
        results += f"Weakest Subject: {exam_data['weakest_subject']} ({exam_data['score_breakdown'][exam_data['weakest_subject']]} points)\n"
        results += f"Strongest Subject: {exam_data['strongest_subject']} ({exam_data['score_breakdown'][exam_data['strongest_subject']]} points)\n"
        results += f"\nScore Breakdown:\n"
        for subject, score in exam_data['score_breakdown'].items():
            results += f" - {subject}: {score} ({exam_data['score_percentages'][subject]}%)\n"
        results += f"\nKey Insights:\n"
        for insight in exam_data['insights']:
            results += f" β€’ {insight}\n"
        results += f"\n"

        # Extra sections are appended only when the query hints at them.
        if any(word in query.lower() for word in ['improve', 'better', 'increase', 'boost']):
            suggestions = get_time_improvement_suggestions(profile)
            results += f"\n\nπŸ’‘ IMPROVEMENT SUGGESTIONS\n\n"
            for suggestion in suggestions:
                results += f" β€’ {suggestion}\n"
            results += f"\n"

        if any(word in query.lower() for word in ['practice', 'questions', 'quiz', 'drill']):
            # Practice questions target the weakest subject; cap at 3.
            practice_qs = generate_practice_questions(exam_data['weakest_subject'])
            results += f"\n\nπŸ“ PRACTICE QUESTIONS\n\n"
            for i, q in enumerate(practice_qs[:3], 1):
                results += f" {i}. {q}\n"
            results += f"\n"

    elif category == "Finance" and should_use_analytics(query, category):
        budget_data = calculate_budget_metrics(profile)
        results += f"\n\nπŸ’° FINANCE ANALYSIS\n\n"
        results += f"Budget: {budget_data['budget']} dollars\n"
        results += f"Spent: {budget_data['spent']} dollars ({budget_data['spend_percentage']}%)\n"
        results += f"Remaining: {budget_data['remaining']} dollars\n"
        results += f"Status: {'βœ… On track' if budget_data['on_track'] else '⚠️ Over target'}\n"
        results += f"\nSpending Breakdown:\n"
        for cat_name, amount in budget_data['categories'].items():
            pct = round((amount / budget_data['budget']) * 100, 1)
            results += f" - {cat_name}: {amount} dollars ({pct}%)\n"
        if budget_data['potential_savings']:
            results += f"\nPotential Savings:\n"
            for saving in budget_data['potential_savings']:
                results += f" β€’ {saving}\n"
        results += f"\n"

    return results
233
+
234
+
235
def aqua_response_hf(query, profile, category, scenario_profile=None):
    """Try Hugging Face inference first.

    Builds the full system prompt (analytics + optional scenario
    comparison) and queries the category-specific HF model.

    Args:
        query: the raw user query.
        profile: current user profile dict.
        category: focus area used to pick the model from HF_MODELS.
        scenario_profile: modified profile for what-if queries, or None.

    Returns:
        str: the model reply (stripped, with '$' escaped for markdown),
        or None on any failure so the caller can fall back to Groq.
    """
    try:
        hf_client = InferenceClient(api_key=HF_API_KEY)

        model_name = HF_MODELS.get(category, "mistralai/Mistral-7B-Instruct-v0.1")
        tool_results = get_tool_results(query, profile, category, scenario_profile)

        # Build scenario data if present
        scenario_data = None
        if scenario_profile:
            scenario_data = compare_scenarios(profile, scenario_profile)

        system_message = get_system_prompt(profile, category, tool_results, scenario_data)

        full_prompt = f"{system_message}\n\nUser Query: {query}"

        response = hf_client.text_generation(
            model=model_name,
            prompt=full_prompt,
            max_new_tokens=350,
            temperature=0.75,
            top_p=0.9
        )

        # FIX: escape dollar signs so markdown renderers don't treat them
        # as math delimiters — keeps this path consistent with the Groq
        # fallback, which already applies the same escaping.
        return response.replace('$', '\\$').strip()

    except Exception as e:
        # Any failure (auth, model loading, network) is swallowed on
        # purpose: returning None triggers the Groq fallback.
        print(f"HF Error: {str(e)[:100]}")
        return None
266
+
267
+
268
def aqua_response_groq(query, profile, category, scenario_profile=None):
    """Fallback path: answer the query via a Groq-hosted model.

    Mirrors aqua_response_hf but uses GROQ_MODELS and the Groq chat
    API. Returns a user-facing error string (never raises) on failure.
    """
    # Guard against a missing/placeholder key before creating a client.
    if not GROQ_API_KEY or GROQ_API_KEY == "gsk_xxx":
        return "⚠️ Groq API Key Missing! Add it to your environment."

    groq_client = Groq(api_key=GROQ_API_KEY)

    report = get_tool_results(query, profile, category, scenario_profile)

    # Scenario comparison only exists for what-if queries.
    scenario_data = compare_scenarios(profile, scenario_profile) if scenario_profile else None

    system_message = get_system_prompt(profile, category, report, scenario_data)

    chosen_model = GROQ_MODELS.get(category, "llama-3.3-70b-versatile")

    conversation = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": query}
    ]

    try:
        completion = groq_client.chat.completions.create(
            messages=conversation,
            model=chosen_model,
            temperature=0.75,
            max_tokens=350,
            top_p=0.9,
            stop=None
        )

        # Escape dollar signs so downstream markdown rendering doesn't
        # interpret them as math delimiters.
        reply = completion.choices[0].message.content
        reply = reply.replace('$', '\\$')
        return reply.strip()

    except Exception as e:
        error_msg = str(e)[:100]
        print(f"Groq Error: {error_msg}")
        return f"😬 Something went wrong: {error_msg}... Try again?"
310
+
311
+
312
def aqua_response(query, profile, category, scenario_profile=None):
    """Main entry point with scenario awareness.

    Strategy:
    1. Try the category-specific Hugging Face model (<= 7B).
    2. On failure (None), fall back to the category-specific Groq model.
    3. The Groq path returns an error string if it also fails.

    Accepts an optional scenario_profile for what-if queries.
    """
    # Try HF first
    print(f"[DEBUG] Using HF model for {category}: {HF_MODELS.get(category)}")
    answer = aqua_response_hf(query, profile, category, scenario_profile)
    if answer:
        return answer

    # If HF fails, use Groq with same focus-area model
    print(f"[DEBUG] HF failed, falling back to Groq: {GROQ_MODELS.get(category)}")
    return aqua_response_groq(query, profile, category, scenario_profile)
334
+
335
+
336
def get_contextual_welcome_message(category, profile):
    """Build the category-specific greeting shown at chat start.

    Appends the first proactive insight to the greeting when that
    insight is flagged as 'critical'.
    """
    insights = generate_proactive_insights(profile, category)

    greetings = {
        "Finance": f"Hey! πŸ’° I see you're at {profile['spend']} dollars/{profile['budget']} dollars this month.",
        "Education": f"Hey! πŸ“š Last score was {profile['recent_exam']} - you need {profile['goal'] - profile['recent_exam']} more points to hit your goal.",
        "Family": f"Hey! πŸ‘¨πŸ‘©πŸ‘§ Family stuff can be tricky to balance. I'm here to help you stay connected while crushing your own goals.",
        "Friends": f"Hey! πŸ‘₯ Balancing friends and responsibilities is an art. Let's figure out how to stay social without breaking the bank.",
        "Weekend/Vacation": f"Hey! πŸ–οΈ Everyone needs a break. Let's plan something fun that won't wreck your budget."
    }

    message = greetings.get(category, "Hey! πŸ‘‹ I'm Aqua, your personal mentor. What's on your mind?")

    # Surface only the top insight, and only if it's critical.
    top = insights[0] if insights else None
    if top and top.get('type') == 'critical':
        message += f" {top['icon']} {top['text']}"

    return message
356
+
357
+
358
def get_default_profile():
    """Return the demo/default user profile.

    A fresh dict is built on every call, so callers may mutate the
    result freely. Note: 'recent_exam' (1200) is a composite score;
    'last_scores' holds the per-subject breakdown — presumably on a
    different scale, verify against analytics.py.
    """
    profile = {
        "name": "Suzy",
        "recent_exam": 1200,
        "goal": 1600,
        "math_weakness": True,
        "rushing": True,
        "budget": 2000,
        "spend": 1576,
    }
    # Percentage progress toward each daily goal.
    profile["goals_today"] = {
        "Calories": 85,
        "Money": 75,
        "Steps": 54,
    }
    # Per-subject points from the most recent exam.
    profile["last_scores"] = {
        "Reading": 240,
        "Writing": 220,
        "Reasoning": 140,
        "Algebra": 100,
        "Geometry": 100,
    }
    return profile
380
+