shreejan4603 committed on
Commit
8a8a771
·
verified ·
1 Parent(s): 9f950d0

Upload 8 files

Browse files
src/config.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ load_dotenv()
5
+
6
class Config:
    """Central configuration for the wellness platform.

    Secrets are read from the environment (populated by ``load_dotenv()``
    at module import). NOTE(review): the Watson API key, URL and project id
    were previously hard-coded here — committed credentials must be rotated.
    """

    # API keys — supplied via environment / .env, never committed to source.
    GROQ_API_KEY = os.getenv("GROQ_API_KEY")

    # Local Ollama endpoint used by GraniteAgent for task generation.
    OLLAMA_BASE_URL = "http://localhost:11434/api/generate"
    # Ollama-hosted Granite model tag. This was the first of two conflicting
    # GRANITE_MODEL assignments; the Watson id below silently overwrote it,
    # so it is preserved under a distinct name for the Ollama code path.
    OLLAMA_GRANITE_MODEL = "granite3.3:8b"

    # IBM Watson configuration — environment-sourced instead of hard-coded.
    WATSON_API_KEY = os.getenv("WATSON_API_KEY", "")
    WATSON_URL = os.getenv("WATSON_URL", "https://eu-de.ml.cloud.ibm.com")
    WATSON_PROJECT_ID = os.getenv("WATSON_PROJECT_ID", "")

    # AI models
    GROQ_MODEL = "qwen/qwen3-32b"
    # Effective value kept for backward compatibility: the class previously
    # assigned GRANITE_MODEL twice and this Watson model id won.
    GRANITE_MODEL = "ibm/granite-3-3-8b-instruct"

    # MongoDB configuration (env override keeps the old localhost default).
    MONGODB_URI = os.getenv("MONGODB_URI", "mongodb://localhost:27017/")
    DATABASE_NAME = "wellness_platform"

    # Collection names
    USERS_COLLECTION = "users"
    CONVERSATIONS_COLLECTION = "conversations"
    TASKS_COLLECTION = "tasks"
    REWARDS_COLLECTION = "rewards"

    # Coins awarded per completed task, keyed by task_type.
    TASK_REWARDS = {
        "meditation": 15,
        "exercise": 20,
        "sleep_schedule": 10,
        "social_connection": 12,
        "journaling": 8,
        "breathing_exercise": 10,
        "nature_walk": 15,
        "healthy_meal": 12,
        "screen_break": 5,
        "gratitude_practice": 8,
        "professional_help": 25
    }
src/data_simulator.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import time
3
+ import random
4
+ import threading
5
+ from datetime import datetime
6
+ from typing import Dict, Optional
7
+ import numpy as np
8
+
9
class FitnessDataSimulator:
    """Simulates a live fitness-tracker feed.

    A daemon thread periodically generates plausible health metrics and
    writes them as JSON to ``config.FIT_STREAM_FILE`` (assumed to be a
    writable file-path attribute on the config object — TODO confirm it is
    defined; the Config class elsewhere in this repo does not declare it).
    """

    def __init__(self, config):
        self.config = config
        self.running = False  # polled by the worker loop
        self.thread = None    # background worker handle

        # Base/variance pairs feeding np.random.normal for each metric.
        self.sleep_pattern = {"base": 7.5, "variance": 1.5}
        self.steps_pattern = {"base": 8000, "variance": 3000}
        self.mood_pattern = {"base": 7, "variance": 2}
        self.calories_pattern = {"base": 400, "variance": 200}
        self.water_pattern = {"base": 6, "variance": 2}

        # Daily progression (currently unused; kept for future trending).
        self.daily_progression = 0

    def generate_realistic_data(self) -> Dict:
        """Generate one snapshot of realistic fitness data with daily patterns.

        Bounds: sleep_hours in [4, 12], mood_score in [1, 10],
        steps/calories/water >= 0, heart_rate in [60, 100].
        """
        hour = datetime.now().hour

        # Adjust patterns based on time of day.
        steps_multiplier = self._get_activity_multiplier(hour)
        mood_adjustment = self._get_mood_adjustment(hour)

        data = {
            "timestamp": datetime.now().isoformat(),
            "sleep_hours": max(4, min(12,
                np.random.normal(self.sleep_pattern["base"], self.sleep_pattern["variance"])
            )),
            "steps": max(0, int(
                np.random.normal(self.steps_pattern["base"] * steps_multiplier, self.steps_pattern["variance"])
            )),
            "mood_score": max(1, min(10,
                np.random.normal(self.mood_pattern["base"] + mood_adjustment, self.mood_pattern["variance"])
            )),
            "calories_burned": max(0, int(
                np.random.normal(self.calories_pattern["base"] * steps_multiplier, self.calories_pattern["variance"])
            )),
            "water_intake": max(0,
                np.random.normal(self.water_pattern["base"], self.water_pattern["variance"])
            ),
            "heart_rate": random.randint(60, 100),
            "active_minutes": random.randint(20, 120)
        }

        return data

    def _get_activity_multiplier(self, hour: int) -> float:
        """Get activity multiplier based on hour of day."""
        if 6 <= hour <= 9:  # Morning
            return 1.2
        elif 12 <= hour <= 14:  # Lunch
            return 1.1
        elif 17 <= hour <= 19:  # Evening
            return 1.3
        elif 22 <= hour or hour <= 5:  # Night
            return 0.3
        else:
            return 1.0

    def _get_mood_adjustment(self, hour: int) -> float:
        """Get mood adjustment (additive offset) based on hour of day."""
        if 7 <= hour <= 11:  # Morning
            return 0.5
        elif 14 <= hour <= 16:  # Afternoon dip
            return -0.3
        elif 18 <= hour <= 21:  # Evening
            return 0.3
        else:
            return 0

    def save_data(self, data: Dict):
        """Save data to the configured JSON stream file (best-effort)."""
        try:
            with open(self.config.FIT_STREAM_FILE, 'w') as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            # Best-effort: a missing/invalid path is reported, not fatal.
            print(f"Error saving data: {e}")

    def _simulate_loop(self):
        """Main simulation loop: generate + persist a snapshot every 5 s."""
        while self.running:
            try:
                data = self.generate_realistic_data()
                self.save_data(data)
                time.sleep(5)  # Update every 5 seconds
            except Exception as e:
                print(f"Simulation error: {e}")
                time.sleep(5)

    def start_simulation(self):
        """Start the data simulation in a daemon thread (idempotent)."""
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self._simulate_loop, daemon=True)
            self.thread.start()
            print("✅ Fitness data simulation started")

    def stop_simulation(self):
        """Stop the data simulation (waits briefly for the worker to exit)."""
        self.running = False
        if self.thread:
            # The loop sleeps 5 s between updates; cap the wait so callers
            # are never blocked indefinitely (the thread is a daemon anyway).
            # Previously this was an unbounded join().
            self.thread.join(timeout=6)
        print("🛑 Fitness data simulation stopped")

    def get_latest_data(self) -> Optional[Dict]:
        """Get the latest fitness data, or None if no snapshot exists yet."""
        try:
            with open(self.config.FIT_STREAM_FILE, 'r') as f:
                return json.load(f)
        except FileNotFoundError:
            # No snapshot written yet — a normal condition, not an error.
            return None
        except Exception as e:
            print(f"Error reading data: {e}")
            return None
src/database_manager.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pymongo
2
+ from datetime import datetime
3
+ import json
4
+ import uuid
5
+ from typing import Dict, List, Optional
6
+
7
class DatabaseManager:
    """MongoDB persistence layer for users, conversations, tasks and rewards.

    All write helpers swallow exceptions and report failure through their
    return value (bool / None) after printing the error.
    """

    def __init__(self, config):
        # config supplies the connection URI, database and collection names.
        self.config = config
        self.client = pymongo.MongoClient(config.MONGODB_URI)
        self.db = self.client[config.DATABASE_NAME]
        self._init_collections()

    def _init_collections(self):
        """Initialize collections with indexes"""
        # user_id is the natural key for users; the other collections are
        # indexed for per-user, newest-first queries.
        self.db[self.config.USERS_COLLECTION].create_index("user_id", unique=True)
        self.db[self.config.CONVERSATIONS_COLLECTION].create_index([("user_id", 1), ("timestamp", -1)])
        self.db[self.config.TASKS_COLLECTION].create_index([("user_id", 1), ("created_at", -1)])
        self.db[self.config.REWARDS_COLLECTION].create_index([("user_id", 1), ("timestamp", -1)])

    def save_user_profile(self, user_data: Dict) -> bool:
        """Save or update user profile (upsert keyed on user_id).

        Mutates user_data: stamps last_updated and initializes the coin
        counters on first save. Returns False on any error.
        """
        try:
            user_data["last_updated"] = datetime.now()
            if "coins" not in user_data:
                user_data["coins"] = 0
            if "total_coins_earned" not in user_data:
                user_data["total_coins_earned"] = 0

            result = self.db[self.config.USERS_COLLECTION].update_one(
                {"user_id": user_data["user_id"]},
                {"$set": user_data},
                upsert=True
            )
            return True
        except Exception as e:
            print(f"Error saving user profile: {e}")
            return False

    def get_user_profile(self, user_id: str) -> Optional[Dict]:
        """Get user profile, or None if the user does not exist."""
        return self.db[self.config.USERS_COLLECTION].find_one({"user_id": user_id})

    def save_conversation(self, user_id: str, conversation_data: Dict) -> bool:
        """Save conversation data.

        Mutates conversation_data: adds user_id, timestamp and a fresh
        conversation_id before insertion. Returns False on error.
        """
        try:
            conversation_data.update({
                "user_id": user_id,
                "timestamp": datetime.now(),
                "conversation_id": str(uuid.uuid4())
            })
            self.db[self.config.CONVERSATIONS_COLLECTION].insert_one(conversation_data)
            return True
        except Exception as e:
            print(f"Error saving conversation: {e}")
            return False

    def save_task(self, user_id: str, task_data: Dict) -> Optional[str]:
        """Save assigned task and return task ID (None on failure).

        Mutates task_data: adds user/task ids, created_at, and initializes
        status to "pending".
        """
        try:
            task_id = str(uuid.uuid4())
            task_data.update({
                "user_id": user_id,
                "task_id": task_id,
                "created_at": datetime.now(),
                "status": "pending",
                "completed_at": None
            })
            self.db[self.config.TASKS_COLLECTION].insert_one(task_data)
            return task_id
        except Exception as e:
            print(f"Error saving task: {e}")
            return None

    def get_user_tasks(self, user_id: str, status: Optional[str] = None) -> List[Dict]:
        """Get user tasks, optionally filtered by status, newest first."""
        query = {"user_id": user_id}
        if status:
            query["status"] = status

        cursor = self.db[self.config.TASKS_COLLECTION].find(query).sort("created_at", -1)
        return list(cursor)

    def complete_task(self, task_id: str, completion_data: Optional[Dict] = None) -> bool:
        """Mark task as completed.

        Returns False when the id matched nothing (e.g. already completed
        with identical fields, or unknown task) or on error.
        """
        try:
            update_data = {
                "status": "completed",
                "completed_at": datetime.now()
            }
            if completion_data:
                update_data["completion_data"] = completion_data

            result = self.db[self.config.TASKS_COLLECTION].update_one(
                {"task_id": task_id},
                {"$set": update_data}
            )
            return result.modified_count > 0
        except Exception as e:
            print(f"Error completing task: {e}")
            return False

    def update_user_coins(self, user_id: str, coins_to_add: int, reward_type: str) -> bool:
        """Update user coins and log reward.

        Atomically increments both coin balances via $inc; a reward entry
        is logged only when the user document was actually modified (an
        unknown user_id therefore returns False with no reward logged).
        """
        try:
            # Update user coins
            user_result = self.db[self.config.USERS_COLLECTION].update_one(
                {"user_id": user_id},
                {
                    "$inc": {
                        "coins": coins_to_add,
                        "total_coins_earned": coins_to_add
                    }
                }
            )

            if user_result.modified_count > 0:
                # Log reward
                reward_data = {
                    "user_id": user_id,
                    "reward_type": reward_type,
                    "coins": coins_to_add,
                    "timestamp": datetime.now()
                }
                self.db[self.config.REWARDS_COLLECTION].insert_one(reward_data)
                return True
            return False
        except Exception as e:
            print(f"Error updating coins: {e}")
            return False
src/granite_agent.py ADDED
@@ -0,0 +1,535 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+ import time
4
+ from typing import Dict, List, Optional, Tuple
5
+
6
class GraniteAgent:
    """Wellness-task generator backed by a Granite model served by Ollama.

    Tries progressively simpler prompt strategies and falls back to preset
    tasks only when the model endpoint is completely unavailable.
    """

    def __init__(self, config):
        self.config = config
        # Ollama /api/generate endpoint and model tag taken from Config.
        # NOTE(review): config.GRANITE_MODEL is assigned twice in Config;
        # the effective value is the Watson id — confirm it matches the
        # model name Ollama actually serves.
        self.base_url = config.OLLAMA_BASE_URL
        self.model = config.GRANITE_MODEL
        # Retry policy shared by all generation strategies.
        self.max_retries = 3
        self.retry_delay = 2  # seconds
13
+
14
+ def assign_wellness_tasks(self, user_profile: Dict, mental_health_assessment: str, risk_level: int) -> List[Dict]:
15
+ """Assign wellness tasks based on mental health assessment using Granite AI"""
16
+
17
+ # First attempt: Comprehensive AI generation
18
+ tasks = self._generate_ai_tasks(user_profile, mental_health_assessment, risk_level)
19
+
20
+ if tasks:
21
+ return tasks
22
+
23
+ # Second attempt: Simplified AI generation with retry
24
+ tasks = self._generate_simplified_tasks(user_profile, risk_level)
25
+
26
+ if tasks:
27
+ return tasks
28
+
29
+ # Final fallback: Basic AI generation with minimal context
30
+ tasks = self._generate_basic_tasks(risk_level)
31
+
32
+ if tasks:
33
+ return tasks
34
+
35
+ # Emergency fallback: Only if AI is completely unavailable
36
+ print("WARNING: Granite AI completely unavailable, using emergency preset tasks")
37
+ return self._get_emergency_preset_tasks(risk_level)
38
+
39
+ def _generate_ai_tasks(self, user_profile: Dict, mental_health_assessment: str, risk_level: int) -> Optional[List[Dict]]:
40
+ """Primary method: Generate comprehensive personalized tasks using Granite AI"""
41
+
42
+ prompt = self._build_comprehensive_prompt(user_profile, mental_health_assessment, risk_level)
43
+
44
+ for attempt in range(self.max_retries):
45
+ try:
46
+ response = self._call_granite_api(prompt, temperature=0.5)
47
+
48
+ if response:
49
+ tasks = self._extract_and_validate_tasks(response, risk_level)
50
+ if tasks and len(tasks) >= 3: # Ensure we have enough tasks
51
+ print(f"βœ“ Generated {len(tasks)} comprehensive AI tasks")
52
+ return tasks
53
+
54
+ except Exception as e:
55
+ print(f"Attempt {attempt + 1} failed for comprehensive generation: {e}")
56
+ if attempt < self.max_retries - 1:
57
+ time.sleep(self.retry_delay)
58
+
59
+ return None
60
+
61
+ def _generate_simplified_tasks(self, user_profile: Dict, risk_level: int) -> Optional[List[Dict]]:
62
+ """Fallback method: Generate simplified tasks with reduced context"""
63
+
64
+ prompt = self._build_simplified_prompt(user_profile, risk_level)
65
+
66
+ for attempt in range(self.max_retries):
67
+ try:
68
+ response = self._call_granite_api(prompt, temperature=0.4)
69
+
70
+ if response:
71
+ tasks = self._extract_and_validate_tasks(response, risk_level)
72
+ if tasks and len(tasks) >= 2:
73
+ print(f"βœ“ Generated {len(tasks)} simplified AI tasks")
74
+ return tasks
75
+
76
+ except Exception as e:
77
+ print(f"Attempt {attempt + 1} failed for simplified generation: {e}")
78
+ if attempt < self.max_retries - 1:
79
+ time.sleep(self.retry_delay)
80
+
81
+ return None
82
+
83
+ def _generate_basic_tasks(self, risk_level: int) -> Optional[List[Dict]]:
84
+ """Secondary fallback: Generate basic tasks with minimal context"""
85
+
86
+ prompt = self._build_basic_prompt(risk_level)
87
+
88
+ for attempt in range(self.max_retries):
89
+ try:
90
+ response = self._call_granite_api(prompt, temperature=0.3)
91
+
92
+ if response:
93
+ tasks = self._extract_and_validate_tasks(response, risk_level, min_tasks=1)
94
+ if tasks:
95
+ print(f"βœ“ Generated {len(tasks)} basic AI tasks")
96
+ return tasks
97
+
98
+ except Exception as e:
99
+ print(f"Attempt {attempt + 1} failed for basic generation: {e}")
100
+ if attempt < self.max_retries - 1:
101
+ time.sleep(self.retry_delay)
102
+
103
+ return None
104
+
105
    def _build_comprehensive_prompt(self, user_profile: Dict, mental_health_assessment: str, risk_level: int) -> str:
        """Build detailed prompt for comprehensive task generation

        Embeds the full user profile, the free-text assessment and the
        risk-tier requirements, and demands a strict JSON-array reply
        (parsed downstream by _extract_and_validate_tasks). Missing profile
        keys render as 'Not specified'.
        """

        return f"""
You are a professional wellness coach. Create personalized wellness tasks based on user data.

GUIDELINES:
- Use professional, clear language
- Focus on evidence-based wellness practices
- Ensure all recommendations are safe and appropriate
- Use inclusive, respectful language

USER CONTEXT:
Risk Level: {risk_level}/10 (10 = highest risk)
Assessment: {mental_health_assessment}

USER PROFILE:
- Stress Level: {user_profile.get('Stress_Level', 'Not specified')}
- Sleep: {user_profile.get('Sleep_Hours', 'Not specified')} hours/night, quality: {user_profile.get('Sleep_Quality', 'Not specified')}
- Work: {user_profile.get('Work_Hours', 'Not specified')} hours/week
- Exercise: {user_profile.get('Physical_Activity_Hours', 'Not specified')} hours/week
- Occupation: {user_profile.get('Occupation', 'Not specified')}
- Age: {user_profile.get('Age', 'Not specified')}
- Mood: {user_profile.get('Mood', 'Not specified')}
- Anxiety: {user_profile.get('Anxiety_Frequency', 'Not specified')}
- Energy: {user_profile.get('Energy_Level', 'Not specified')}

TASK REQUIREMENTS:
{self._get_risk_specific_requirements(risk_level)}

Generate 4-6 personalized wellness tasks. Each task must be:
1. Relevant to their specific situation
2. Practical and achievable
3. Evidence-based for mental health improvement
4. Appropriate for their risk level
5. Include clear, actionable instructions

MANDATORY JSON FORMAT (return ONLY valid JSON):
[
{{
    "task_type": "select from: meditation, exercise, sleep_schedule, social_connection, journaling, breathing_exercise, nature_walk, healthy_meal, screen_break, gratitude_practice, professional_help, mindfulness, stress_management, routine_building, creative_activity, relaxation_technique",
    "title": "Clear, engaging title",
    "description": "Brief description explaining the benefits",
    "duration_days": appropriate_number,
    "difficulty": "easy/medium/hard",
    "instructions": "Step-by-step instructions tailored to their profile",
    "completion_criteria": "Clear, measurable success criteria",
    "personalization_notes": "Why this task fits their specific situation"
}}
]
"""
156
+
157
+ def _build_simplified_prompt(self, user_profile: Dict, risk_level: int) -> str:
158
+ """Build simplified prompt when comprehensive generation fails"""
159
+
160
+ key_factors = []
161
+ if user_profile.get('Stress_Level'):
162
+ key_factors.append(f"Stress: {user_profile['Stress_Level']}")
163
+ if user_profile.get('Sleep_Hours'):
164
+ key_factors.append(f"Sleep: {user_profile['Sleep_Hours']}h")
165
+ if user_profile.get('Work_Hours'):
166
+ key_factors.append(f"Work: {user_profile['Work_Hours']}h/week")
167
+
168
+ factors_str = ", ".join(key_factors) if key_factors else "Limited profile data"
169
+
170
+ return f"""
171
+ You are a wellness coach. Create practical wellness tasks.
172
+
173
+ USER: {factors_str}
174
+ Risk Level: {risk_level}/10
175
+
176
+ {self._get_risk_specific_requirements(risk_level)}
177
+
178
+ Generate 3-4 practical wellness tasks as JSON array:
179
+ [
180
+ {{
181
+ "task_type": "meditation|exercise|sleep_schedule|journaling|breathing_exercise|professional_help|stress_management",
182
+ "title": "Clear task title",
183
+ "description": "Brief helpful description",
184
+ "duration_days": number,
185
+ "difficulty": "easy|medium|hard",
186
+ "instructions": "Step-by-step instructions",
187
+ "completion_criteria": "How to measure success"
188
+ }}
189
+ ]
190
+ """
191
+
192
    def _build_basic_prompt(self, risk_level: int) -> str:
        """Build minimal prompt for basic task generation

        Last-resort prompt: the risk level is the only context available.
        """

        return f"""
You are a wellness coach. Generate wellness tasks for risk level {risk_level}/10.

{self._get_risk_specific_requirements(risk_level)}

Return 2-3 tasks as JSON:
[
{{
    "task_type": "breathing_exercise|meditation|professional_help|journaling",
    "title": "Task title",
    "description": "What this helps with",
    "duration_days": 1-7,
    "difficulty": "easy|medium",
    "instructions": "Clear instructions",
    "completion_criteria": "Success measure"
}}
]
"""
213
+
214
    def _get_risk_specific_requirements(self, risk_level: int) -> str:
        """Get specific requirements based on risk level

        Maps the 1-10 risk score onto one of four requirement tiers
        (crisis >= 8, high >= 6, moderate >= 4, else maintenance) that are
        embedded verbatim into every generation prompt.
        """

        if risk_level >= 8:
            # Crisis tier: professional help is mandatory.
            return """
CRITICAL PRIORITY:
- MUST include immediate professional help seeking
- Focus on crisis intervention and safety
- Include emergency resources and contacts
- Tasks should provide immediate coping mechanisms
- Maximum task duration: 1-2 days
"""
        elif risk_level >= 6:
            return """
HIGH PRIORITY:
- Strongly recommend professional consultation within 1 week
- Include daily anxiety/stress management techniques
- Focus on stabilization and routine building
- Provide structured, manageable activities
- Task duration: 2-7 days
"""
        elif risk_level >= 4:
            return """
MODERATE PRIORITY:
- Include both self-care and gradual improvement activities
- Balance mental and physical wellness approaches
- Encourage social connection and support
- Build sustainable, healthy habits
- Task duration: 5-14 days
"""
        else:
            # risk_level <= 3: prevention / maintenance tier.
            return """
MAINTENANCE/PREVENTION:
- Focus on wellness enhancement and prevention
- Include enjoyable, engaging activities
- Support long-term habit building
- Promote overall life satisfaction
- Task duration: 7-21 days
"""
253
+
254
+ def _extract_and_validate_tasks(self, response: str, risk_level: int, min_tasks: int = 2) -> Optional[List[Dict]]:
255
+ """Extract and validate tasks from AI response"""
256
+
257
+ try:
258
+ # Clean the response
259
+ cleaned_response = self._clean_json_string(response)
260
+ if not cleaned_response:
261
+ return None
262
+
263
+ # Try to parse JSON
264
+ try:
265
+ tasks = json.loads(cleaned_response)
266
+ except json.JSONDecodeError:
267
+ # Try to extract individual task objects
268
+ tasks = self._extract_individual_tasks(response)
269
+ if not tasks:
270
+ return None
271
+
272
+ # Validate task structure
273
+ validated_tasks = self._validate_task_structure(tasks, risk_level)
274
+
275
+ if validated_tasks and len(validated_tasks) >= min_tasks:
276
+ return validated_tasks
277
+
278
+ except Exception as e:
279
+ print(f"Error extracting tasks: {e}")
280
+
281
+ return None
282
+
283
+ def _clean_json_string(self, json_str: str) -> Optional[str]:
284
+ """Clean malformed JSON string"""
285
+ try:
286
+ # Remove common issues
287
+ cleaned = json_str.strip()
288
+
289
+ # Find JSON array boundaries
290
+ start_idx = cleaned.find('[')
291
+ end_idx = cleaned.rfind(']')
292
+
293
+ if start_idx == -1 or end_idx == -1:
294
+ return None
295
+
296
+ cleaned = cleaned[start_idx:end_idx + 1]
297
+
298
+ # Remove newlines and extra whitespace
299
+ cleaned = cleaned.replace('\n', ' ').replace('\r', '')
300
+ cleaned = ' '.join(cleaned.split())
301
+
302
+ # Fix common JSON issues
303
+ cleaned = cleaned.replace(',}', '}') # Remove trailing commas
304
+ cleaned = cleaned.replace(',]', ']')
305
+
306
+ return cleaned
307
+
308
+ except Exception:
309
+ return None
310
+
311
+ def _extract_individual_tasks(self, response: str) -> Optional[List[Dict]]:
312
+ """Extract individual task objects when array parsing fails"""
313
+ tasks = []
314
+
315
+ # Find individual task objects
316
+ import re
317
+ task_pattern = r'\{[^{}]*"task_type"[^{}]*\}'
318
+ matches = re.findall(task_pattern, response, re.DOTALL)
319
+
320
+ for match in matches:
321
+ try:
322
+ # Clean the match
323
+ clean_match = match.replace('\n', ' ').replace('\r', '')
324
+ clean_match = ' '.join(clean_match.split())
325
+ clean_match = clean_match.replace(',}', '}')
326
+
327
+ task = json.loads(clean_match)
328
+ tasks.append(task)
329
+ except:
330
+ continue
331
+
332
+ return tasks if tasks else None
333
+
334
+ def _validate_task_structure(self, tasks: List[Dict], risk_level: int) -> Optional[List[Dict]]:
335
+ """Validate and sanitize task structure"""
336
+
337
+ if not isinstance(tasks, list):
338
+ return None
339
+
340
+ valid_tasks = []
341
+ required_fields = ['task_type', 'title', 'description', 'duration_days', 'difficulty', 'instructions', 'completion_criteria']
342
+
343
+ valid_task_types = {
344
+ 'meditation', 'exercise', 'sleep_schedule', 'social_connection',
345
+ 'journaling', 'breathing_exercise', 'nature_walk', 'healthy_meal',
346
+ 'screen_break', 'gratitude_practice', 'professional_help',
347
+ 'mindfulness', 'stress_management', 'routine_building',
348
+ 'creative_activity', 'relaxation_technique'
349
+ }
350
+
351
+ # Ensure high-risk users get professional help
352
+ has_professional_help = False
353
+
354
+ for task in tasks:
355
+ if not isinstance(task, dict):
356
+ continue
357
+
358
+ # Check required fields
359
+ if not all(field in task and str(task[field]).strip() for field in required_fields):
360
+ continue
361
+
362
+ # Validate and clean task
363
+ validated_task = self._clean_and_validate_task(task, valid_task_types)
364
+ if validated_task:
365
+ valid_tasks.append(validated_task)
366
+
367
+ if validated_task['task_type'] == 'professional_help':
368
+ has_professional_help = True
369
+
370
+ # Ensure high-risk users have professional help task
371
+ if risk_level >= 7 and not has_professional_help and len(valid_tasks) > 0:
372
+ professional_task = self._generate_professional_help_task(risk_level)
373
+ valid_tasks.insert(0, professional_task)
374
+
375
+ return valid_tasks if len(valid_tasks) >= 1 else None
376
+
377
+ def _clean_and_validate_task(self, task: Dict, valid_task_types: set) -> Optional[Dict]:
378
+ """Clean and validate individual task"""
379
+
380
+ try:
381
+ # Validate task_type
382
+ if task.get('task_type') not in valid_task_types:
383
+ return None
384
+
385
+ # Clean and validate duration
386
+ try:
387
+ duration = int(task.get('duration_days', 7))
388
+ duration = max(1, min(30, duration)) # Clamp between 1-30 days
389
+ except (ValueError, TypeError):
390
+ duration = 7
391
+
392
+ # Validate difficulty
393
+ difficulty = task.get('difficulty', 'medium').lower()
394
+ if difficulty not in ['easy', 'medium', 'hard']:
395
+ difficulty = 'medium'
396
+
397
+ # Clean text fields
398
+ cleaned_task = {
399
+ 'task_type': task['task_type'],
400
+ 'title': str(task['title']).strip()[:100],
401
+ 'description': str(task['description']).strip()[:300],
402
+ 'duration_days': duration,
403
+ 'difficulty': difficulty,
404
+ 'instructions': str(task['instructions']).strip()[:1000],
405
+ 'completion_criteria': str(task['completion_criteria']).strip()[:200]
406
+ }
407
+
408
+ # Add optional fields if present
409
+ if 'personalization_notes' in task:
410
+ cleaned_task['personalization_notes'] = str(task['personalization_notes']).strip()[:200]
411
+
412
+ return cleaned_task
413
+
414
+ except Exception as e:
415
+ print(f"Error cleaning task: {e}")
416
+ return None
417
+
418
+ def _generate_professional_help_task(self, risk_level: int) -> Dict:
419
+ """Generate professional help task for high-risk users"""
420
+
421
+ urgency = "immediately" if risk_level >= 8 else "within 1-2 days"
422
+
423
+ return {
424
+ "task_type": "professional_help",
425
+ "title": "Seek Professional Mental Health Support",
426
+ "description": f"Contact a mental health professional {urgency} for assessment and support",
427
+ "duration_days": 2,
428
+ "difficulty": "medium",
429
+ "instructions": f"Contact your healthcare provider, call a mental health helpline, or visit a mental health clinic {urgency}. If in immediate crisis, call emergency services (911) or go to the nearest emergency room.",
430
+ "completion_criteria": "Make contact with a mental health professional or crisis support service"
431
+ }
432
+
433
    def _get_emergency_preset_tasks(self, risk_level: int) -> List[Dict]:
        """ONLY used when Granite AI is completely unavailable - minimal preset tasks

        Hard-coded fallback lists tiered by risk: crisis (>= 7), moderate
        (>= 4) and maintenance (< 4). Every dict mirrors the schema produced
        by _clean_and_validate_task.
        """

        print("EMERGENCY: Using preset tasks - Granite AI unavailable")

        if risk_level >= 7:
            # Crisis tier: a single, immediate professional-help task.
            return [
                {
                    "task_type": "professional_help",
                    "title": "Emergency Professional Help",
                    "description": "Seek immediate professional mental health support",
                    "duration_days": 1,
                    "difficulty": "medium",
                    "instructions": "Contact emergency mental health services, your doctor, or call a crisis helpline immediately.",
                    "completion_criteria": "Make contact with professional help"
                }
            ]
        elif risk_level >= 4:
            # Moderate tier: stabilizing daily-practice tasks.
            return [
                {
                    "task_type": "breathing_exercise",
                    "title": "Daily Breathing Practice",
                    "description": "Use breathing exercises to manage stress and anxiety",
                    "duration_days": 7,
                    "difficulty": "easy",
                    "instructions": "Practice 4-7-8 breathing: Inhale for 4 counts, hold for 7 counts, exhale for 8 counts. Repeat 4 times, twice daily.",
                    "completion_criteria": "Complete breathing exercise twice daily for one week"
                },
                {
                    "task_type": "sleep_schedule",
                    "title": "Improve Sleep Routine",
                    "description": "Establish a consistent sleep schedule for better rest",
                    "duration_days": 14,
                    "difficulty": "medium",
                    "instructions": "Go to bed and wake up at the same time daily. Create a 30-minute wind-down routine before bed.",
                    "completion_criteria": "Maintain consistent sleep schedule for 2 weeks"
                }
            ]
        else:
            # Maintenance tier: longer-horizon wellbeing habits.
            return [
                {
                    "task_type": "gratitude_practice",
                    "title": "Daily Gratitude Journal",
                    "description": "Practice gratitude to boost mood and well-being",
                    "duration_days": 14,
                    "difficulty": "easy",
                    "instructions": "Write down 3 things you're grateful for each morning. Be specific and reflect on why you appreciate them.",
                    "completion_criteria": "Complete gratitude entries for 14 consecutive days"
                },
                {
                    "task_type": "nature_walk",
                    "title": "Weekly Nature Walks",
                    "description": "Connect with nature to reduce stress and improve mood",
                    "duration_days": 21,
                    "difficulty": "easy",
                    "instructions": "Take a 20-30 minute walk in a park, garden, or natural area twice per week. Focus on your surroundings.",
                    "completion_criteria": "Complete 6 nature walks over 3 weeks"
                }
            ]
492
+
493
    def _call_granite_api(self, prompt: str, temperature: float = 0.3) -> Optional[str]:
        """Enhanced API call with better error handling and logging

        POSTs a non-streaming generate request to the Ollama endpoint
        (self.base_url) and returns the raw response text, or None on any
        failure (HTTP error, empty reply, connection problem, timeout).
        """

        data = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": temperature,
                "top_p": 0.9,
                "num_predict": 2000,  # Increased for better task generation
                "repeat_penalty": 1.1,
                # Stop sequences keep the model from role-playing a dialogue.
                "stop": ["Human:", "Assistant:", "\n\n---"]
            }
        }

        try:
            print(f"🤖 Calling Granite API (temp={temperature})...")
            # Generous timeout: local generation of ~2000 tokens can be slow.
            response = requests.post(self.base_url, json=data, timeout=90)

            if response.status_code == 200:
                result = response.json()
                ai_response = result.get('response', '').strip()

                if ai_response:
                    print(f"✓ Granite API responded ({len(ai_response)} chars)")
                    return ai_response
                else:
                    print("⚠ Granite API returned empty response")
                    return None
            else:
                print(f"❌ Granite API error: {response.status_code}")
                return None

        except requests.exceptions.ConnectionError:
            print("❌ Cannot connect to Granite (Ollama not running?)")
            return None
        except requests.exceptions.Timeout:
            print("❌ Granite API timeout")
            return None
        except Exception as e:
            print(f"❌ Granite API error: {e}")
            return None
src/granite_chat.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import time
3
+ import re
4
+ from typing import Dict, List, Optional, Tuple, Any
5
+ from ibm_watsonx_ai.foundation_models import Model
6
+ from ibm_watsonx_ai.foundation_models.utils.enums import ModelTypes
7
+ from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
8
+ from langchain_ibm import WatsonxLLM
9
+
10
class GraniteChatAgent:
    """Wellness-focused chat agent backed by IBM watsonx Granite.

    Keeps a rolling conversation history (capped at 20 entries) and degrades
    gracefully through three prompt tiers — rich context, simplified, bare —
    before falling back to a canned apology.
    """

    def __init__(self, config):
        # config must expose GRANITE_MODEL, WATSON_URL, WATSON_API_KEY,
        # WATSON_PROJECT_ID (see src/config.py).
        self.config = config
        self.max_retries = 3   # attempts per prompt tier
        self.retry_delay = 2   # seconds between retries
        self.conversation_history = []  # alternating {"role", "content"} dicts
        # Greedy decoding is requested alongside a temperature; which one the
        # service honors depends on the watsonx API — TODO confirm.
        self.llm = WatsonxLLM(
            model_id=config.GRANITE_MODEL,
            url=config.WATSON_URL,
            apikey=config.WATSON_API_KEY,
            project_id=config.WATSON_PROJECT_ID,
            params={
                GenParams.DECODING_METHOD: "greedy",
                GenParams.TEMPERATURE: 0.7,
                GenParams.MIN_NEW_TOKENS: 20,
                GenParams.MAX_NEW_TOKENS: 500,
                # Stop if the model starts writing the user's side of the chat.
                GenParams.STOP_SEQUENCES: ["Human:", "User:", "\n\nHuman:", "\n\nUser:"],
            },
        )

    def get_chat_response(self, user_message: str, user_profile: Optional[Dict] = None, context: Optional[str] = None) -> str:
        """Answer one user message, trying three prompt tiers before giving up.

        Both the user message and the final response are appended to the
        rolling conversation history as a side effect.
        """
        self.conversation_history.append({"role": "user", "content": user_message})
        response = self._generate_direct_response(user_message, user_profile, context)
        if not response:
            response = self._generate_fallback_response(user_message, user_profile)
        if not response:
            response = self._generate_basic_response(user_message)
        if not response:
            response = "I apologize, but I'm having trouble processing your request right now. Could you please rephrase your question or try again?"
        self.conversation_history.append({"role": "assistant", "content": response})
        # Bound prompt size by keeping only the most recent 20 entries.
        if len(self.conversation_history) > 20:
            self.conversation_history = self.conversation_history[-20:]
        return response

    def get_wellness_advice(self, topic: str, user_profile: Optional[Dict] = None) -> str:
        """Ask the agent for wellness advice on *topic*, personalized if a profile is given."""
        advice_prompt = f"Please provide wellness advice about {topic}"
        if user_profile:
            advice_prompt += f" for someone with these characteristics: {self._format_user_profile(user_profile)}"
        return self.get_chat_response(advice_prompt, user_profile)

    def answer_question(self, question: str, user_profile: Optional[Dict] = None) -> str:
        """Thin alias over get_chat_response for question-style input."""
        return self.get_chat_response(question, user_profile)

    def provide_support(self, concern: str, user_profile: Optional[Dict] = None) -> str:
        """Phrase *concern* as a first-person support request and answer it."""
        support_message = f"I'm concerned about {concern} and could use some support and guidance."
        return self.get_chat_response(support_message, user_profile)

    def _generate_direct_response(self, user_message: str, user_profile: Optional[Dict], context: Optional[str]) -> Optional[str]:
        """Tier 1: full prompt with profile, extra context, and recent history."""
        prompt = self._build_chat_prompt(user_message, user_profile, context)
        for attempt in range(self.max_retries):
            try:
                response = self._call_granite_api(prompt, temperature=0.7)
                # Require a minimally substantive reply before cleaning.
                if response and len(response.strip()) > 10:
                    cleaned_response = self._clean_chat_response(response)
                    if cleaned_response:
                        print(f"βœ“ Generated direct chat response ({len(cleaned_response)} chars)")
                        return cleaned_response
            except Exception as e:
                print(f"Attempt {attempt + 1} failed for direct response: {e}")
            if attempt < self.max_retries - 1:
                time.sleep(self.retry_delay)
        return None

    def _generate_fallback_response(self, user_message: str, user_profile: Optional[Dict]) -> Optional[str]:
        """Tier 2: simplified prompt with only key profile facts and last message."""
        prompt = self._build_simple_chat_prompt(user_message, user_profile)
        for attempt in range(self.max_retries):
            try:
                response = self._call_granite_api(prompt, temperature=0.6)
                if response and len(response.strip()) > 5:
                    cleaned_response = self._clean_chat_response(response)
                    if cleaned_response:
                        print(f"βœ“ Generated fallback chat response")
                        return cleaned_response
            except Exception as e:
                print(f"Attempt {attempt + 1} failed for fallback response: {e}")
            if attempt < self.max_retries - 1:
                time.sleep(self.retry_delay)
        return None

    def _generate_basic_response(self, user_message: str) -> Optional[str]:
        """Tier 3: bare prompt with no profile or history context."""
        prompt = self._build_basic_chat_prompt(user_message)
        for attempt in range(self.max_retries):
            try:
                response = self._call_granite_api(prompt, temperature=0.5)
                if response and len(response.strip()) > 3:
                    cleaned_response = self._clean_chat_response(response)
                    if cleaned_response:
                        print(f"βœ“ Generated basic chat response")
                        return cleaned_response
            except Exception as e:
                print(f"Attempt {attempt + 1} failed for basic response: {e}")
            if attempt < self.max_retries - 1:
                time.sleep(self.retry_delay)
        return None

    def _build_chat_prompt(self, user_message: str, user_profile: Optional[Dict], context: Optional[str]) -> str:
        """Assemble the tier-1 prompt: system instructions + profile + history + message."""
        conversation_context = self._build_conversation_context()
        user_context = ""
        if user_profile:
            user_context = f"""
USER CONTEXT:
- Stress Level: {user_profile.get('Stress_Level', 'Unknown')}
- Sleep: {user_profile.get('Sleep_Hours', 'Unknown')} hours/night
- Work: {user_profile.get('Work_Hours', 'Unknown')} hours/week
- Mood: {user_profile.get('Mood', 'Unknown')}
- Age: {user_profile.get('Age', 'Unknown')}
- Occupation: {user_profile.get('Occupation', 'Unknown')}
"""
        additional_context = f"\nADDITIONAL CONTEXT:\n{context}\n" if context else ""
        return f"""You are a helpful, empathetic, and knowledgeable AI assistant specializing in wellness and mental health support. Respond directly in first person as if you are speaking to the user face-to-face.

IMPORTANT: Start your response immediately with your direct answer. Do not think aloud, analyze, or explain your reasoning process. Give a direct, conversational response from the first word.

Guidelines:
1. SPEAK DIRECTLY: Use "I", "you", "your" - respond as if in conversation
2. NO ANALYSIS: Don't show your thinking process or reasoning
3. BE IMMEDIATE: Start with your actual response, not explanations
4. STAY SUPPORTIVE: Be empathetic and helpful
5. BE CONCISE: Give practical, actionable advice

{user_context}{additional_context}

CONVERSATION HISTORY:
{conversation_context}

USER MESSAGE: {user_message}

Your direct response (start immediately, no analysis):"""

    def _build_simple_chat_prompt(self, user_message: str, user_profile: Optional[Dict]) -> str:
        """Assemble the tier-2 prompt with only stress/mood facts and the last history entry."""
        user_info = ""
        if user_profile:
            key_info = []
            if user_profile.get('Stress_Level'):
                key_info.append(f"Stress: {user_profile['Stress_Level']}")
            if user_profile.get('Mood'):
                key_info.append(f"Mood: {user_profile['Mood']}")
            if key_info:
                user_info = f"User info: {', '.join(key_info)}\n"
        recent_context = ""
        if len(self.conversation_history) > 0:
            # NOTE(review): history[-1] is the just-appended current user message,
            # so "Previous message" actually echoes the current one — confirm intent.
            recent_context = f"Previous message: {self.conversation_history[-1]['content'][:100]}...\n"
        return f"""You are a helpful wellness AI assistant. Give a direct, first-person response. Start immediately with your answer - no thinking aloud or analysis.

{user_info}{recent_context}
User: {user_message}

Direct response:"""

    def _build_basic_chat_prompt(self, user_message: str) -> str:
        """Assemble the tier-3 bare prompt (no profile, no history)."""
        return f"""You are a helpful AI assistant. Respond directly in first person. Start immediately with your response - no analysis or thinking aloud.

User: {user_message}

Direct response:"""

    def _build_conversation_context(self) -> str:
        """Render the last 6 history entries as "Role: content" lines, truncated to 150 chars."""
        if not self.conversation_history:
            return "No previous conversation."
        context_lines = []
        recent_history = self.conversation_history[-6:]
        for entry in recent_history:
            role = "User" if entry["role"] == "user" else "Assistant"
            content = entry["content"][:150] + "..." if len(entry["content"]) > 150 else entry["content"]
            context_lines.append(f"{role}: {content}")
        return "\n".join(context_lines)

    def _format_user_profile(self, user_profile: Dict) -> str:
        """Flatten a profile dict into a readable comma-separated string, skipping empty/unknown values."""
        profile_parts = []
        for key, value in user_profile.items():
            if value and str(value).strip().lower() not in ['unknown', 'not specified', '']:
                readable_key = key.replace('_', ' ').title()
                profile_parts.append(f"{readable_key}: {value}")
        return ", ".join(profile_parts) if profile_parts else "Limited profile information"

    def _clean_chat_response(self, response: str) -> Optional[str]:
        """Strip role labels and "thinking aloud" preamble from a raw completion.

        Heuristic: skip leading lines that look like analysis until the first
        direct-sounding line, then keep subsequent non-analysis lines. Returns
        None if fewer than 10 characters survive.
        """
        if not response:
            return None

        cleaned = response.strip()

        # Remove common system/thinking indicators
        system_indicators = [
            "You are a helpful", "RESPONSE:", "Assistant:", "AI:", "Human:", "User:",
            "Direct response:", "Your direct response:", "Based on", "Looking at",
            "Let me", "I need to", "First,", "The user is", "From what", "Since",
            "Given that", "Considering", "In this case", "It seems", "This appears"
        ]

        # Remove thinking patterns - lines that start with analysis
        thinking_patterns = [
            r"^(Okay|So|Well|Now|Let me|I see|Looking at|Based on|From what|Given that|Since|Considering).*",
            r"^The user (is|has|wants|needs|seems).*",
            r"^This (is|seems|appears|looks|sounds).*",
            r"^It (seems|appears|looks|sounds).*",
            r"^They (are|have|want|need|seem).*"
        ]

        lines = cleaned.split('\n')
        cleaned_lines = []
        skip_mode = True  # Start in skip mode to remove initial thinking

        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Check if this line is thinking/analysis
            is_thinking = False

            # Check system indicators
            if any(line.startswith(indicator) for indicator in system_indicators):
                is_thinking = True

            # Check thinking patterns
            if any(re.match(pattern, line, re.IGNORECASE) for pattern in thinking_patterns):
                is_thinking = True

            # Check for analysis keywords at start of line
            analysis_starts = ['okay', 'so', 'well', 'now', 'let me', 'i see', 'looking', 'based on',
                               'from what', 'given that', 'since', 'considering', 'the user', 'this is',
                               'it seems', 'they are', 'i think', 'i believe', 'i should', 'i need to']

            line_lower = line.lower()
            if any(line_lower.startswith(start) for start in analysis_starts):
                is_thinking = True

            # If we're in skip mode and this isn't thinking, switch to collect mode
            if skip_mode and not is_thinking:
                skip_mode = False

            # Collect non-thinking lines after we've found the first real response
            if not skip_mode and not is_thinking:
                cleaned_lines.append(line)

        # If no good lines found, try to salvage something
        if not cleaned_lines:
            # Look for lines that start with "I" or common first-person responses
            for line in lines:
                line = line.strip()
                if line and (line.startswith('I ') or line.startswith('You ') or
                             line.startswith('Your ') or line.startswith('Based on your')):
                    cleaned_lines.append(line)
                    break

        # Join the cleaned lines
        final_response = '\n'.join(cleaned_lines).strip()

        # Additional cleanup
        if final_response.startswith('"') and final_response.endswith('"'):
            final_response = final_response[1:-1].strip()

        # Ensure minimum length
        if len(final_response) < 10:
            return None

        # Limit length
        if len(final_response) > 1000:
            final_response = final_response[:1000] + "..."

        return final_response if final_response else None

    def _call_granite_api(self, prompt: str, temperature: float = 0.7) -> Optional[str]:
        """Invoke the watsonx LLM once; return stripped text or None on failure.

        NOTE(review): mutating self.llm.params makes the temperature sticky
        across subsequent calls — confirm this is intended.
        """
        try:
            print(f"πŸ€– Calling Watson Granite API (temp={temperature})...")
            self.llm.params[GenParams.TEMPERATURE] = temperature
            response = self.llm.invoke(prompt)
            if response and response.strip():
                print(f"βœ“ Watson Granite API responded ({len(response)} chars)")
                return response.strip()
            else:
                print("⚠ Watson Granite API returned empty response")
                return None
        except Exception as e:
            print(f"❌ Watson Granite API error: {e}")
            return None

    def clear_conversation_history(self):
        """Drop all stored conversation turns."""
        self.conversation_history = []
        print("βœ“ Conversation history cleared")

    def get_conversation_summary(self) -> str:
        """Return a human-readable summary of message counts and recent user topics."""
        if not self.conversation_history:
            return "No conversation history available."
        total_messages = len(self.conversation_history)
        user_messages = len([msg for msg in self.conversation_history if msg["role"] == "user"])
        assistant_messages = len([msg for msg in self.conversation_history if msg["role"] == "assistant"])
        recent_topics = []
        for msg in self.conversation_history[-4:]:
            if msg["role"] == "user":
                content = msg["content"][:100]
                recent_topics.append(content)
        summary = f"""Conversation Summary:
- Total messages: {total_messages}
- User messages: {user_messages}
- Assistant messages: {assistant_messages}
- Recent topics discussed: {'; '.join(recent_topics) if recent_topics else 'General conversation'}"""
        return summary.strip()

    def set_chat_personality(self, personality_type: str = "supportive"):
        """Tune temperature and max tokens to a named personality preset.

        Unknown presets are reported but leave the current parameters unchanged.
        """
        personality_configs = {
            "supportive": {
                "temperature": 0.7,
                "max_tokens": 500,
                "style": "empathetic and encouraging"
            },
            "professional": {
                "temperature": 0.5,
                "max_tokens": 400,
                "style": "clinical and informative"
            },
            "casual": {
                "temperature": 0.8,
                "max_tokens": 300,
                "style": "friendly and conversational"
            },
            "direct": {
                "temperature": 0.4,
                "max_tokens": 250,
                "style": "concise and straightforward"
            }
        }
        if personality_type in personality_configs:
            # Local name intentionally shadows nothing; it is the preset dict.
            config = personality_configs[personality_type]
            self.llm.params[GenParams.TEMPERATURE] = config["temperature"]
            self.llm.params[GenParams.MAX_NEW_TOKENS] = config["max_tokens"]
            print(f"βœ“ Chat personality set to: {personality_type} ({config['style']})")
        else:
            print(f"⚠ Unknown personality type: {personality_type}. Available: {list(personality_configs.keys())}")
339
+
340
class ChatIntegration:
    """Thin adapter exposing a GraniteChatAgent to async chat endpoints."""

    def __init__(self, granite_agent: GraniteChatAgent):
        self.agent = granite_agent

    async def handle_chat_message(self, message: str, user_id: str, user_profile: Optional[Dict] = None) -> Dict:
        """Run one chat turn and wrap the outcome in a transport-friendly dict.

        Never raises: failures are reported via a "success": False payload
        carrying the error string and a generic apology.
        """
        try:
            answer = self.agent.get_chat_response(message, user_profile)
        except Exception as exc:
            return {
                "success": False,
                "error": str(exc),
                "response": "I apologize, but I'm having technical difficulties. Please try again.",
                "user_id": user_id,
                "timestamp": time.time(),
                "response_type": "error"
            }
        return {
            "success": True,
            "response": answer,
            "user_id": user_id,
            "timestamp": time.time(),
            "response_type": "direct_answer",
            # Very short answers are flagged as lower confidence.
            "confidence": "high" if len(answer) > 50 else "medium"
        }

    def get_quick_responses(self, user_message: str) -> List[str]:
        """Suggest three follow-up prompts keyed off keywords in *user_message*.

        Categories are checked in priority order (stress, sleep, low mood);
        the first match wins, otherwise generic prompts are returned.
        """
        text = user_message.lower()
        keyword_suggestions = [
            (('stressed', 'anxiety', 'worried'), [
                "Tell me more about what's causing your stress",
                "Would you like some immediate stress relief techniques?",
                "How long have you been feeling this way?"
            ]),
            (('sleep', 'tired', 'insomnia'), [
                "What's your current sleep schedule like?",
                "Would you like tips for better sleep hygiene?",
                "How many hours of sleep do you typically get?"
            ]),
            (('sad', 'depressed', 'down'), [
                "I'm here to support you through this",
                "Would you like to talk about what's making you feel this way?",
                "Have you considered speaking with a counselor?"
            ]),
        ]
        for keywords, suggestions in keyword_suggestions:
            if any(word in text for word in keywords):
                return suggestions
        return [
            "Can you tell me more about that?",
            "How can I best support you with this?",
            "What would be most helpful for you right now?"
        ]
src/groq_agent.py ADDED
@@ -0,0 +1,458 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from groq import Groq
2
+ import json
3
+ import os
4
+ import re
5
+ from typing import Dict, Optional, Tuple
6
+
7
class GroqAgent:
    """Groq-hosted LLM agent for wellness assessments, tips, and chat.

    Produces short bullet-point output via prompt templates, post-processes
    completions to strip "thinking aloud" text, and derives a 1-10 risk level
    from the user profile with a deterministic scoring rubric.
    """

    def __init__(self, config):
        self.config = config
        # Set API key as environment variable for Groq client
        os.environ['GROQ_API_KEY'] = config.GROQ_API_KEY
        self.client = Groq()
        # NOTE(review): model id is hard-coded here rather than read from
        # config.GROQ_MODEL — confirm they should stay in sync.
        self.model = "qwen/qwen3-32b"

    def analyze_mental_health(self, user_profile: Dict) -> Tuple[str, int]:
        """Analyze user's mental health status and return short bullet assessment with risk level"""

        # Risk level comes from the local rubric, not the LLM, so it stays
        # deterministic even when the API call fails.
        risk_level = self._calculate_dynamic_risk_level(user_profile)

        prompt = f"""
Analyze this user profile and provide ONLY bullet-point assessment. Start immediately with bullet points.

USER PROFILE:
- Age: {user_profile.get('Age', 'N/A')}
- Occupation: {user_profile.get('Occupation', 'N/A')}
- Stress Level: {user_profile.get('Stress_Level', 'N/A')}
- Sleep: {user_profile.get('Sleep_Hours', 'N/A')} hours, quality: {user_profile.get('Sleep_Quality', 'N/A')}
- Work: {user_profile.get('Work_Hours', 'N/A')} hours/week
- Exercise: {user_profile.get('Physical_Activity_Hours', 'N/A')} hours/week
- Mood: {user_profile.get('Mood', 'N/A')}
- Anxiety: {user_profile.get('Anxiety_Frequency', 'N/A')}
- Energy: {user_profile.get('Energy_Level', 'N/A')}

Give exactly 4 bullet points:

β€’ [Key strength/positive factor]
β€’ [Main concern/area needing attention]
β€’ [Specific recommendation]
β€’ [Overall wellness status]
"""

        response = self._call_groq_api(prompt, temperature=0.3)

        if response:
            # Clean and format the response
            assessment = self._format_bullet_response(response)
            return assessment, risk_level

        # Mid-scale fallback risk when the API yields nothing.
        return "β€’ Unable to complete assessment at this time\nβ€’ Please try again later", 5

    def get_health_tips(self, user_profile: Dict, user_question: str = None) -> str:
        """Generate short, practical health tips in bullet format"""

        if user_question:
            prompt = f"""
Answer this health question with SHORT, practical advice for this user:

QUESTION: {user_question}

USER: {user_profile.get('Age')}yo, {user_profile.get('Occupation')}, {user_profile.get('Stress_Level')} stress, {user_profile.get('Sleep_Hours')}h sleep, {user_profile.get('Physical_Activity_Hours')}h exercise/week

Give exactly 4 bullet points with direct actionable advice:

β€’ [Direct tip 1]
β€’ [Direct tip 2]
β€’ [Direct tip 3]
β€’ [Direct tip 4]
"""
        else:
            prompt = f"""
Create 4 wellness tips for this user:

USER: {user_profile.get('Age')}yo, {user_profile.get('Occupation')}, stress: {user_profile.get('Stress_Level')}, sleep: {user_profile.get('Sleep_Hours')}h, exercise: {user_profile.get('Physical_Activity_Hours')}h/week

Give exactly 4 bullet points:

β€’ [tip 1]
β€’ [tip 2]
β€’ [tip 3]
β€’ [tip 4]
"""

        response = self._call_groq_api(prompt, temperature=0.5)
        if response:
            return self._format_bullet_response(response)
        # Static generic tips if the API is unavailable.
        return "β€’ Stay hydrated throughout the day\nβ€’ Take short breaks every hour\nβ€’ Practice deep breathing exercises\nβ€’ Get 7-8 hours of sleep"

    def get_chat_response(self, user_profile: Optional[Dict], question: Optional[str]) -> str:
        """Get a natural, conversational response to user questions"""

        # Guard clauses for missing profile or too-short questions.
        if not user_profile:
            return "I need more information about you to provide personalized advice. Could you share some details about your lifestyle?"

        if not question or len(question) < 5:
            return "Could you please ask a more specific question so I can help you better?"

        try:
            # Build basic user context for personalization
            user_context = f"User is {user_profile.get('Age', 'unknown')} years old"
            if user_profile.get('Stress_Level'):
                user_context += f", stress level: {user_profile.get('Stress_Level')}"
            if user_profile.get('Sleep_Hours'):
                user_context += f", sleeps {user_profile.get('Sleep_Hours')} hours"
            if user_profile.get('Physical_Activity_Hours'):
                user_context += f", exercises {user_profile.get('Physical_Activity_Hours')} hours/week"
            if user_profile.get('Diet'):
                user_context += f", diet quality: {user_profile.get('Diet')}"
            if user_profile.get('Mood'):
                user_context += f", mood: {user_profile.get('Mood')}"
            if user_profile.get('Anxiety_Frequency'):
                user_context += f", anxiety frequency: {user_profile.get('Anxiety_Frequency')}"
            if user_profile.get('Energy_Level'):
                user_context += f", energy level: {user_profile.get('Energy_Level')}"
            if user_profile.get('Occupation'):
                user_context += f", occupation: {user_profile.get('Occupation')}"
        except Exception as e:
            print(f"Error building user context: {e}")
            user_context = "User profile information is incomplete"

        # Enhanced conversation prompt for direct responses
        # NOTE(review): this API call is NOT wrapped in try/except, so SDK
        # errors propagate to the caller — confirm that is intended.
        response = self.client.chat.completions.create(
            model="qwen/qwen3-32b",
            messages=[
                {
                    "role": "system",
                    "content": f"""You are a friendly AI health coach. Respond directly and conversationally from the first word - no analysis, no thinking aloud.

User context: {user_context}

IMPORTANT: Start immediately with your direct response. Use "I", "you", "your" naturally. Give practical advice in 2-3 sentences. Be supportive and encouraging."""
                },
                {
                    "role": "user",
                    "content": question
                }
            ],
            max_tokens=150,
            temperature=0.7
        )

        # Clean the response to remove any thinking patterns
        raw_response = response.choices[0].message.content.strip()
        cleaned_response = self._clean_chat_response(raw_response)

        # Fall back to the raw text when cleaning strips everything.
        return cleaned_response if cleaned_response else raw_response

    def _clean_chat_response(self, response: str) -> Optional[str]:
        """Clean response to remove thinking patterns and ensure direct answers"""
        if not response:
            return None

        # Split into lines for analysis
        lines = response.split('\n')
        cleaned_lines = []
        skip_mode = True  # Start in skip mode to remove initial thinking

        # Patterns that indicate thinking/analysis rather than direct response
        thinking_patterns = [
            r"^(Let me|Looking at|Based on|Given that|Considering|The user|This user|From what).*",
            r"^(I see|I notice|I understand|It seems|It appears|This seems).*",
            r"^(Analyzing|Analysis|Assessment|Evaluation).*",
            r"^(First|Firstly|To start|Initially).*",
            r"^(So|Well|Now|Okay).*"
        ]

        # Words that indicate analysis vs direct response
        analysis_starters = [
            'analyzing', 'looking at', 'based on', 'given that', 'considering',
            'the user', 'this user', 'from what', 'let me', 'i see', 'i notice',
            'it seems', 'it appears', 'this seems', 'first', 'firstly', 'so',
            'well', 'now', 'okay', 'assessment shows', 'profile indicates'
        ]

        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Check if this line is thinking/analysis
            is_thinking = False
            line_lower = line.lower()

            # Check against thinking patterns
            if any(re.match(pattern, line, re.IGNORECASE) for pattern in thinking_patterns):
                is_thinking = True

            # Check against analysis starters
            if any(line_lower.startswith(starter) for starter in analysis_starters):
                is_thinking = True

            # If we're in skip mode and this isn't thinking, switch to collect mode
            if skip_mode and not is_thinking:
                # Look for lines that start with direct conversational responses
                if (line_lower.startswith('i ') or line_lower.startswith('you ') or
                        line_lower.startswith('your ') or line_lower.startswith('based on your') or
                        any(line_lower.startswith(start) for start in ['great', 'good', 'excellent', 'wonderful', 'that\'s', 'this is', 'absolutely'])):
                    skip_mode = False

            # Collect non-thinking lines after we've found the first real response
            if not skip_mode and not is_thinking:
                cleaned_lines.append(line)

        # If no good lines found, try to find the first conversational line
        if not cleaned_lines:
            for line in lines:
                line = line.strip()
                if line and len(line) > 10:
                    line_lower = line.lower()
                    # Look for direct responses
                    if (line_lower.startswith('i ') or line_lower.startswith('you ') or
                            line_lower.startswith('your ') or line_lower.startswith('that\'s ') or
                            line_lower.startswith('this is ') or line_lower.startswith('great ') or
                            line_lower.startswith('good ') or line_lower.startswith('excellent ')):
                        cleaned_lines.append(line)
                        break

        # Join the cleaned lines
        final_response = '\n'.join(cleaned_lines).strip()

        # Remove quotes if present
        if final_response.startswith('"') and final_response.endswith('"'):
            final_response = final_response[1:-1].strip()

        # Ensure minimum length
        if len(final_response) < 10:
            return None

        return final_response

    def _format_bullet_response(self, response: str) -> str:
        """Format response into clean bullet points"""
        if not response:
            return "β€’ No response available"

        # Remove any thinking process or analysis first
        cleaned_response = self._clean_analysis_from_bullets(response)

        # Extract bullet points
        lines = cleaned_response.split('\n')
        bullets = []

        for line in lines:
            line = line.strip()

            # Skip empty lines
            if not line:
                continue

            # Extract bullet points
            if line.startswith('β€’'):
                bullet_text = line[1:].strip()
                if len(bullet_text) > 5:  # Only meaningful content
                    # Limit length and clean up
                    if len(bullet_text) > 80:
                        bullet_text = bullet_text[:77] + "..."
                    bullets.append(f"β€’ {bullet_text}")
            elif line.startswith('-'):
                bullet_text = line[1:].strip()
                if len(bullet_text) > 5:
                    if len(bullet_text) > 80:
                        bullet_text = bullet_text[:77] + "..."
                    bullets.append(f"β€’ {bullet_text}")
            elif line and len(line) > 10 and len(bullets) < 4:
                # Convert non-bullet content to bullet if it looks like useful content
                if not any(word in line.lower() for word in ['analyzing', 'looking at', 'based on', 'assessment', 'profile']):
                    if len(line) > 80:
                        line = line[:77] + "..."
                    bullets.append(f"β€’ {line}")

        # Ensure we have 3-4 bullets
        if len(bullets) == 0:
            return "β€’ Assessment temporarily unavailable\nβ€’ Please try again in a moment"
        elif len(bullets) == 1:
            bullets.append("β€’ Continue monitoring your wellness patterns")
        elif len(bullets) == 2:
            bullets.append("β€’ Consider consulting a healthcare professional if concerns persist")

        return '\n'.join(bullets[:4])  # Limit to 4 bullets max

    def _clean_analysis_from_bullets(self, response: str) -> str:
        """Remove analysis/thinking patterns from bullet responses"""
        lines = response.split('\n')
        cleaned_lines = []

        # Skip lines that contain analysis patterns
        analysis_phrases = [
            'analyzing', 'looking at', 'based on', 'given that', 'considering',
            'assessment shows', 'profile indicates', 'examination reveals',
            'let me', 'first', 'okay', 'so', 'well', 'now'
        ]

        for line in lines:
            line = line.strip()
            if not line:
                continue

            # Check if line contains analysis patterns
            line_lower = line.lower()
            is_analysis = any(phrase in line_lower for phrase in analysis_phrases)

            # Keep lines that are bullet points or direct statements
            if not is_analysis or line.startswith('β€’') or line.startswith('-'):
                cleaned_lines.append(line)

        return '\n'.join(cleaned_lines)

    def _calculate_dynamic_risk_level(self, user_profile: Dict) -> int:
        """Calculate dynamic risk level based on user profile factors.

        Sums weighted points across sleep, stress/anxiety, mood, energy,
        activity, work hours, social media, diet, and substance use, then maps
        the raw score onto a 1 (very low) to 10 (critical) scale. Missing
        fields fall back to moderate defaults.
        """
        risk_score = 0

        # Sleep factors (0-4 points)
        sleep_hours = user_profile.get('Sleep_Hours', 7)
        sleep_quality = user_profile.get('Sleep_Quality', 'Fair')

        if sleep_hours < 5:
            risk_score += 3
        elif sleep_hours < 6:
            risk_score += 2
        elif sleep_hours < 7:
            risk_score += 1
        elif sleep_hours > 9:
            # Oversleeping is also scored as a mild risk factor.
            risk_score += 1

        if sleep_quality == 'Poor':
            risk_score += 2
        elif sleep_quality == 'Fair':
            risk_score += 1

        # Stress and anxiety factors (0-6 points)
        stress_level = user_profile.get('Stress_Level', 'Medium')
        anxiety_freq = user_profile.get('Anxiety_Frequency', 'Sometimes')

        if stress_level == 'High':
            risk_score += 3
        elif stress_level == 'Medium':
            risk_score += 1

        if anxiety_freq in ['Often', 'Always']:
            risk_score += 3
        elif anxiety_freq == 'Sometimes':
            risk_score += 1

        # Mood factors (0-3 points)
        mood = user_profile.get('Mood', 'Neutral')
        if mood in ['Very Sad', 'Sad']:
            risk_score += 3
        elif mood == 'Neutral':
            risk_score += 1

        # Energy level (0-2 points)
        energy = user_profile.get('Energy_Level', 'Medium')
        if energy in ['Very Low', 'Low']:
            risk_score += 2
        elif energy == 'Medium' and stress_level == 'High':
            risk_score += 1

        # Physical activity (0-2 points)
        activity_hours = user_profile.get('Physical_Activity_Hours', 3)
        if activity_hours < 1:
            risk_score += 2
        elif activity_hours < 2:
            risk_score += 1

        # Work-life balance (0-2 points)
        work_hours = user_profile.get('Work_Hours', 40)
        if work_hours > 60:
            risk_score += 2
        elif work_hours > 50:
            risk_score += 1

        # Social media and lifestyle factors (0-2 points)
        social_media = user_profile.get('Social_Media_Hours', 3)
        if social_media > 8:
            risk_score += 2
        elif social_media > 6:
            risk_score += 1

        # Diet quality (0-1 points)
        diet = user_profile.get('Diet', 'Average')
        if diet == 'Unhealthy':
            risk_score += 1

        # Substance use (0-2 points)
        smoking = user_profile.get('Smoking', 'Non-Smoker')
        alcohol = user_profile.get('Alcohol_Consumption', 'Rarely')

        if smoking in ['Regular Smoker', 'Heavy Smoker']:
            risk_score += 2
        elif smoking == 'Occasional Smoker':
            risk_score += 1

        if alcohol in ['Regularly']:
            risk_score += 1

        # Convert to 1-10 scale with proper distribution
        if risk_score <= 2:
            risk_level = 1  # Very low risk
        elif risk_score <= 4:
            risk_level = 2  # Low risk
        elif risk_score <= 6:
            risk_level = 3  # Low-moderate risk
        elif risk_score <= 8:
            risk_level = 4  # Moderate risk
        elif risk_score <= 10:
            risk_level = 5  # Moderate risk
        elif risk_score <= 12:
            risk_level = 6  # Moderate-high risk
        elif risk_score <= 14:
            risk_level = 7  # High risk
        elif risk_score <= 16:
            risk_level = 8  # High risk
        elif risk_score <= 18:
            risk_level = 9  # Very high risk
        else:
            risk_level = 10  # Critical risk

        return risk_level

    def _call_groq_api(self, prompt: str, temperature: float = 0.7) -> Optional[str]:
        """Make API call to Groq using the new Python SDK.

        NOTE(review): on failure this returns a user-facing bullet string, not
        None, so callers cannot distinguish errors from real completions.
        """
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {
                        "role": "system",
                        "content": "You are a wellness coach. Provide ONLY final answers - no thinking process, no analysis steps, no reasoning explanation. Start immediately with your direct response. Be direct, concise, and helpful."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                temperature=temperature,
                max_completion_tokens=150,  # Reduced for shorter responses
                top_p=0.9,
                stream=False,
                stop=None
            )

            return completion.choices[0].message.content

        except Exception as e:
            print(f"Groq API error: {e}")

            # Return user-friendly error messages
            error_msg = str(e).lower()
            if "rate limit" in error_msg or "429" in error_msg:
                return "β€’ Service temporarily busy, please try again shortly"
            elif "authentication" in error_msg or "401" in error_msg:
                return "β€’ Authentication issue, please contact support"
            elif "timeout" in error_msg:
                return "β€’ Response taking longer than expected, please retry"
            elif "connection" in error_msg:
                return "β€’ Connection issue, please check internet and retry"
            else:
                return "β€’ Service temporarily unavailable, please try again"
src/reward_system.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Dict, List, Optional
2
+
3
class RewardSystem:
    """Coin-based reward engine: prices completed wellness tasks and credits users."""

    def __init__(self, config, db_manager):
        self.config = config
        self.db = db_manager
        # Base coin value per task type (see Config.TASK_REWARDS).
        self.task_rewards = config.TASK_REWARDS

    def calculate_task_reward(self, task_type: str, difficulty: str,
                              completion_data: Optional[Dict] = None) -> int:
        """Return the coin payout for one completed task.

        Args:
            task_type: Key into ``config.TASK_REWARDS``; unknown types earn
                a base of 10 coins.
            difficulty: "easy" (x1.0), "medium" (x1.3) or "hard" (x1.6);
                any other value falls back to x1.0.
            completion_data: Optional dict; a ``quality_rating`` >= 4 adds
                5 bonus coins and ``exceeded_expectations`` adds 3 more.

        Returns:
            int(base * multiplier) + quality bonus.
        """
        base_reward = self.task_rewards.get(task_type, 10)
        multiplier = {"easy": 1.0, "medium": 1.3, "hard": 1.6}.get(difficulty, 1.0)

        quality_bonus = 0
        if completion_data:
            if completion_data.get('quality_rating', 0) >= 4:
                quality_bonus = 5
            if completion_data.get('exceeded_expectations', False):
                quality_bonus += 3

        return int(base_reward * multiplier) + quality_bonus

    def award_task_completion(self, user_id: str, task_id: str,
                              completion_data: Optional[Dict] = None) -> int:
        """Mark *task_id* completed and credit its coins to *user_id*.

        Returns:
            Coins awarded, or 0 when the task is missing, already
            completed, or the coin update fails.
        """
        task = self.db.db[self.config.TASKS_COLLECTION].find_one({"task_id": task_id})

        # .get() instead of task['status']: documents lacking a "status"
        # field previously raised KeyError here.
        if not task or task.get('status') == 'completed':
            return 0

        coins = self.calculate_task_reward(
            task['task_type'],
            task.get('difficulty', 'medium'),
            completion_data
        )

        # Persist completion before crediting coins.
        self.db.complete_task(task_id, completion_data)

        reward_type = f"task_completion_{task['task_type']}"
        if self.db.update_user_coins(user_id, coins, reward_type):
            return coins

        return 0

    def get_reward_summary(self, user_id: str) -> Dict:
        """Summarize a user's coin balance and task counts for display.

        Missing user profiles yield zero balances rather than raising.
        """
        user = self.db.get_user_profile(user_id)
        completed_tasks = self.db.get_user_tasks(user_id, "completed")

        return {
            "total_coins": user.get('coins', 0) if user else 0,
            "total_earned": user.get('total_coins_earned', 0) if user else 0,
            "completed_tasks": len(completed_tasks),
            "pending_tasks": len(self.db.get_user_tasks(user_id, "pending"))
        }
src/setup.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Setup script for the Wellness Platform
3
+
4
+ Run this script to initialize the system:
5
+ 1. Install dependencies
6
+ 2. Set up MongoDB
7
+ 3. Pull Ollama models
8
+ 4. Initialize database collections
9
+ """
10
+
11
+ import subprocess
12
+ import sys
13
+ import os
14
+ import json
15
+ from pymongo import MongoClient
16
+
17
def install_requirements():
    """Install the project's Python dependencies from requirements.txt via pip."""
    print("πŸ“¦ Installing Python dependencies...")
    pip_command = [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"]
    subprocess.check_call(pip_command)
    print("βœ… Dependencies installed successfully!")
22
+
23
def setup_mongodb():
    """Create the wellness_platform collections in local MongoDB.

    Returns:
        True on success, False on any connection/creation failure.
    """
    print("πŸƒ Setting up MongoDB...")
    try:
        # Fail fast (5 s) instead of pymongo's ~30 s default server-selection
        # wait when mongod is not running.
        client = MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=5000)
        db = client["wellness_platform"]

        # Fetch the existing names once rather than re-querying per iteration.
        existing = set(db.list_collection_names())
        for collection in ["users", "activity_logs", "suggestions", "rewards"]:
            if collection not in existing:
                db.create_collection(collection)
                print(f"βœ… Created collection: {collection}")

        print("βœ… MongoDB setup complete!")
        return True
    except Exception as e:
        print(f"❌ MongoDB setup failed: {e}")
        return False
42
+
43
def setup_ollama():
    """Pull the granite Ollama model; returns True once it is available.

    NOTE(review): this pulls "granite-code", while src/config.py references
    "granite3.3:8b" — confirm which Ollama model the app actually uses.
    """
    print("πŸ¦™ Setting up Ollama models...")
    try:
        # A failing `ollama list` means the daemon is not reachable.
        status = subprocess.run(["ollama", "list"], capture_output=True, text=True)
        if status.returncode != 0:
            print("❌ Ollama is not running. Please start Ollama first.")
            return False

        print("πŸ“₯ Pulling granite-code model...")
        subprocess.run(["ollama", "pull", "granite-code"], check=True)
        print("βœ… Granite model ready!")
        return True
    except FileNotFoundError:
        print("❌ Ollama not found. Please install Ollama first.")
        return False
    except subprocess.CalledProcessError as e:
        print(f"❌ Ollama setup failed: {e}")
        return False
64
+
65
def create_env_file():
    """Write a default .env file in the current directory (skipped if present)."""
    import textwrap  # local import: only needed by this setup step

    print("πŸ“ Creating environment configuration...")
    # dedent() strips the function-body indentation that the original
    # triple-quoted literal wrote into the file, which left every .env
    # line prefixed with spaces.
    env_content = textwrap.dedent("""\
        # MongoDB Configuration
        MONGODB_URI=mongodb://localhost:27017/

        # API Keys (update with your actual keys)
        GROQ_API_KEY=your_groq_api_key_here

        # Ollama Configuration
        OLLAMA_BASE_URL=http://localhost:11434/api/generate
        """)

    if not os.path.exists(".env"):
        with open(".env", "w") as f:
            f.write(env_content.strip())
        print("βœ… Created .env file")
    else:
        print("ℹ️ .env file already exists")
85
+
86
def create_sample_user():
    """Insert a demo user profile into MongoDB unless it already exists."""
    print("πŸ‘€ Creating sample user profile...")
    sample_profile = {
        "user_id": "demo_user",
        "Age": 30,
        "Gender": "Male",
        "Occupation": "Engineering",
        "Country": "USA",
        "Consultation_History": "No",
        "Stress_Level": "Medium",
        "Sleep_Hours": 7.5,
        "Work_Hours": 40,
        "Physical_Activity_Hours": 3.0,
        "Social_Media_Usage": 2.0,
        "Diet_Quality": "Average",
        "Smoking_Habit": "Non-Smoker",
        "Alcohol_Consumption": "Social Drinker",
        "Medication_Usage": "No",
        "coins": 50,
        "total_coins_earned": 50
    }

    try:
        db = MongoClient("mongodb://localhost:27017/")["wellness_platform"]
        already_present = db.users.find_one({"user_id": "demo_user"})
        if already_present:
            print("ℹ️ Sample user already exists")
        else:
            db.users.insert_one(sample_profile)
            print("βœ… Sample user created!")
    except Exception as e:
        print(f"❌ Failed to create sample user: {e}")
121
+
122
def main():
    """Run the full setup sequence and print follow-up instructions."""
    print("🌟 Welcome to Wellness Platform Setup!")
    print("=" * 50)

    install_requirements()
    print()

    create_env_file()
    print()

    # The sample user only makes sense once the database is reachable.
    if setup_mongodb():
        create_sample_user()
    print()

    setup_ollama()
    print()

    print("πŸŽ‰ Setup Complete!")
    print("=" * 50)
    print("Next steps:")
    for step in (
        "1. Update your GROQ_API_KEY in the .env file",
        "2. Make sure MongoDB is running (mongod)",
        "3. Make sure Ollama is running (ollama serve)",
        "4. Run the application: streamlit run main_app.py",
    ):
        print(step)
    print()
    print("πŸš€ Happy wellness tracking!")


if __name__ == "__main__":
    main()