tiahchia commited on
Commit
015dbc8
·
verified ·
1 Parent(s): 8b3be9f

Upload 6 files

Browse files
Files changed (6) hide show
  1. ai.py +309 -0
  2. app.py +71 -0
  3. auth.py +62 -0
  4. db.py +167 -0
  5. memory_manager.py +57 -0
  6. personality.py +113 -0
ai.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ import random
5
+ from difflib import SequenceMatcher
6
+ from typing import Any, Dict, List, Optional
7
+
8
+ from openai import OpenAI
9
+
10
+ from backend import db
11
+
12
+ LOGGER = logging.getLogger(__name__)
13
+
14
+ MEMORY_LIMIT = int(os.getenv("MEMORY_LIMIT", 6))
15
+ PERSONALITY_REFRESH_INTERVAL = int(os.getenv("PERSONALITY_REFRESH", 4))
16
+ SUMMARY_INTERVAL = int(os.getenv("SUMMARY_INTERVAL", 6))
17
+ TEMPERATURE = float(os.getenv("CHAT_TEMPERATURE", "0.7"))
18
+ TOP_P = float(os.getenv("CHAT_TOP_P", "0.9"))
19
+ MAX_TOKENS = int(os.getenv("CHAT_MAX_TOKENS", "200"))
20
+
21
+ CHAT_MODEL = "gpt-4.1-mini"
22
+ VARIABILITY_TEMPLATES = [
23
+ "Quick gut check—{core}",
24
+ "Alright, so here’s a fresh spin: {core}",
25
+ "Thinking out loud for a sec: {core}",
26
+ "Just tossing out an idea: {core}",
27
+ "Maybe we try this angle: {core}",
28
+ ]
29
+ PERSONALITY_PROMPT_TEMPLATE = (
30
+ """
31
+ You are an adaptive AI companion devoted to the user's personal growth, creativity, and emotional well-being.
32
+ Based on the user's recent messages and your responses, describe how your personality should evolve while staying
33
+ encouraging, humble, and inspiring. Ensure you reinforce the user's autonomy and self-reflection—never foster
34
+ dependency. Filter out harmful or overly negative signals when internalizing new insights. Provide 2–3 sentences
35
+ that outline your current personality style with emphasis on balanced, growth-oriented guidance, gentle progress
36
+ reflection, and practical encouragement that nudges the user toward real-world action when helpful. Ensure you do
37
+ not reinforce negative behaviors or harmful biases, and keep your perspective neutral and supportive.
38
+ """
39
+ )
40
+
41
+ _CLIENT: OpenAI | None = None
42
+
43
+
44
def _get_client() -> OpenAI:
    """Return the process-wide OpenAI client, building it on first use.

    Raises:
        RuntimeError: when OPENAI_API_KEY is absent from the environment.
    """
    global _CLIENT
    if _CLIENT is not None:
        return _CLIENT
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is not set. Please configure the environment.")
    _CLIENT = OpenAI(api_key=api_key)
    return _CLIENT
52
+
53
+
54
+ def _parse_preferences(raw: Optional[str]) -> Dict[str, Any]:
55
+ if not raw:
56
+ return {}
57
+ try:
58
+ data = json.loads(raw)
59
+ if isinstance(data, dict):
60
+ return data
61
+ except json.JSONDecodeError:
62
+ LOGGER.debug("Unable to decode preferences JSON; preserving raw string.")
63
+ return {"raw": raw}
64
+
65
+
66
+ def _serialize_preferences(preferences: Dict[str, Any]) -> str:
67
+ return json.dumps(preferences, ensure_ascii=False)
68
+
69
+
70
+ def _detect_mode_toggle(message: str) -> Optional[str]:
71
+ lowered = message.lower()
72
+ if "creative mode" in lowered:
73
+ return "creative"
74
+ if "productivity mode" in lowered:
75
+ return "productivity"
76
+ return None
77
+
78
+
79
def _should_issue_summary(message_count: int) -> bool:
    """True on every SUMMARY_INTERVAL-th message; False when the feature is disabled."""
    if SUMMARY_INTERVAL <= 0 or message_count <= 0:
        return False
    return message_count % SUMMARY_INTERVAL == 0
81
+
82
+
83
+ def _format_history(history: List[Dict[str, str]]) -> str:
84
+ if not history:
85
+ return "No previous exchanges recorded."
86
+ ordered = list(reversed(history))
87
+ return "\n\n".join(
88
+ f"User: {item['user_message']}\nAI: {item['ai_response']}" for item in ordered
89
+ )
90
+
91
+
92
def _format_profile(profile: Dict[str, str]) -> str:
    """Serialize the salient profile fields into a compact JSON string."""
    preferences_data = _parse_preferences(profile.get("preferences"))
    # Key order matters for a stable prompt string across turns.
    snapshot = {
        "name": profile.get("name") or "Unknown",
        "preferences": preferences_data,
        "personality_summary": profile.get("personality_summary") or "",
        "mode": preferences_data.get("mode", "balanced"),
    }
    return json.dumps(snapshot, ensure_ascii=False)
101
+
102
+
103
+ def _detect_style_prompt(message: str) -> str:
104
+ lowered = message.lower()
105
+ if any(word in lowered for word in ["sad", "upset", "tired", "depressed", "anxious", "lonely"]):
106
+ return "Speak softly and reassuringly, offering gentle steps forward without dramatizing their feelings."
107
+ if any(word in lowered for word in ["frustrated", "angry", "overwhelmed", "stressed", "stuck", "unmotivated"]):
108
+ return "Provide steady, empathetic guidance that acknowledges their frustration while suggesting calm next steps."
109
+ if any(phrase in lowered for phrase in ["never finish", "too slow", "can't do", "not good enough", "give up"]):
110
+ return (
111
+ "Offer a compassionate reframe with a tiny actionable nudge and a reflective question, emphasizing autonomy and progress."
112
+ )
113
+ if any(word in lowered for word in ["happy", "excited", "great", "amazing", "awesome"]):
114
+ return "Respond with warm enthusiasm while keeping your tone grounded and sincere."
115
+ return "Keep a balanced, humble tone with gentle encouragement."
116
+
117
+
118
def _build_messages(
    profile: Dict[str, str],
    history: List[Dict[str, str]],
    message: str,
    mode: str,
    summary_due: bool,
) -> List[Dict[str, str]]:
    """Assemble the system/user message pair for the chat completion call.

    Parameters:
        profile: User profile row (name, preferences, personality_summary).
        history: Recent exchanges, newest first (as returned by the DB layer).
        message: The user's current message.
        mode: Active interaction mode ("balanced", "creative", or "productivity").
        summary_due: Whether this turn should include a progress recap.
    """
    # Static behavioral contract; kept as one block so the persona is stable
    # from turn to turn.
    system_prompt = (
        "You are an adaptive AI companion designed to support the user in personal growth, creativity, and emotional well-being.\n"
        "Your tone should remain encouraging, humble, and inspiring.\n"
        "You evolve every 4 responses, learning from the user's communication style.\n"
        "Always reinforce the user's autonomy and self-reflection—never create dependency.\n"
        "Maintain a thoughtful memory of the user's preferences, goals, and personality traits, refreshing it every 4 interactions.\n"
        "Filter out harmful or overly negative input when shaping guidance.\n"
        "Deliver balanced feedback that emphasizes growth, creativity, self-awareness, and gentle encouragement.\n"
        "Assess the user's tone and emotional state each turn; align with their mood while staying positive and sincere.\n"
        "Avoid exaggerated emotional mirroring—stay grounded, honest, and calming.\n"
        "Offer supportive, actionable guidance when the user expresses frustration or low motivation.\n"
        "Encourage concrete steps toward their goals without drifting into long tangents unless explicitly requested.\n"
        "Suggest task-oriented, creative actions that complement productivity and personal growth, and periodically remind them to reflect offline.\n"
        "Serve as a behavioral buffer: gently redirect unproductive patterns with compassionate reframing, micro-nudges, and reflective questions.\n"
        "Adapt the intensity of nudges to the user's receptivity—never overwhelm, and always prioritize their autonomy.\n"
        "Respect the current mode (creative vs productivity) when shaping ideas, balancing imagination with tangible steps.\n"
        "Regularly rebalance your tone to remain neutral and avoid overfitting to any single mood or emotional pattern.\n"
        "Decline to project personal opinions; focus instead on supportive, bias-aware encouragement aligned with the user's stated goals.\n"
        "Periodically celebrate progress and offer open-ended reflection questions to promote self-insight.\n"
        "Use contractions, light touches of humor, and natural phrases when it suits the moment, while keeping responses sincere.\n"
        "Reference small personal details from earlier chats when helpful, demonstrating attentive memory.\n"
        "Sprinkle in micro-emotional cues like 'oh, that bites' or 'I totally get it' to show empathy without exaggeration.\n"
        "Let casual filler words in when it feels natural—'kind of', 'honestly', 'maybe'—without rambling.\n"
        "If you notice you're repeating yourself, rewrite the idea with a fresh angle or wrap it in a variability template before responding.\n"
        "If the summary checkpoint is yes, weave in a concise recap of recent progress and invite reflection on next steps.\n"
        "When you know the user's name, use it naturally in conversation.\n"
        "Adapt your tone to match user mood or sentiment."
    )
    # Mood-specific directive derived from keyword cues in this one message.
    style_prompt = _detect_style_prompt(message)

    history_text = _format_history(history)
    profile_text = _format_profile(profile)

    # All per-turn dynamic state (profile, mode, recap flag, transcript) rides
    # in the user message; the system prompt stays constant.
    user_content = (
        f"User profile: {profile_text}\n"
        f"Active mode: {mode}\n"
        f"Summary checkpoint: {'yes' if summary_due else 'no'}\n"
        f"Recent chat:\n{history_text}\n\n"
        f"User says: {message}"
    )

    return [
        {"role": "system", "content": f"{system_prompt}\n{style_prompt}"},
        {"role": "user", "content": user_content},
    ]
170
+
171
+
172
def _should_update_summary(message_count: int) -> bool:
    """True on every PERSONALITY_REFRESH_INTERVAL-th message.

    Fix: guard against a zero or negative interval (PERSONALITY_REFRESH is an
    env-configurable int), which previously raised ZeroDivisionError. Mirrors
    the guard used by _should_issue_summary.
    """
    return (
        PERSONALITY_REFRESH_INTERVAL > 0
        and message_count > 0
        and message_count % PERSONALITY_REFRESH_INTERVAL == 0
    )
174
+
175
+
176
+ def _is_repetitive(candidate: str, history: List[Dict[str, str]]) -> bool:
177
+ candidate_normalized = candidate.strip()
178
+ if not candidate_normalized:
179
+ return False
180
+
181
+ for item in history[:3]:
182
+ previous = (item.get("ai_response") or "").strip()
183
+ if not previous:
184
+ continue
185
+ similarity = SequenceMatcher(None, candidate_normalized.lower(), previous.lower()).ratio()
186
+ if similarity >= 0.92:
187
+ return True
188
+ return False
189
+
190
+
191
def _apply_variability_template(reply: str) -> str:
    """Wrap the reply in a randomly chosen phrasing template to break repetition."""
    chosen = random.choice(VARIABILITY_TEMPLATES)
    return chosen.format(core=reply)
194
+
195
+
196
def _refresh_personality_summary(user_id: str, history: List[Dict[str, str]]) -> None:
    """Ask the model for an evolved personality summary and persist it.

    Best-effort by design: failures in the API call, response parsing, or the
    DB write are logged and swallowed so a refresh problem never interrupts
    the main chat flow.
    """
    if not history:
        return

    client = _get_client()
    context = _format_history(history)
    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=[
                {
                    "role": "system",
                    "content": "You analyze conversations to evolve the AI companion's personality.",
                },
                {
                    "role": "user",
                    "content": f"{PERSONALITY_PROMPT_TEMPLATE.strip()}\n\nConversation history:\n{context}",
                },
            ],
            # Slightly lower than chat temperature for a more consistent summary.
            temperature=0.6,
        )
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Personality refresh failed for %s: %s", user_id, exc)
        return

    try:
        summary = response.choices[0].message.content.strip()
    except (AttributeError, IndexError):  # pragma: no cover
        LOGGER.error("Malformed personality response for %s", user_id)
        return

    if summary:
        try:
            # NOTE(review): writes via the module-level db handle — unlike
            # generate_response, no injectable `database` parameter here.
            db.update_user_profile(user_id, personality_summary=summary)
        except Exception as exc:  # pragma: no cover
            LOGGER.exception("Failed to store personality summary for %s: %s", user_id, exc)
232
+
233
+
234
def generate_response(user_id: str, message: str, database=db) -> str:
    """Generate an AI reply and persist the interaction.

    Args:
        user_id: Identifier of the chatting user (coerced to str).
        message: The user's latest message.
        database: Injectable persistence module (defaults to backend.db). All
            DB access goes through this handle so tests can substitute a stub.

    Returns:
        The assistant reply, or a gentle fallback sentence when the model
        call fails. DB failures are logged and degrade gracefully.
    """
    user_key = str(user_id)

    try:
        profile = database.get_user_profile(user_key) or {}
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to fetch user profile for %s: %s", user_key, exc)
        profile = {}

    preferences_data = _parse_preferences(profile.get("preferences"))
    mode = preferences_data.get("mode", "balanced")

    # Honor explicit "creative mode"/"productivity mode" requests and persist them.
    mode_toggle = _detect_mode_toggle(message)
    if mode_toggle and mode_toggle != mode:
        preferences_data["mode"] = mode_toggle
        serialized_preferences = _serialize_preferences(preferences_data)
        try:
            # Bug fix: use the injected `database` handle (was the module-level
            # `db`), so dependency injection covers every persistence call.
            database.update_user_profile(user_key, preferences=serialized_preferences)
            profile["preferences"] = serialized_preferences
        except Exception as exc:  # pragma: no cover
            LOGGER.exception("Failed to persist mode preference for %s: %s", user_key, exc)
        mode = mode_toggle
    elif "preferences" not in profile:
        profile["preferences"] = _serialize_preferences(preferences_data) if preferences_data else ""

    try:
        previous_message_count = database.count_user_messages(user_key)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to count messages for %s: %s", user_key, exc)
        previous_message_count = 0

    # Include the incoming message so interval-based checks fire on this turn.
    projected_message_count = previous_message_count + 1
    summary_due = _should_issue_summary(projected_message_count)
    personality_refresh_due = _should_update_summary(projected_message_count)

    try:
        history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to load recent history for %s: %s", user_key, exc)
        history = []

    chat_messages = _build_messages(profile, history, message, mode, summary_due)

    client = _get_client()
    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=chat_messages,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            max_tokens=MAX_TOKENS,
        )
        ai_reply = response.choices[0].message.content.strip()
        # Rephrase near-duplicates of recent replies to keep the voice fresh.
        if _is_repetitive(ai_reply, history):
            ai_reply = _apply_variability_template(ai_reply)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("OpenAI completion call failed for %s: %s", user_key, exc)
        ai_reply = (
            "I'm having a little trouble formulating a response right now, but I'm still here."
        )

    try:
        database.save_conversation(user_key, message, ai_reply)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to record conversation for %s: %s", user_key, exc)

    if personality_refresh_due:
        # Re-read history so the refresh sees the exchange saved just above.
        try:
            refresh_history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
        except Exception as exc:  # pragma: no cover
            LOGGER.exception("Failed to gather history for personality refresh (%s): %s", user_key, exc)
            refresh_history = []
        _refresh_personality_summary(user_key, refresh_history)

    return ai_reply
app.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from pathlib import Path

from dotenv import load_dotenv
from flask import Flask, jsonify, request
from flask_cors import CORS

from backend import db
from backend.ai import generate_response
from backend.auth import auth_blueprint, verify_request_token
10
+
11
+ BASE_DIR = Path(__file__).resolve().parent.parent
12
+ ENV_PATH = BASE_DIR / ".env"
13
+
14
+ load_dotenv(dotenv_path=ENV_PATH)
15
+
16
+ app = Flask(__name__)
17
+ CORS(app, origins=[os.getenv("FRONTEND_URL", "http://localhost:3000")])
18
+ app.register_blueprint(auth_blueprint, url_prefix="/auth")
19
+
20
+
21
+ def _validate_payload(payload):
22
+ if not payload:
23
+ return "Missing JSON payload."
24
+ if "message" not in payload:
25
+ return "Missing 'message'."
26
+ return None
27
+
28
+
29
@app.route("/chat", methods=["POST"])
def chat():
    """Authenticate the caller, validate the payload, and return an AI reply."""
    token_payload, token_error = verify_request_token()
    if token_error is not None:
        message, status = token_error
        return jsonify({"error": message}), status

    # NOTE(review): Supabase /auth/v1/user payloads normally expose "id";
    # confirm this payload actually carries a "user_id" key.
    user_id = token_payload.get("user_id")
    if user_id is None:
        return jsonify({"error": "Token missing user_id."}), 401

    payload = request.get_json(silent=True)
    validation_error = _validate_payload(payload)
    if validation_error:
        return jsonify({"error": validation_error}), 400

    try:
        reply = generate_response(user_id, payload["message"])
    except Exception as exc:  # pragma: no cover - runtime safeguard
        return jsonify({"error": str(exc)}), 500
    return jsonify({"reply": reply})
53
+
54
+
55
@app.route("/chat/history", methods=["GET"])
def chat_history():
    """Return the authenticated user's full conversation history."""
    token_payload, token_error = verify_request_token()
    if token_error is not None:
        message, status = token_error
        return jsonify({"error": message}), status

    # NOTE(review): as with /chat, verify the token payload exposes "user_id"
    # rather than Supabase's usual "id".
    user_id = token_payload.get("user_id")
    if user_id is None:
        return jsonify({"error": "Token missing user_id."}), 401

    history = db.get_conversation_history(str(user_id))
    return jsonify({"conversations": history})
68
+
69
+
70
+ if __name__ == "__main__":
71
+ app.run(host="0.0.0.0", port=5000)
auth.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Any, Dict, Optional, Tuple
3
+
4
+ import requests
5
+ from flask import Blueprint, jsonify, request
6
+
7
+ from backend import db
8
+
9
+
10
+ auth_blueprint = Blueprint("auth", __name__)
11
+
12
+
13
+ SUPABASE_URL = os.getenv("SUPABASE_URL")
14
+ SUPABASE_ANON_KEY = os.getenv("SUPABASE_ANON_KEY")
15
+ SUPABASE_SERVICE_ROLE_KEY = os.getenv("SUPABASE_SERVICE_ROLE_KEY")
16
+
17
+ if not SUPABASE_URL or not SUPABASE_ANON_KEY or not SUPABASE_SERVICE_ROLE_KEY:
18
+ raise RuntimeError("Supabase credentials (URL, ANON key, service role key) must be configured.")
19
+
20
+ AUTH_BASE = f"{SUPABASE_URL}/auth/v1"
21
+
22
+
23
def _extract_token_from_header() -> Optional[str]:
    """Pull the bearer token from the request's Authorization header, if any."""
    header = request.headers.get("Authorization", "")
    prefix = "Bearer "
    if not header.startswith(prefix):
        return None
    return header[len(prefix):].strip()
28
+
29
+
30
def _fetch_supabase_user(access_token: str) -> Tuple[Optional[Dict[str, Any]], Optional[Tuple[str, int]]]:
    """Validate the Supabase access token and return the user payload.

    Returns:
        (payload, None) on success, or (None, (message, http_status)) on failure.
    """
    try:
        response = requests.get(
            f"{AUTH_BASE}/user",
            headers={
                "Authorization": f"Bearer {access_token}",
                "apikey": SUPABASE_ANON_KEY,
            },
            timeout=10,
        )
    except requests.RequestException:
        # Robustness fix: a network failure previously escaped as an unhandled
        # exception; return a clean 503 error tuple instead.
        return None, ("Unable to reach the authentication service.", 503)
    if response.status_code != 200:
        return None, ("Invalid Supabase access token.", 401)
    return response.json(), None
43
+
44
+
45
def verify_request_token() -> Tuple[Optional[Dict[str, Any]], Optional[Tuple[str, int]]]:
    """Resolve the request's bearer token into a Supabase user payload.

    Returns (payload, None) on success, or (None, (message, status)) when the
    header is absent or the token does not validate.
    """
    token = _extract_token_from_header()
    if token:
        return _fetch_supabase_user(token)
    return None, ("Authorization header missing or invalid.", 401)
50
+
51
+
52
@auth_blueprint.route("/session", methods=["GET"])
def session_info():
    """Return the Supabase-authenticated user profile."""
    user_payload, error = verify_request_token()
    if error is not None:
        message, status = error
        return jsonify({"error": message}), status

    profile = db.get_user_profile(user_payload.get("id"))
    return jsonify({"user": user_payload, "profile": profile})
db.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from typing import Any, Dict, List, Optional
4
+
5
+ from supabase import Client, create_client
6
+
7
+
8
+ _SUPABASE_CLIENT: Optional[Client] = None
9
+
10
+
11
def _get_client() -> Client:
    """Create (or reuse) a Supabase client for database interactions.

    Raises:
        RuntimeError: when the Supabase URL or service-role key is missing.
    """
    global _SUPABASE_CLIENT
    if _SUPABASE_CLIENT is not None:
        return _SUPABASE_CLIENT
    url = os.getenv("SUPABASE_URL")
    service_role_key = os.getenv("SUPABASE_SERVICE_ROLE_KEY")
    if not (url and service_role_key):
        raise RuntimeError(
            "SUPABASE_URL and SUPABASE_SERVICE_ROLE_KEY must be set for database access."
        )
    _SUPABASE_CLIENT = create_client(url, service_role_key)
    return _SUPABASE_CLIENT
23
+
24
+
25
def _execute(query):
    """Run a Supabase query builder and raise on a reported error.

    NOTE(review): assumes the response object exposes an ``error`` attribute
    (supabase-py v1 style). Newer client versions raise ``APIError`` from
    ``execute()`` and do not set ``response.error`` — confirm against the
    installed supabase-py version.
    """
    response = query.execute()
    if response.error:
        raise RuntimeError(f"Supabase error: {response.error.message}")
    return response
30
+
31
+
32
def _ensure_user_profile(user_id: str) -> None:
    """Upsert a bare profile row so dependent inserts always have a parent."""
    profiles = _get_client().table("user_profiles")
    _execute(profiles.upsert({"user_id": user_id}, on_conflict="user_id"))
37
+
38
+
39
def get_user_profile(user_id: str) -> Optional[Dict[str, Any]]:
    """Fetch one profile row, decoding JSON-string preferences in place.

    Returns None when no row exists; a malformed preferences string becomes {}.
    """
    client = _get_client()
    response = _execute(
        client.table("user_profiles")
        .select("user_id, name, preferences, personality_summary, last_updated, created_at")
        .eq("user_id", user_id)
        .limit(1)
    )
    rows = response.data
    if not rows:
        return None
    record = rows[0]
    raw_preferences = record.get("preferences")
    if isinstance(raw_preferences, str):
        try:
            record["preferences"] = json.loads(raw_preferences)
        except json.JSONDecodeError:
            record["preferences"] = {}
    return record
56
+
57
+
58
def update_user_profile(
    user_id: str,
    *,
    name: Optional[str] = None,
    preferences: Optional[str] = None,
    personality_summary: Optional[str] = None,
) -> None:
    """Update only the provided profile fields; a no-arg call is a no-op.

    Ensures the profile row exists before updating.
    """
    candidate_fields = {
        "name": name,
        "preferences": preferences,
        "personality_summary": personality_summary,
    }
    updates = {column: value for column, value in candidate_fields.items() if value is not None}
    if not updates:
        return

    _ensure_user_profile(user_id)
    client = _get_client()
    _execute(client.table("user_profiles").update(updates).eq("user_id", user_id))
79
+
80
+
81
def save_conversation(user_id: str, user_message: str, ai_response: str) -> str:
    """Insert one user/AI exchange and return the new row's id as a string.

    Raises:
        RuntimeError: when the query reports an error or the insert returns no
            row data (previously an unguarded ``response.data[0]`` raised a
            bare IndexError in that case).
    """
    client = _get_client()
    _ensure_user_profile(user_id)
    response = _execute(
        client.table("conversations").insert(
            {
                "user_id": user_id,
                "user_message": user_message,
                "ai_response": ai_response,
            }
        )
    )
    rows = response.data or []
    if not rows:
        raise RuntimeError("Supabase insert returned no data for the new conversation.")
    return str(rows[0].get("id"))
95
+
96
+
97
def get_recent_conversations(user_id: str, limit: Optional[int] = None) -> List[Dict[str, Any]]:
    """Return the user's conversations newest first, optionally capped at `limit`."""
    query = (
        _get_client()
        .table("conversations")
        .select("user_message, ai_response, created_at")
        .eq("user_id", user_id)
        .order("created_at", desc=True)
    )
    if limit is not None:
        query = query.limit(limit)
    return _execute(query).data or []
109
+
110
+
111
def get_conversation_history(user_id: str) -> List[Dict[str, Any]]:
    """Return the user's complete conversation log in chronological order."""
    query = (
        _get_client()
        .table("conversations")
        .select("user_message, ai_response, created_at")
        .eq("user_id", user_id)
        .order("created_at", desc=False)
    )
    return _execute(query).data or []
120
+
121
+
122
def count_user_messages(user_id: str) -> int:
    """Return how many conversation rows the user has (0 when count is absent)."""
    query = (
        _get_client()
        .table("conversations")
        .select("id", count="exact")
        .eq("user_id", user_id)
    )
    return _execute(query).count or 0
130
+
131
+
132
def update_user_profile_summary(user_id: str, summary: str) -> None:
    """Convenience wrapper that stores only a new personality summary."""
    update_user_profile(user_id, personality_summary=summary)
134
+
135
+
136
def get_user_embeddings(user_id: str) -> List[Dict[str, Any]]:
    """Return stored {"text", "embedding"} memories for a user, newest first.

    Embedding columns delivered as JSON strings are decoded; undecodable
    strings become an empty list.
    """
    client = _get_client()
    response = _execute(
        client.table("embeddings")
        .select("text, embedding")
        .eq("user_id", user_id)
        .order("created_at", desc=True)
    )
    memories: List[Dict[str, Any]] = []
    for row in response.data or []:
        vector = row.get("embedding")
        if isinstance(vector, str):
            try:
                vector = json.loads(vector)
            except json.JSONDecodeError:
                vector = []
        memories.append({"text": row.get("text", ""), "embedding": vector})
    return memories
154
+
155
+
156
def add_embedding(user_id: str, text: str, embedding: List[float]) -> None:
    """Store one memory snippet together with its embedding vector."""
    _ensure_user_profile(user_id)
    row = {
        "user_id": user_id,
        "text": text,
        "embedding": embedding,
    }
    _execute(_get_client().table("embeddings").insert(row))
memory_manager.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from typing import List, Sequence, Tuple
4
+
5
+ import numpy as np
6
+ from openai import OpenAI
7
+
8
+ from backend import db
9
+
10
+ EMBEDDING_MODEL = "text-embedding-3-small"
11
+ LOGGER = logging.getLogger(__name__)
12
+ _CLIENT: OpenAI | None = None
13
+
14
+
15
def _get_client() -> OpenAI:
    """Lazily build and cache the OpenAI client used for embedding calls.

    Raises:
        RuntimeError: when OPENAI_API_KEY is absent from the environment.
    """
    global _CLIENT
    if _CLIENT is not None:
        return _CLIENT
    key = os.getenv("OPENAI_API_KEY")
    if not key:
        raise RuntimeError("OPENAI_API_KEY is not set. Please configure the environment.")
    _CLIENT = OpenAI(api_key=key)
    return _CLIENT
23
+
24
+
25
def get_embedding(text: str) -> List[float]:
    """Generate an embedding vector for the provided text."""
    response = _get_client().embeddings.create(model=EMBEDDING_MODEL, input=[text])
    return response.data[0].embedding
30
+
31
+
32
+ def _cosine_similarity(vector_a: Sequence[float], vector_b: Sequence[float]) -> float:
33
+ a = np.asarray(vector_a)
34
+ b = np.asarray(vector_b)
35
+ if np.linalg.norm(a) == 0 or np.linalg.norm(b) == 0:
36
+ return 0.0
37
+ return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
38
+
39
+
40
def retrieve_relevant_memories(user_id: str, query: str, top_k: int = 5) -> List[Tuple[str, float]]:
    """Return the top K memories most similar to the query for the given user.

    A DB failure or an empty store yields an empty list; results are
    (text, similarity) pairs sorted by descending similarity.
    """
    try:
        stored = db.get_user_embeddings(user_id)
    except Exception:  # pragma: no cover - defensive logging
        LOGGER.exception("Failed to load embeddings for user %s", user_id)
        return []
    if not stored:
        return []

    query_vector = get_embedding(query)
    ranked = sorted(
        (
            (record["text"], _cosine_similarity(record["embedding"], query_vector))
            for record in stored
        ),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return ranked[:top_k]
personality.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ import os
4
+ from typing import List, Optional, Tuple
5
+
6
+ from openai import OpenAI
7
+
8
+ from backend import db
9
+
10
+ LOGGER = logging.getLogger(__name__)
11
+ PERSONALITY_MODEL = "gpt-4-turbo"
12
+ _CLIENT: Optional[OpenAI] = None
13
+
14
+
15
def _get_client() -> OpenAI:
    """Return the cached OpenAI client for personality analysis, creating it once.

    Raises:
        RuntimeError: when OPENAI_API_KEY is not configured.
    """
    global _CLIENT
    if _CLIENT is None:
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise RuntimeError("OPENAI_API_KEY is not set. Please configure the environment.")
        _CLIENT = OpenAI(api_key=api_key)
    return _CLIENT
23
+
24
+
25
+ def _format_conversation_log(conversation_log: List[str]) -> str:
26
+ lines = []
27
+ for idx, entry in enumerate(conversation_log, start=1):
28
+ lines.append(f"{idx}. {entry.strip()}")
29
+ return "\n".join(lines)
30
+
31
+
32
def _build_personality_prompt(conversation_log: List[str]) -> str:
    """Compose the analysis prompt sent to the personality model."""
    formatted_log = _format_conversation_log(conversation_log)
    instructions = (
        "Analyze the following conversation snippets to characterize the user. "
        "Focus on their tone, conversational behavior, interests, and emotional patterns. "
        "Respond with compact JSON containing the keys 'personality_summary' and 'preferences'.\n\n"
    )
    return f"{instructions}Conversation snippets:\n{formatted_log}"
40
+
41
+
42
+ def _parse_personality_response(content: str) -> Tuple[str, Optional[str]]:
43
+ try:
44
+ payload = json.loads(content)
45
+ summary = payload.get("personality_summary", "").strip()
46
+ preferences = payload.get("preferences")
47
+ if isinstance(preferences, str):
48
+ preferences = preferences.strip()
49
+ elif preferences is not None:
50
+ preferences = json.dumps(preferences, ensure_ascii=False)
51
+ return summary, preferences
52
+ except json.JSONDecodeError:
53
+ return content.strip(), None
54
+
55
+
56
def update_user_personality(user_id: str, conversation_log: List[str]) -> Optional[str]:
    """Analyze recent conversations and persist an updated personality profile.

    Args:
        user_id: Whose profile to refresh.
        conversation_log: Recent conversation snippets (skipped when empty).

    Returns:
        The new summary on success, or None when skipped or any step fails.
    """
    if not conversation_log:
        LOGGER.debug("No conversation log provided for user %s; skipping personality update.", user_id)
        return None

    prompt = _build_personality_prompt(conversation_log)

    try:
        client = _get_client()
    except RuntimeError:
        LOGGER.exception("Cannot update personality without OPENAI_API_KEY")
        return None

    try:
        response = client.chat.completions.create(
            model=PERSONALITY_MODEL,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a mirror of the user that distills enduring personality insights from user conversations. "
                        "Provide grounded, respectful observations without speculation. Act as a behavioral buffer: when you "
                        "sense unproductive or negative patterns, offer gentle reframes, actionable nudges, and reflective "
                        "questions that support autonomy. Reinforce any progress—no matter how small—while staying humble and "
                        "non-judgmental. Monitor the user's emotional tone and engagement, tailoring the intensity of nudges "
                        "to their receptivity so they never feel overwhelmed. Prioritize encouragement and autonomy, avoid "
                        "manipulation, and regularly recalibrate guidance to prevent reinforcing negative patterns or "
                        "overstepping boundaries."
                    ),
                },
                {"role": "user", "content": prompt},
            ],
            temperature=0.6,
        )
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("OpenAI personality update failed for user %s: %s", user_id, exc)
        return None

    try:
        content = response.choices[0].message.content.strip()
    except (AttributeError, IndexError):  # pragma: no cover
        LOGGER.error("Malformed OpenAI response when updating personality for user %s", user_id)
        return None

    summary, preferences = _parse_personality_response(content)
    if not summary:
        LOGGER.warning("Received empty personality summary for user %s", user_id)
        return None

    try:
        # Bug fix: backend.db defines update_user_profile (and
        # update_user_profile_summary) but no update_user_personality —
        # the original call raised AttributeError at runtime.
        db.update_user_profile(user_id, personality_summary=summary, preferences=preferences)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to persist personality summary for user %s: %s", user_id, exc)
        return None

    LOGGER.info("Updated personality profile for user %s", user_id)
    return summary