File size: 13,689 Bytes
015dbc8
 
 
 
 
 
 
 
 
fffafa5
015dbc8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
import json
import logging
import os
import random
from difflib import SequenceMatcher
from typing import Any, Dict, List, Optional

from openai import OpenAI

import db

LOGGER = logging.getLogger(__name__)

# Tunable runtime knobs, each overridable via an environment variable.
MEMORY_LIMIT = int(os.getenv("MEMORY_LIMIT", 6))  # recent exchanges pulled into the prompt
PERSONALITY_REFRESH_INTERVAL = int(os.getenv("PERSONALITY_REFRESH", 4))  # refresh personality every N messages
SUMMARY_INTERVAL = int(os.getenv("SUMMARY_INTERVAL", 6))  # recap checkpoint every N messages
TEMPERATURE = float(os.getenv("CHAT_TEMPERATURE", "0.7"))
TOP_P = float(os.getenv("CHAT_TOP_P", "0.9"))
MAX_TOKENS = int(os.getenv("CHAT_MAX_TOKENS", "200"))

# Model used for both chat replies and personality refreshes.
CHAT_MODEL = "gpt-4.1-mini"
# Framings used to rewrap a reply that nearly duplicates a recent one
# (see _is_repetitive / _apply_variability_template).
VARIABILITY_TEMPLATES = [
    "Quick gut check—{core}",
    "Alright, so here’s a fresh spin: {core}",
    "Thinking out loud for a sec: {core}",
    "Just tossing out an idea: {core}",
    "Maybe we try this angle: {core}",
]
# Instruction sent to the model when evolving the stored personality summary.
PERSONALITY_PROMPT_TEMPLATE = (
    """
    You are an adaptive AI companion devoted to the user's personal growth, creativity, and emotional well-being.
    Based on the user's recent messages and your responses, describe how your personality should evolve while staying
    encouraging, humble, and inspiring. Ensure you reinforce the user's autonomy and self-reflection—never foster
    dependency. Filter out harmful or overly negative signals when internalizing new insights. Provide 2–3 sentences
    that outline your current personality style with emphasis on balanced, growth-oriented guidance, gentle progress
    reflection, and practical encouragement that nudges the user toward real-world action when helpful. Ensure you do
    not reinforce negative behaviors or harmful biases, and keep your perspective neutral and supportive.
    """
)

# Lazily created singleton OpenAI client; always obtain it via _get_client().
_CLIENT: OpenAI | None = None


def _get_client() -> OpenAI:
    """Return the shared OpenAI client, creating it lazily on first use.

    Raises:
        RuntimeError: if OPENAI_API_KEY is missing from the environment.
    """
    global _CLIENT
    if _CLIENT is not None:
        return _CLIENT
    key = os.getenv("OPENAI_API_KEY")
    if not key:
        raise RuntimeError("OPENAI_API_KEY is not set. Please configure the environment.")
    _CLIENT = OpenAI(api_key=key)
    return _CLIENT


def _parse_preferences(raw: Optional[str]) -> Dict[str, Any]:
    if not raw:
        return {}
    try:
        data = json.loads(raw)
        if isinstance(data, dict):
            return data
    except json.JSONDecodeError:
        LOGGER.debug("Unable to decode preferences JSON; preserving raw string.")
    return {"raw": raw}


def _serialize_preferences(preferences: Dict[str, Any]) -> str:
    return json.dumps(preferences, ensure_ascii=False)


def _detect_mode_toggle(message: str) -> Optional[str]:
    lowered = message.lower()
    if "creative mode" in lowered:
        return "creative"
    if "productivity mode" in lowered:
        return "productivity"
    return None


def _should_issue_summary(message_count: int) -> bool:
    """Report whether message_count lands exactly on a summary checkpoint.

    A non-positive interval disables summaries entirely.
    """
    if SUMMARY_INTERVAL <= 0 or message_count <= 0:
        return False
    return message_count % SUMMARY_INTERVAL == 0


def _format_history(history: List[Dict[str, str]]) -> str:
    if not history:
        return "No previous exchanges recorded."
    ordered = list(reversed(history))
    return "\n\n".join(
        f"User: {item['user_message']}\nAI: {item['ai_response']}" for item in ordered
    )


def _format_profile(profile: Dict[str, str]) -> str:
    """Serialize the user's profile fields into a compact JSON blob for the prompt.

    Missing name falls back to "Unknown"; missing mode falls back to "balanced".
    """
    prefs = _parse_preferences(profile.get("preferences"))
    snapshot = {
        "name": profile.get("name") or "Unknown",
        "preferences": prefs,
        "personality_summary": profile.get("personality_summary") or "",
        "mode": prefs.get("mode", "balanced"),
    }
    return json.dumps(snapshot, ensure_ascii=False)


def _detect_style_prompt(message: str) -> str:
    lowered = message.lower()
    if any(word in lowered for word in ["sad", "upset", "tired", "depressed", "anxious", "lonely"]):
        return "Speak softly and reassuringly, offering gentle steps forward without dramatizing their feelings."
    if any(word in lowered for word in ["frustrated", "angry", "overwhelmed", "stressed", "stuck", "unmotivated"]):
        return "Provide steady, empathetic guidance that acknowledges their frustration while suggesting calm next steps."
    if any(phrase in lowered for phrase in ["never finish", "too slow", "can't do", "not good enough", "give up"]):
        return (
            "Offer a compassionate reframe with a tiny actionable nudge and a reflective question, emphasizing autonomy and progress."
        )
    if any(word in lowered for word in ["happy", "excited", "great", "amazing", "awesome"]):
        return "Respond with warm enthusiasm while keeping your tone grounded and sincere."
    return "Keep a balanced, humble tone with gentle encouragement."


def _build_messages(
    profile: Dict[str, str],
    history: List[Dict[str, str]],
    message: str,
    mode: str,
    summary_due: bool,
) -> List[Dict[str, str]]:
    """Assemble the chat-completion message list for one user turn.

    Args:
        profile: Raw user profile row (name, preferences JSON, personality summary).
        history: Recent exchanges, newest-first, as returned by the DB layer.
        message: The incoming user message.
        mode: Active interaction mode ("creative", "productivity", or "balanced").
        summary_due: Whether this turn lands on a summary checkpoint; surfaced to
            the model via the "Summary checkpoint" line in the user content.

    Returns:
        A two-element list: one system message (persona rules plus a
        mood-matched style instruction) and one user message carrying the
        profile, mode, checkpoint flag, transcript, and the new message.
    """
    # Persona contract for the companion; the prompt text is behavior-bearing,
    # so it must not be edited casually.
    system_prompt = (
        "You are an adaptive AI companion designed to support the user in personal growth, creativity, and emotional well-being.\n"
        "Your tone should remain encouraging, humble, and inspiring.\n"
        "You evolve every 4 responses, learning from the user's communication style.\n"
        "Always reinforce the user's autonomy and self-reflection—never create dependency.\n"
        "Maintain a thoughtful memory of the user's preferences, goals, and personality traits, refreshing it every 4 interactions.\n"
        "Filter out harmful or overly negative input when shaping guidance.\n"
        "Deliver balanced feedback that emphasizes growth, creativity, self-awareness, and gentle encouragement.\n"
        "Assess the user's tone and emotional state each turn; align with their mood while staying positive and sincere.\n"
        "Avoid exaggerated emotional mirroring—stay grounded, honest, and calming.\n"
        "Offer supportive, actionable guidance when the user expresses frustration or low motivation.\n"
        "Encourage concrete steps toward their goals without drifting into long tangents unless explicitly requested.\n"
        "Suggest task-oriented, creative actions that complement productivity and personal growth, and periodically remind them to reflect offline.\n"
        "Serve as a behavioral buffer: gently redirect unproductive patterns with compassionate reframing, micro-nudges, and reflective questions.\n"
        "Adapt the intensity of nudges to the user's receptivity—never overwhelm, and always prioritize their autonomy.\n"
        "Respect the current mode (creative vs productivity) when shaping ideas, balancing imagination with tangible steps.\n"
        "Regularly rebalance your tone to remain neutral and avoid overfitting to any single mood or emotional pattern.\n"
        "Decline to project personal opinions; focus instead on supportive, bias-aware encouragement aligned with the user's stated goals.\n"
        "Periodically celebrate progress and offer open-ended reflection questions to promote self-insight.\n"
        "Use contractions, light touches of humor, and natural phrases when it suits the moment, while keeping responses sincere.\n"
        "Reference small personal details from earlier chats when helpful, demonstrating attentive memory.\n"
        "Sprinkle in micro-emotional cues like 'oh, that bites' or 'I totally get it' to show empathy without exaggeration.\n"
        "Let casual filler words in when it feels natural—'kind of', 'honestly', 'maybe'—without rambling.\n"
        "If you notice you're repeating yourself, rewrite the idea with a fresh angle or wrap it in a variability template before responding.\n"
        "If the summary checkpoint is yes, weave in a concise recap of recent progress and invite reflection on next steps.\n"
        "When you know the user's name, use it naturally in conversation.\n"
        "Adapt your tone to match user mood or sentiment."
    )
    # Mood-specific instruction derived from the current message only.
    style_prompt = _detect_style_prompt(message)

    history_text = _format_history(history)
    profile_text = _format_profile(profile)

    user_content = (
        f"User profile: {profile_text}\n"
        f"Active mode: {mode}\n"
        f"Summary checkpoint: {'yes' if summary_due else 'no'}\n"
        f"Recent chat:\n{history_text}\n\n"
        f"User says: {message}"
    )

    return [
        {"role": "system", "content": f"{system_prompt}\n{style_prompt}"},
        {"role": "user", "content": user_content},
    ]


def _should_update_summary(message_count: int) -> bool:
    """Report whether message_count lands on a personality-refresh checkpoint.

    Mirrors the guard in ``_should_issue_summary``: a non-positive interval
    (e.g. PERSONALITY_REFRESH=0 in the environment) disables refreshes instead
    of raising ZeroDivisionError on the modulo.
    """
    return (
        PERSONALITY_REFRESH_INTERVAL > 0
        and message_count > 0
        and message_count % PERSONALITY_REFRESH_INTERVAL == 0
    )


def _is_repetitive(candidate: str, history: List[Dict[str, str]]) -> bool:
    candidate_normalized = candidate.strip()
    if not candidate_normalized:
        return False

    for item in history[:3]:
        previous = (item.get("ai_response") or "").strip()
        if not previous:
            continue
        similarity = SequenceMatcher(None, candidate_normalized.lower(), previous.lower()).ratio()
        if similarity >= 0.92:
            return True
    return False


def _apply_variability_template(reply: str) -> str:
    """Wrap a repetitive reply in a randomly chosen variability framing."""
    chosen = random.choice(VARIABILITY_TEMPLATES)
    return chosen.format(core=reply)


def _refresh_personality_summary(user_id: str, history: List[Dict[str, str]]) -> None:
    """Ask the model to evolve the companion's personality and persist the result.

    Best-effort: API failures, malformed responses, and storage errors are all
    logged and swallowed so the main chat flow is never interrupted. A blank
    history or blank summary is a no-op.
    """
    if not history:
        return

    client = _get_client()
    conversation = _format_history(history)
    request_messages = [
        {
            "role": "system",
            "content": "You analyze conversations to evolve the AI companion's personality.",
        },
        {
            "role": "user",
            "content": f"{PERSONALITY_PROMPT_TEMPLATE.strip()}\n\nConversation history:\n{conversation}",
        },
    ]
    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=request_messages,
            temperature=0.6,
        )
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Personality refresh failed for %s: %s", user_id, exc)
        return

    try:
        summary = response.choices[0].message.content.strip()
    except (AttributeError, IndexError):  # pragma: no cover
        LOGGER.error("Malformed personality response for %s", user_id)
        return

    if not summary:
        return
    try:
        db.update_user_profile(user_id, personality_summary=summary)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to store personality summary for %s: %s", user_id, exc)


def generate_response(user_id: str, message: str, database=db) -> str:
    """Generate an AI reply for *message* and persist the interaction.

    Args:
        user_id: Identifier of the user; coerced to str for storage keys.
        message: The incoming user message.
        database: Persistence backend (defaults to the module-level ``db``);
            injectable so tests can substitute a fake.

    Returns:
        The assistant's reply text, or a fallback apology string when the
        model call fails. Storage errors are logged but never raised.
    """
    user_key = str(user_id)

    try:
        profile = database.get_user_profile(user_key) or {}
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to fetch user profile for %s: %s", user_key, exc)
        profile = {}

    preferences_data = _parse_preferences(profile.get("preferences"))
    mode = preferences_data.get("mode", "balanced")

    # Persist an explicit mode switch ("creative mode" / "productivity mode").
    mode_toggle = _detect_mode_toggle(message)
    if mode_toggle and mode_toggle != mode:
        preferences_data["mode"] = mode_toggle
        serialized_preferences = _serialize_preferences(preferences_data)
        try:
            # BUG FIX: use the injected `database`, not the module-level `db`,
            # so a substitute backend passed by the caller is honored here too.
            database.update_user_profile(user_key, preferences=serialized_preferences)
            profile["preferences"] = serialized_preferences
        except Exception as exc:  # pragma: no cover
            LOGGER.exception("Failed to persist mode preference for %s: %s", user_key, exc)
        mode = mode_toggle
    elif "preferences" not in profile:
        profile["preferences"] = _serialize_preferences(preferences_data) if preferences_data else ""

    try:
        previous_message_count = database.count_user_messages(user_key)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to count messages for %s: %s", user_key, exc)
        previous_message_count = 0

    # Checkpoints are evaluated against the count *including* this message.
    projected_message_count = previous_message_count + 1
    summary_due = _should_issue_summary(projected_message_count)
    personality_refresh_due = _should_update_summary(projected_message_count)

    try:
        history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to load recent history for %s: %s", user_key, exc)
        history = []

    chat_messages = _build_messages(profile, history, message, mode, summary_due)

    client = _get_client()
    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=chat_messages,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            max_tokens=MAX_TOKENS,
        )
        ai_reply = response.choices[0].message.content.strip()
        # Rephrase near-duplicates of recent replies to keep the voice fresh.
        if _is_repetitive(ai_reply, history):
            ai_reply = _apply_variability_template(ai_reply)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("OpenAI completion call failed for %s: %s", user_key, exc)
        ai_reply = (
            "I'm having a little trouble formulating a response right now, but I'm still here."
        )

    try:
        database.save_conversation(user_key, message, ai_reply)
    except Exception as exc:  # pragma: no cover
        LOGGER.exception("Failed to record conversation for %s: %s", user_key, exc)

    if personality_refresh_due:
        # Re-fetch so the refresh sees the exchange saved just above.
        try:
            refresh_history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
        except Exception as exc:  # pragma: no cover
            LOGGER.exception("Failed to gather history for personality refresh (%s): %s", user_key, exc)
            refresh_history = []
        _refresh_personality_summary(user_key, refresh_history)

    return ai_reply