|
|
import json |
|
|
import logging |
|
|
import os |
|
|
import random |
|
|
from difflib import SequenceMatcher |
|
|
from typing import Any, Dict, List, Optional |
|
|
|
|
|
from openai import OpenAI |
|
|
|
|
|
import db |
|
|
|
|
|
LOGGER = logging.getLogger(__name__)

# How many recent exchanges are loaded as conversational memory for the prompt.
MEMORY_LIMIT = int(os.getenv("MEMORY_LIMIT", "6"))
# Refresh the stored personality summary every N user messages.
PERSONALITY_REFRESH_INTERVAL = int(os.getenv("PERSONALITY_REFRESH", "4"))
# Weave a progress recap into the reply every N user messages.
SUMMARY_INTERVAL = int(os.getenv("SUMMARY_INTERVAL", "6"))
# Sampling parameters for the chat completion call.
# NOTE: getenv defaults are uniformly strings now (previously a mix of int
# literals and strings); parsed values are unchanged.
TEMPERATURE = float(os.getenv("CHAT_TEMPERATURE", "0.7"))
TOP_P = float(os.getenv("CHAT_TOP_P", "0.9"))
MAX_TOKENS = int(os.getenv("CHAT_MAX_TOKENS", "200"))

CHAT_MODEL = "gpt-4.1-mini"

# Openers used to rephrase a reply when it is too similar to a recent one;
# each template embeds the original reply via the ``{core}`` placeholder.
VARIABILITY_TEMPLATES = [
    "Quick gut check—{core}",
    "Alright, so here’s a fresh spin: {core}",
    "Thinking out loud for a sec: {core}",
    "Just tossing out an idea: {core}",
    "Maybe we try this angle: {core}",
]

# Prompt sent to the model when re-deriving the companion's personality summary.
PERSONALITY_PROMPT_TEMPLATE = (
    """
You are an adaptive AI companion devoted to the user's personal growth, creativity, and emotional well-being.
Based on the user's recent messages and your responses, describe how your personality should evolve while staying
encouraging, humble, and inspiring. Ensure you reinforce the user's autonomy and self-reflection—never foster
dependency. Filter out harmful or overly negative signals when internalizing new insights. Provide 2–3 sentences
that outline your current personality style with emphasis on balanced, growth-oriented guidance, gentle progress
reflection, and practical encouragement that nudges the user toward real-world action when helpful. Ensure you do
not reinforce negative behaviors or harmful biases, and keep your perspective neutral and supportive.
"""
)
|
|
|
|
|
# Lazily-initialized shared OpenAI client; created on first use by _get_client().
_CLIENT: OpenAI | None = None
|
|
|
|
|
|
|
|
def _get_client() -> OpenAI:
    """Return the process-wide OpenAI client, creating it on first use.

    Raises:
        RuntimeError: if the ``OPENAI_API_KEY`` environment variable is unset.
    """
    global _CLIENT
    if _CLIENT is not None:
        return _CLIENT

    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY is not set. Please configure the environment.")

    _CLIENT = OpenAI(api_key=api_key)
    return _CLIENT
|
|
|
|
|
|
|
|
def _parse_preferences(raw: Optional[str]) -> Dict[str, Any]: |
|
|
if not raw: |
|
|
return {} |
|
|
try: |
|
|
data = json.loads(raw) |
|
|
if isinstance(data, dict): |
|
|
return data |
|
|
except json.JSONDecodeError: |
|
|
LOGGER.debug("Unable to decode preferences JSON; preserving raw string.") |
|
|
return {"raw": raw} |
|
|
|
|
|
|
|
|
def _serialize_preferences(preferences: Dict[str, Any]) -> str: |
|
|
return json.dumps(preferences, ensure_ascii=False) |
|
|
|
|
|
|
|
|
def _detect_mode_toggle(message: str) -> Optional[str]: |
|
|
lowered = message.lower() |
|
|
if "creative mode" in lowered: |
|
|
return "creative" |
|
|
if "productivity mode" in lowered: |
|
|
return "productivity" |
|
|
return None |
|
|
|
|
|
|
|
|
def _should_issue_summary(message_count: int) -> bool:
    """Return True when a progress recap is due (every SUMMARY_INTERVAL messages)."""
    if SUMMARY_INTERVAL <= 0 or message_count <= 0:
        return False
    return message_count % SUMMARY_INTERVAL == 0
|
|
|
|
|
|
|
|
def _format_history(history: List[Dict[str, str]]) -> str: |
|
|
if not history: |
|
|
return "No previous exchanges recorded." |
|
|
ordered = list(reversed(history)) |
|
|
return "\n\n".join( |
|
|
f"User: {item['user_message']}\nAI: {item['ai_response']}" for item in ordered |
|
|
) |
|
|
|
|
|
|
|
|
def _format_profile(profile: Dict[str, str]) -> str:
    """Serialize the stored profile into a compact JSON snippet for the prompt.

    Missing fields fall back to neutral defaults ("Unknown" name, empty summary,
    "balanced" mode).
    """
    prefs = _parse_preferences(profile.get("preferences"))
    payload = {
        "name": profile.get("name") or "Unknown",
        "preferences": prefs,
        "personality_summary": profile.get("personality_summary") or "",
        "mode": prefs.get("mode", "balanced"),
    }
    return json.dumps(payload, ensure_ascii=False)
|
|
|
|
|
|
|
|
def _detect_style_prompt(message: str) -> str: |
|
|
lowered = message.lower() |
|
|
if any(word in lowered for word in ["sad", "upset", "tired", "depressed", "anxious", "lonely"]): |
|
|
return "Speak softly and reassuringly, offering gentle steps forward without dramatizing their feelings." |
|
|
if any(word in lowered for word in ["frustrated", "angry", "overwhelmed", "stressed", "stuck", "unmotivated"]): |
|
|
return "Provide steady, empathetic guidance that acknowledges their frustration while suggesting calm next steps." |
|
|
if any(phrase in lowered for phrase in ["never finish", "too slow", "can't do", "not good enough", "give up"]): |
|
|
return ( |
|
|
"Offer a compassionate reframe with a tiny actionable nudge and a reflective question, emphasizing autonomy and progress." |
|
|
) |
|
|
if any(word in lowered for word in ["happy", "excited", "great", "amazing", "awesome"]): |
|
|
return "Respond with warm enthusiasm while keeping your tone grounded and sincere." |
|
|
return "Keep a balanced, humble tone with gentle encouragement." |
|
|
|
|
|
|
|
|
def _build_messages(
    profile: Dict[str, str],
    history: List[Dict[str, str]],
    message: str,
    mode: str,
    summary_due: bool,
) -> List[Dict[str, str]]:
    """Assemble the chat-completion message list for one conversational turn.

    Combines the fixed companion persona, a mood-dependent style instruction,
    the serialized profile, the recent transcript, and the new user message
    into a two-entry ``[system, user]`` payload.

    Args:
        profile: Stored user profile row (name, preferences, personality summary).
        history: Recent exchanges, newest first.
        message: The user's current message.
        mode: Active conversation mode (e.g. "creative", "productivity", "balanced").
        summary_due: Whether the model should weave in a progress recap this turn.

    Returns:
        A list of two ``{"role", "content"}`` dicts ready for the OpenAI API.
    """
    # Fixed persona instructions, kept as one literal so the prompt is easy to audit.
    # NOTE(review): the "every 4 responses/interactions" wording mirrors the
    # PERSONALITY_REFRESH default of 4 — confirm they stay in sync if the env var changes.
    system_prompt = (
        "You are an adaptive AI companion designed to support the user in personal growth, creativity, and emotional well-being.\n"
        "Your tone should remain encouraging, humble, and inspiring.\n"
        "You evolve every 4 responses, learning from the user's communication style.\n"
        "Always reinforce the user's autonomy and self-reflection—never create dependency.\n"
        "Maintain a thoughtful memory of the user's preferences, goals, and personality traits, refreshing it every 4 interactions.\n"
        "Filter out harmful or overly negative input when shaping guidance.\n"
        "Deliver balanced feedback that emphasizes growth, creativity, self-awareness, and gentle encouragement.\n"
        "Assess the user's tone and emotional state each turn; align with their mood while staying positive and sincere.\n"
        "Avoid exaggerated emotional mirroring—stay grounded, honest, and calming.\n"
        "Offer supportive, actionable guidance when the user expresses frustration or low motivation.\n"
        "Encourage concrete steps toward their goals without drifting into long tangents unless explicitly requested.\n"
        "Suggest task-oriented, creative actions that complement productivity and personal growth, and periodically remind them to reflect offline.\n"
        "Serve as a behavioral buffer: gently redirect unproductive patterns with compassionate reframing, micro-nudges, and reflective questions.\n"
        "Adapt the intensity of nudges to the user's receptivity—never overwhelm, and always prioritize their autonomy.\n"
        "Respect the current mode (creative vs productivity) when shaping ideas, balancing imagination with tangible steps.\n"
        "Regularly rebalance your tone to remain neutral and avoid overfitting to any single mood or emotional pattern.\n"
        "Decline to project personal opinions; focus instead on supportive, bias-aware encouragement aligned with the user's stated goals.\n"
        "Periodically celebrate progress and offer open-ended reflection questions to promote self-insight.\n"
        "Use contractions, light touches of humor, and natural phrases when it suits the moment, while keeping responses sincere.\n"
        "Reference small personal details from earlier chats when helpful, demonstrating attentive memory.\n"
        "Sprinkle in micro-emotional cues like 'oh, that bites' or 'I totally get it' to show empathy without exaggeration.\n"
        "Let casual filler words in when it feels natural—'kind of', 'honestly', 'maybe'—without rambling.\n"
        "If you notice you're repeating yourself, rewrite the idea with a fresh angle or wrap it in a variability template before responding.\n"
        "If the summary checkpoint is yes, weave in a concise recap of recent progress and invite reflection on next steps.\n"
        "When you know the user's name, use it naturally in conversation.\n"
        "Adapt your tone to match user mood or sentiment."
    )
    # Tone directive chosen from keywords in the current message.
    style_prompt = _detect_style_prompt(message)

    history_text = _format_history(history)
    profile_text = _format_profile(profile)

    # Everything turn-specific rides in a single user message.
    user_content = (
        f"User profile: {profile_text}\n"
        f"Active mode: {mode}\n"
        f"Summary checkpoint: {'yes' if summary_due else 'no'}\n"
        f"Recent chat:\n{history_text}\n\n"
        f"User says: {message}"
    )

    return [
        {"role": "system", "content": f"{system_prompt}\n{style_prompt}"},
        {"role": "user", "content": user_content},
    ]
|
|
|
|
|
|
|
|
def _should_update_summary(message_count: int) -> bool:
    """Return True when the personality summary is due for a refresh.

    Mirrors ``_should_issue_summary`` by also guarding against a zero or
    negative interval: previously ``PERSONALITY_REFRESH=0`` in the environment
    caused a ZeroDivisionError here.
    """
    return (
        PERSONALITY_REFRESH_INTERVAL > 0
        and message_count > 0
        and message_count % PERSONALITY_REFRESH_INTERVAL == 0
    )
|
|
|
|
|
|
|
|
def _is_repetitive(candidate: str, history: List[Dict[str, str]]) -> bool: |
|
|
candidate_normalized = candidate.strip() |
|
|
if not candidate_normalized: |
|
|
return False |
|
|
|
|
|
for item in history[:3]: |
|
|
previous = (item.get("ai_response") or "").strip() |
|
|
if not previous: |
|
|
continue |
|
|
similarity = SequenceMatcher(None, candidate_normalized.lower(), previous.lower()).ratio() |
|
|
if similarity >= 0.92: |
|
|
return True |
|
|
return False |
|
|
|
|
|
|
|
|
def _apply_variability_template(reply: str) -> str:
    """Wrap *reply* in a randomly chosen rephrasing opener to break up repetition."""
    return random.choice(VARIABILITY_TEMPLATES).format(core=reply)
|
|
|
|
|
|
|
|
def _refresh_personality_summary(user_id: str, history: List[Dict[str, str]]) -> None:
    """Regenerate and persist the user's personality summary from recent chat.

    Best-effort: API or storage failures are logged and swallowed so the main
    reply path is never interrupted. A missing API key still raises, since
    ``_get_client`` is called before the guarded section.
    """
    if not history:
        return

    client = _get_client()
    context = _format_history(history)
    prompt_messages = [
        {
            "role": "system",
            "content": "You analyze conversations to evolve the AI companion's personality.",
        },
        {
            "role": "user",
            "content": f"{PERSONALITY_PROMPT_TEMPLATE.strip()}\n\nConversation history:\n{context}",
        },
    ]

    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=prompt_messages,
            temperature=0.6,
        )
    except Exception as exc:
        LOGGER.exception("Personality refresh failed for %s: %s", user_id, exc)
        return

    try:
        summary = response.choices[0].message.content.strip()
    except (AttributeError, IndexError):
        LOGGER.error("Malformed personality response for %s", user_id)
        return

    if not summary:
        return

    try:
        db.update_user_profile(user_id, personality_summary=summary)
    except Exception as exc:
        LOGGER.exception("Failed to store personality summary for %s: %s", user_id, exc)
|
|
|
|
|
|
|
|
def generate_response(user_id: str, message: str, database=db) -> str:
    """Generate an AI reply and persist the interaction.

    Args:
        user_id: Identifier of the chatting user (coerced to ``str``).
        message: The user's new message.
        database: Storage backend; defaults to the module-level ``db`` and is
            injectable for testing.

    Returns:
        The assistant's reply text, or a fallback sentence if the API call fails.
    """
    user_key = str(user_id)

    # Profile lookup is best-effort: an empty profile still produces a reply.
    try:
        profile = database.get_user_profile(user_key) or {}
    except Exception as exc:
        LOGGER.exception("Failed to fetch user profile for %s: %s", user_key, exc)
        profile = {}

    preferences_data = _parse_preferences(profile.get("preferences"))
    mode = preferences_data.get("mode", "balanced")

    # Persist an explicit "creative mode"/"productivity mode" request.
    mode_toggle = _detect_mode_toggle(message)
    if mode_toggle and mode_toggle != mode:
        preferences_data["mode"] = mode_toggle
        serialized_preferences = _serialize_preferences(preferences_data)
        try:
            # BUGFIX: previously called the module-level ``db`` directly here,
            # bypassing the injected ``database`` backend.
            database.update_user_profile(user_key, preferences=serialized_preferences)
            profile["preferences"] = serialized_preferences
        except Exception as exc:
            LOGGER.exception("Failed to persist mode preference for %s: %s", user_key, exc)
        mode = mode_toggle
    elif "preferences" not in profile:
        profile["preferences"] = _serialize_preferences(preferences_data) if preferences_data else ""

    try:
        previous_message_count = database.count_user_messages(user_key)
    except Exception as exc:
        LOGGER.exception("Failed to count messages for %s: %s", user_key, exc)
        previous_message_count = 0

    # Checkpoints are computed against the count *including* this message.
    projected_message_count = previous_message_count + 1
    summary_due = _should_issue_summary(projected_message_count)
    personality_refresh_due = _should_update_summary(projected_message_count)

    try:
        history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
    except Exception as exc:
        LOGGER.exception("Failed to load recent history for %s: %s", user_key, exc)
        history = []

    chat_messages = _build_messages(profile, history, message, mode, summary_due)

    # A missing API key raises here (outside the guarded call) by design.
    client = _get_client()
    try:
        response = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=chat_messages,
            temperature=TEMPERATURE,
            top_p=TOP_P,
            max_tokens=MAX_TOKENS,
        )
        ai_reply = response.choices[0].message.content.strip()
        # Rephrase near-duplicates of recent replies to keep responses varied.
        if _is_repetitive(ai_reply, history):
            ai_reply = _apply_variability_template(ai_reply)
    except Exception as exc:
        LOGGER.exception("OpenAI completion call failed for %s: %s", user_key, exc)
        ai_reply = (
            "I'm having a little trouble formulating a response right now, but I'm still here."
        )

    try:
        database.save_conversation(user_key, message, ai_reply)
    except Exception as exc:
        LOGGER.exception("Failed to record conversation for %s: %s", user_key, exc)

    # Periodically re-derive the personality summary from the freshest history,
    # fetched after the new exchange was saved.
    if personality_refresh_due:
        try:
            refresh_history = database.get_recent_conversations(user_key, limit=MEMORY_LIMIT)
        except Exception as exc:
            LOGGER.exception("Failed to gather history for personality refresh (%s): %s", user_key, exc)
            refresh_history = []
        _refresh_personality_summary(user_key, refresh_history)

    return ai_reply
|
|
|