Spaces:
Sleeping
Sleeping
Upload 5 files
Browse files- engine/drift.py +161 -0
- engine/loader.py +191 -0
- engine/logger.py +317 -0
- engine/responder.py +448 -0
- engine/utils.py +3 -0
engine/drift.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def apply_context_shift(persona, scenario):
    """Apply a scenario's contextual effects to the persona's current state.

    Each numeric metric named in the scenario's "effects" mapping is shifted
    by its delta and clamped to [0, 1].  If the persona tracks an emotional
    memory, a short note describing the scenario is appended (the list is
    capped at the five most recent entries).  Mutates and returns *persona*.
    """
    state = persona.get("default_state", {})

    # Shift each numeric metric the scenario targets, keeping it in [0, 1].
    for metric, delta in scenario.get("effects", {}).items():
        value = state.get(metric)
        if isinstance(value, (int, float)):
            state[metric] = max(0.0, min(1.0, round(value + delta, 3)))

    # Record the scenario in emotional memory, if the persona keeps one.
    if "emotional_memory" in state:
        memory = state["emotional_memory"]
        if not isinstance(memory, list):
            memory = []
        label = scenario.get('description', scenario.get('scenario'))
        memory.append(f"context: {label}")
        # Retain only the five most recent memories.
        state["emotional_memory"] = memory[-5:]

    return persona
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def get_current_mode(state):
    """Map the client's numeric emotional state onto a named mode.

    Rules are evaluated in order of acuteness; the first predicate that
    matches wins, and "baseline" is the fallback.  Missing metrics default
    to a neutral 0.5.
    """
    anxiety = state.get("anxiety", 0.5)
    trust = state.get("trust", 0.5)
    openness = state.get("openness", 0.5)

    # (mode, predicate) pairs, most acute first.
    rules = (
        ("decompensating", anxiety > 0.8),                # crisis threshold
        ("triggered", anxiety > 0.6 and openness < 0.3),  # defensive/triggered
        ("guarded", trust < 0.4 and openness < 0.5),      # guarded but present
        ("trusting", trust > 0.6 and openness > 0.6),     # opening up
        ("recovering", anxiety < 0.4 and trust > 0.5),    # recovering/hopeful
    )
    for mode, matched in rules:
        if matched:
            return mode
    return "baseline"
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def calculate_state_change(current_state, student_response):
    """Estimate how the student's response shifts the client's emotional state.

    Keyword-based heuristic: validating, open-question, and empathic language
    raises trust/openness and lowers anxiety; advice-giving and minimizing
    language do the opposite.  Response length is also penalized at the
    extremes (very short replies reduce openness; very long ones raise
    anxiety).  In production this would use more sophisticated NLP.

    Args:
        current_state: The client's current state dict.  Not consulted by the
            present heuristic; kept for interface stability and future
            state-aware scoring.
        student_response: The OT student's free-text reply.

    Returns:
        dict mapping "trust"/"openness"/"anxiety" to signed float deltas.
    """
    response_lower = student_response.lower()
    changes = {}

    # Positive indicators (validation, open questions, empathy)
    validating_words = ["understand", "sounds like", "seems", "feel", "must be", "makes sense"]
    open_questions = ["tell me more", "what's that like", "how", "what"]
    empathy_phrases = ["hard", "difficult", "challenging", "tough"]

    # Negative indicators (advice-giving, minimizing).
    # NOTE: the original also declared a closed-question list here that was
    # never used by the scoring; it has been removed.
    advice_words = ["should", "need to", "have to", "must", "why don't you"]
    minimizing = ["just", "simply", "easy", "only", "at least"]

    validation_score = sum(1 for word in validating_words if word in response_lower)
    open_q_score = sum(1 for phrase in open_questions if phrase in response_lower)
    empathy_score = sum(1 for phrase in empathy_phrases if phrase in response_lower)

    advice_score = sum(1 for word in advice_words if word in response_lower)
    minimizing_score = sum(1 for word in minimizing if word in response_lower)

    # Weighted aggregates: advice-giving weighs heaviest among negatives.
    positive_impact = (validation_score * 0.05 + open_q_score * 0.04 + empathy_score * 0.03)
    negative_impact = (advice_score * 0.08 + minimizing_score * 0.06)

    changes["trust"] = positive_impact - negative_impact
    changes["openness"] = positive_impact * 0.8 - negative_impact * 0.5
    changes["anxiety"] = negative_impact * 0.5 - positive_impact * 0.3

    # Response length consideration (too short or too long can be problematic)
    word_count = len(student_response.split())
    if word_count < 5:
        changes["openness"] = changes.get("openness", 0) - 0.05
    elif word_count > 100:
        changes["anxiety"] = changes.get("anxiety", 0) + 0.05

    return changes
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def apply_response_effects(state, student_response):
    """Fold the effects of the student's response into *state*, mutating it.

    Deltas come from calculate_state_change(); each numeric metric already
    present in *state* is shifted, clamped to [0, 1], and rounded to three
    decimal places.  Returns the (mutated) state dict.
    """
    deltas = calculate_state_change(state, student_response)

    for metric, delta in deltas.items():
        value = state.get(metric)
        if isinstance(value, (int, float)):
            state[metric] = max(0.0, min(1.0, round(value + delta, 3)))

    return state
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def generate_teaching_note(state, student_response, mode):
    """Build instructor-style feedback for one student turn.

    Flags common pitfalls (advice-giving, minimizing language, stacked
    questions, terseness), acknowledges strengths (reflection, open-ended
    questions), and adds mode-aware coaching.  Returns the notes joined by
    newlines, or a default encouragement when nothing was flagged.
    """
    lowered = student_response.lower()
    notes = []

    def mentions(words):
        """True when any of *words* appears in the lowered response."""
        return any(w in lowered for w in words)

    # Common pitfalls.
    if mentions(("should", "need to", "have to")):
        notes.append("⚠️ Advice-giving detected. Consider asking open questions instead of giving directives.")
    if mentions(("just", "simply", "only")):
        notes.append("⚠️ Potential minimizing language. Avoid words that might diminish the client's experience.")
    if lowered.count("?") > 2:
        notes.append("⚠️ Multiple questions detected. Consider asking one question at a time to avoid overwhelming the client.")
    if len(student_response.split()) < 10:
        notes.append("💡 Very brief response. Consider adding validation or reflection before asking questions.")

    # Strengths.
    if mentions(("sounds like", "seems", "hear you")):
        notes.append("✅ Good use of reflection and validation.")
    if "tell me more" in lowered or "what's that like" in lowered:
        notes.append("✅ Effective use of open-ended questions.")

    # Mode-aware coaching.
    if mode == "triggered" and state.get("trust", 0) < 0.4:
        notes.append("📊 Client is defensive. Consider backing off and focusing on rapport building.")
    if mode == "trusting" and state.get("openness", 0) > 0.6:
        notes.append("📊 Strong therapeutic alliance forming. This is a good time to explore deeper issues.")
    if mode == "decompensating":
        notes.append("🚨 Client may be in crisis. Assess safety and consider referral to crisis services.")

    if not notes:
        return "✅ Solid therapeutic response. Continue building rapport."
    return "\n".join(notes)
|
engine/loader.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
def load_persona(path):
    """Load and normalize a mental-health persona from a YAML file.

    Raises:
        FileNotFoundError: if *path* does not exist.
        ValueError: if any required top-level key is absent.

    Missing emotional-state metrics are backfilled with neutral defaults
    (0.5, mode "baseline"), the emotional-memory list is initialized when
    absent, and a non-list "facts" value is coerced to a list.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f"Persona file not found: {path}")

    with open(path, "r", encoding="utf-8") as fh:
        persona = yaml.safe_load(fh)

    # Top-level keys every mental-health persona must define.
    for required in ("persona_name", "age", "role", "system_prompt", "facts", "default_state"):
        if required not in persona:
            raise ValueError(f"Missing required key in persona: {required}")

    # Backfill missing emotional metrics with neutral defaults.
    state = persona.get("default_state", {})
    for key in ("anxiety", "trust", "openness", "mode"):
        if key not in state:
            print(f"Warning: Missing state key '{key}' in persona. Using default value.")
            state[key] = "baseline" if key == "mode" else 0.5

    # Emotional memory always starts out as a list.
    if "emotional_memory" not in state:
        state["emotional_memory"] = []

    # Coerce a non-list "facts" value (e.g. a mapping) into a list.
    if not isinstance(persona.get("facts"), list):
        print("Warning: facts should be a list. Converting.")
        facts = persona.get("facts", [])
        persona["facts"] = list(facts.values()) if isinstance(facts, dict) else []

    return persona
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def validate_persona(persona):
    """Check that a persona dict is complete enough to run a simulation.

    Returns:
        (is_valid, message): False plus an explanatory message on the first
        problem found; True with "Persona is valid" otherwise.  Missing
        tone_guidance modes only produce a console warning, not a failure.
    """
    if not persona.get("persona_name"):
        return False, "Persona must have a name"

    state = persona.get("default_state", {})
    if not state:
        return False, "Persona must have a default_state"

    # Core emotional metrics must be numeric and normalized to [0, 1].
    for key in ("anxiety", "trust", "openness"):
        value = state.get(key)
        if value is None:
            return False, f"default_state missing required key: {key}"
        if not isinstance(value, (int, float)):
            return False, f"default_state.{key} must be numeric"
        if not 0 <= value <= 1:
            return False, f"default_state.{key} must be between 0 and 1"

    if not persona.get("tone_guidance"):
        return False, "Persona must have tone_guidance"

    # Missing modes are tolerated but flagged for the author.
    tone_guidance = persona.get("tone_guidance", {})
    missing_modes = [
        mode
        for mode in ("baseline", "guarded", "triggered", "trusting", "decompensating")
        if mode not in tone_guidance
    ]
    if missing_modes:
        print(f"Warning: tone_guidance missing modes: {missing_modes}")

    return True, "Persona is valid"
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def save_persona(persona, path):
    """Serialize *persona* to a YAML file at *path* and return that path.

    Key order is preserved (sort_keys=False) and block style is used so the
    output stays human-editable.
    """
    with open(path, "w", encoding="utf-8") as handle:
        yaml.dump(persona, handle, sort_keys=False, default_flow_style=False)
    return path
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def create_default_persona(name, age, role):
    """Build a minimal persona template for development/testing.

    The returned dict contains every section the engine expects: identity,
    system prompt, facts, triggers, per-mode tone guidance, a neutral
    default emotional state, canned scripts, and resilience hooks.
    """
    # Per-mode voice/example pairs used to steer the simulated client's tone.
    tone_guidance = {
        "baseline": {
            "voice": "Calm and thoughtful",
            "example": "I'm doing okay today, thanks for asking.",
        },
        "guarded": {
            "voice": "Brief and cautious",
            "example": "I'd rather not talk about that right now.",
        },
        "triggered": {
            "voice": "Defensive or withdrawn",
            "example": "I don't see how that's relevant.",
        },
        "trusting": {
            "voice": "Open and reflective",
            "example": "You know, I've been thinking about what you said last time...",
        },
        "decompensating": {
            "voice": "Fragmented and overwhelmed",
            "example": "I just... I can't... it's too much.",
        },
    }

    # Canned lines the responder can fall back on for common situations.
    scripts = {
        "crisis": "I'm not feeling safe right now. I need to step away.",
        "deflection": "It's fine. I don't want to make a big deal out of it.",
        "testing_trust": "Why are you asking about that?",
        "resistance": "I don't see how talking about this helps.",
    }

    return {
        "persona_name": name,
        "age": age,
        "role": role,
        "system_prompt": f"You are {name}, a {age}-year-old {role}. Respond naturally and stay in character.",
        "facts": [
            f"{name} is {age} years old",
            f"{name} works as a {role}",
        ],
        "triggers": ["criticism", "pressure", "isolation"],
        "reasoning_style": "Tends to analyze situations carefully before responding.",
        "tone_guidance": tone_guidance,
        # Neutral starting point: all metrics at midpoint, empty memory.
        "default_state": {
            "anxiety": 0.5,
            "trust": 0.5,
            "openness": 0.5,
            "mode": "baseline",
            "emotional_memory": [],
        },
        "scripts": scripts,
        "resilience_hooks": [
            f"{name} has coping strategies they've used before",
            f"{name} values certain relationships in their life",
        ],
    }
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def list_available_personas(persona_dir="./personas"):
    """Return summaries for every loadable persona YAML file in *persona_dir*.

    Each entry has "filename", "name", "age", and "role".  Files that fail to
    load are skipped (with a console message naming the file) rather than
    aborting the listing.

    Args:
        persona_dir: Directory to scan; returns [] when it does not exist.

    Returns:
        list of summary dicts, one per successfully loaded persona file.
    """
    if not os.path.exists(persona_dir):
        return []

    personas = []
    for filename in os.listdir(persona_dir):
        # str.endswith accepts a tuple of suffixes — one call covers both.
        if filename.endswith((".yml", ".yaml")):
            path = os.path.join(persona_dir, filename)
            try:
                persona = load_persona(path)
                personas.append({
                    "filename": filename,
                    "name": persona.get("persona_name", "Unknown"),
                    "age": persona.get("age", ""),
                    "role": persona.get("role", ""),
                })
            except Exception as e:
                # Bug fix: the message previously printed the literal text
                # "(unknown)"; name the offending file so the error is
                # actionable.
                print(f"Error loading {filename}: {e}")

    return personas
|
engine/logger.py
ADDED
|
@@ -0,0 +1,317 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
import json
|
| 4 |
+
import yaml
|
| 5 |
+
|
| 6 |
+
def log_interaction(persona, student_prompt, scenario, response, state, teaching_note):
    """
    Log a therapeutic interaction for review and assessment purposes.
    Creates both human-readable and machine-readable formats.

    Writes two files under ./transcripts/ (created if needed), named from
    the persona and a filesystem-safe timestamp:
      * a formatted .txt transcript for human review, and
      * a .json record of the same turn for downstream analysis.

    Args:
        persona: Persona dict; "persona_name", "age" and "role" are read.
        student_prompt: The OT student's message for this turn.
        scenario: Scenario context; embedded verbatim in both outputs.
        response: The simulated client's reply.
        state: Post-interaction emotional state dict (anxiety/trust/openness/
            mode, plus optional persona-specific metrics).
        teaching_note: Instructor-style feedback text for this turn.

    Returns:
        The path of the .txt transcript that was written.
    """
    name = persona.get("persona_name", "Unknown")
    age = persona.get("age", "")
    role = persona.get("role", "")
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Human-readable transcript.  The '█' runs render each 0..1 metric as a
    # 0-10 character bar; the three optional metric lines appear only when
    # the persona's state actually tracks that metric.
    transcript = f"""
╔══════════════════════════════════════════════════════════════╗
║ OT MENTAL HEALTH SIMULATION TRANSCRIPT ║
╚══════════════════════════════════════════════════════════════╝

Timestamp: {timestamp}
Client: {name} ({age}, {role})
Scenario Context: {scenario}

─────────────────────────────────────────────────────────────

OT STUDENT RESPONSE:
{student_prompt}

─────────────────────────────────────────────────────────────

CLIENT RESPONSE:
{response}

─────────────────────────────────────────────────────────────

EMOTIONAL STATE AFTER INTERACTION:
• Anxiety Level: {state.get('anxiety', 0):.2f} {'█' * int(state.get('anxiety', 0) * 10)}
• Trust Level: {state.get('trust', 0):.2f} {'█' * int(state.get('trust', 0) * 10)}
• Openness: {state.get('openness', 0):.2f} {'█' * int(state.get('openness', 0) * 10)}
• Current Mode: {state.get('mode', 'baseline')}

{f"• Physical Discomfort: {state.get('physical_discomfort', 0):.2f}" if 'physical_discomfort' in state else ""}
{f"• Creative Engagement: {state.get('creative_engagement', 0):.2f}" if 'creative_engagement' in state else ""}
{f"• Occupational Balance: {state.get('occupational_balance', 0):.2f}" if 'occupational_balance' in state else ""}

─────────────────────────────────────────────────────────────

TEACHING FEEDBACK:
{teaching_note}

─────────────────────────────────────────────────────────────

EMOTIONAL MEMORY:
{format_emotional_memory(state.get('emotional_memory', []))}

═════════════════════════════════════════════════════════════
"""

    # Save human-readable transcript.  Spaces and colons are replaced so the
    # persona name and timestamp are safe in a filename on all platforms.
    os.makedirs("transcripts", exist_ok=True)
    safe_name = name.replace(' ', '_')
    safe_timestamp = timestamp.replace(':', '-').replace(' ', '_')
    filename = f"transcripts/{safe_name}_{safe_timestamp}.txt"

    with open(filename, "w", encoding="utf-8") as f:
        f.write(transcript)

    # Save machine-readable JSON for analysis (same data, structured).
    json_data = {
        "timestamp": timestamp,
        "client": {
            "name": name,
            "age": age,
            "role": role
        },
        "scenario": scenario,
        "interaction": {
            "student_prompt": student_prompt,
            "client_response": response
        },
        "state": state,
        "teaching_note": teaching_note
    }

    json_filename = f"transcripts/{safe_name}_{safe_timestamp}.json"
    with open(json_filename, "w", encoding="utf-8") as f:
        json.dump(json_data, f, indent=2)

    return filename
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def format_emotional_memory(memory_list):
    """Render the emotional-memory list as a numbered, newline-terminated block.

    Returns a placeholder sentence when the list is empty.
    """
    if not memory_list:
        return "No emotional memories recorded yet."
    return "".join(f" {i}. {memory}\n" for i, memory in enumerate(memory_list, 1))
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def log_session_summary(persona, interactions, final_state):
    """
    Log a summary of an entire session (multiple interactions).

    Builds a formatted text report — per-turn overview, final emotional
    state, progress indicators from the assess_* helpers, and next-session
    recommendations — and writes it under transcripts/summaries/.

    Args:
        persona: Persona dict; "persona_name" is read for the header/filename.
        interactions: Sequence of per-turn dicts; each may carry "student",
            "mode", "anxiety", "trust" (missing values fall back to defaults).
        final_state: State dict at the end of the session.

    Returns:
        The path of the summary file that was written.
    """
    name = persona.get("persona_name", "Unknown")
    # Already filesystem-safe: underscores/dashes instead of spaces/colons.
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    summary = f"""
╔══════════════════════════════════════════════════════════════╗
║ SESSION SUMMARY REPORT ║
╚══════════════════════════════════════════════════════════════╝

Client: {name}
Date: {timestamp}
Number of Interactions: {len(interactions)}

─────────────────────────────────────────────────────────────

INTERACTION OVERVIEW:
"""

    # One short block per turn; student text is truncated to 80 chars.
    for i, interaction in enumerate(interactions, 1):
        summary += f"""
Turn {i}:
Student: {interaction.get('student', '')[:80]}...
Client Mode: {interaction.get('mode', 'unknown')}
Anxiety: {interaction.get('anxiety', 0):.2f} | Trust: {interaction.get('trust', 0):.2f}
"""

    summary += f"""

─────────────────────────────────────────────────────────────

FINAL STATE:
• Anxiety: {final_state.get('anxiety', 0):.2f}
• Trust: {final_state.get('trust', 0):.2f}
• Openness: {final_state.get('openness', 0):.2f}
• Mode: {final_state.get('mode', 'baseline')}

─────────────────────────────────────────────────────────────

THERAPEUTIC PROGRESS INDICATORS:

Trust Development: {assess_trust_progress(interactions)}
Anxiety Management: {assess_anxiety_progress(interactions)}
Openness to Engage: {assess_openness_progress(interactions)}

─────────────────────────────────────────────────────────────

RECOMMENDATIONS FOR FUTURE SESSIONS:
{generate_recommendations(persona, interactions, final_state)}

═════════════════════════════════════════════════════════════
"""

    # Save summary
    os.makedirs("transcripts/summaries", exist_ok=True)
    summary_filename = f"transcripts/summaries/{name.replace(' ', '_')}_{timestamp}.txt"

    with open(summary_filename, "w", encoding="utf-8") as f:
        f.write(summary)

    return summary_filename
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def assess_trust_progress(interactions):
    """Summarize how trust moved between the first and last interaction.

    Returns a short human-readable verdict string based on the net change.
    """
    if len(interactions) < 2:
        return "Insufficient data"

    trust_values = [turn.get('trust', 0.5) for turn in interactions if 'trust' in turn]
    if not trust_values:
        return "No trust data available"

    change = trust_values[-1] - trust_values[0]

    # Bucket the net change into a verdict; thresholds mirror the other
    # assess_* helpers.
    if change > 0.15:
        return f"Strong improvement (+{change:.2f})"
    if change > 0.05:
        return f"Moderate improvement (+{change:.2f})"
    if change < -0.15:
        return f"Significant decline ({change:.2f})"
    if change < -0.05:
        return f"Slight decline ({change:.2f})"
    return f"Stable ({change:+.2f})"
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def assess_anxiety_progress(interactions):
    """Summarize how anxiety moved between the first and last interaction.

    Unlike trust/openness, a DECREASE in anxiety is the desirable outcome,
    so reductions are marked with a check and increases with a warning.
    """
    if len(interactions) < 2:
        return "Insufficient data"

    anxiety_values = [turn.get('anxiety', 0.5) for turn in interactions if 'anxiety' in turn]
    if not anxiety_values:
        return "No anxiety data available"

    change = anxiety_values[-1] - anxiety_values[0]

    if change < -0.15:
        return f"Significant reduction ({change:.2f}) ✓"
    if change < -0.05:
        return f"Moderate reduction ({change:.2f}) ✓"
    if change > 0.15:
        return f"Significant increase (+{change:.2f}) ⚠"
    if change > 0.05:
        return f"Slight increase (+{change:.2f})"
    return f"Stable ({change:+.2f})"
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
def assess_openness_progress(interactions):
    """Summarize how openness moved between the first and last interaction.

    Increases are the desirable direction and get a check mark; large
    decreases get a warning.
    """
    if len(interactions) < 2:
        return "Insufficient data"

    openness_values = [turn.get('openness', 0.5) for turn in interactions if 'openness' in turn]
    if not openness_values:
        return "No openness data available"

    change = openness_values[-1] - openness_values[0]

    if change > 0.15:
        return f"Significant increase (+{change:.2f}) ✓"
    if change > 0.05:
        return f"Moderate increase (+{change:.2f}) ✓"
    if change < -0.15:
        return f"Significant decrease ({change:.2f}) ⚠"
    if change < -0.05:
        return f"Slight decrease ({change:.2f})"
    return f"Stable ({change:+.2f})"
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def generate_recommendations(persona, interactions, final_state):
    """Produce next-session guidance bullets from the session's final state.

    *persona* and *interactions* are accepted for interface stability; the
    current heuristics only inspect *final_state*.  Returns the bullets
    joined with newlines, with a default pair when nothing was triggered.
    """
    bullets = []

    # Trust: low trust needs rapport work; high trust permits depth.
    trust = final_state.get('trust', 0.5)
    if trust < 0.4:
        bullets += [
            "• Focus on rapport building and validation in next session",
            "• Avoid pushing for deep disclosure too quickly",
        ]
    elif trust > 0.7:
        bullets += [
            "• Strong therapeutic alliance established",
            "• May be ready for deeper exploration of difficult topics",
        ]

    # Anxiety: flag elevated levels.
    if final_state.get('anxiety', 0.5) > 0.7:
        bullets += [
            "• Client experiencing high anxiety - prioritize safety and stability",
            "• Consider anxiety management techniques and grounding",
        ]

    # Openness: respect a guarded client's pace.
    if final_state.get('openness', 0.5) < 0.3:
        bullets += [
            "• Client is guarded - respect pace and boundaries",
            "• Use more open-ended questions and active listening",
        ]

    # Mode-specific guidance; crisis takes priority over defensiveness.
    mode = final_state.get('mode', 'baseline')
    if mode == 'decompensating':
        bullets += [
            "• ⚠ CLIENT MAY NEED CRISIS INTERVENTION",
            "• Assess safety and consider referral to mental health services",
        ]
    elif mode == 'triggered':
        bullets += [
            "• Client ended session in defensive state",
            "• Begin next session with rapport repair",
        ]

    if not bullets:
        bullets = [
            "• Continue with current therapeutic approach",
            "• Build on positive progress from this session",
        ]

    return "\n".join(bullets)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def export_session_for_assessment(persona, interactions, final_state, student_name=""):
    """Write a JSON bundle of the session for instructor assessment.

    Includes the raw interactions, the final state, progress metrics from
    the assess_* helpers, and next-session recommendations.  The file goes
    under transcripts/assessments/ (created if needed) and its path is
    returned.
    """
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    client_name = persona.get("persona_name", "Unknown")

    payload = {
        "student_name": student_name,
        "timestamp": timestamp,
        "client": client_name,
        "interactions": interactions,
        "final_state": final_state,
        "metrics": {
            "trust_progress": assess_trust_progress(interactions),
            "anxiety_progress": assess_anxiety_progress(interactions),
            "openness_progress": assess_openness_progress(interactions),
        },
        "recommendations": generate_recommendations(persona, interactions, final_state),
    }

    os.makedirs("transcripts/assessments", exist_ok=True)
    out_path = f"transcripts/assessments/{student_name}_{client_name}_{timestamp}.json"
    with open(out_path, "w", encoding="utf-8") as fh:
        json.dump(payload, fh, indent=2)

    return out_path
|
engine/responder.py
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from engine.drift import get_current_mode, apply_response_effects, generate_teaching_note
|
| 4 |
+
|
| 5 |
+
# Hugging Face Inference API
|
| 6 |
+
def generate_response(student_prompt, persona, conversation_history):
    """
    Generate a response from the client persona using AI or fallback logic.

    Backend priority: Hugging Face Inference API > Claude API > local templates.

    Returns:
        tuple: (response_text, updated_state, teaching_note)
    """
    try:
        has_hf = os.getenv("HF_TOKEN")
        has_claude = os.getenv("ANTHROPIC_API_KEY")

        if has_hf:
            return generate_response_hf(student_prompt, persona, conversation_history)
        if has_claude:
            return generate_response_claude(student_prompt, persona, conversation_history)
        # No API credentials configured: use the local template engine.
        return generate_response_local(student_prompt, persona, conversation_history)

    except Exception as e:
        from engine.utils import safe_log
        safe_log("Response generation error", str(e))
        # Local templates are the unconditional safety net.
        return generate_response_local(student_prompt, persona, conversation_history)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def generate_response_hf(student_prompt, persona, conversation_history):
    """
    Generate a response using the Hugging Face Inference API (free, non-gated models).

    Tries several chat models in order of preference; on total failure it
    falls back to the local template generator.

    Returns:
        tuple: (response_text, updated_state, teaching_note)
    """
    try:
        from huggingface_hub import InferenceClient

        state = persona.get("default_state", {})

        # Apply response effects first, then derive the mode from the updated
        # state. (The pre-update mode and the unused conversation_context
        # variable in the original were dead code and have been removed.)
        state = apply_response_effects(state, student_prompt)
        mode = get_current_mode(state)

        # System prompt describing the persona and its current emotional state.
        system_prompt = build_system_prompt_for_ai(persona, state, mode)

        client = InferenceClient(token=os.getenv("HF_TOKEN"))

        # Free models, tried in order of preference.
        models = [
            "mistralai/Mistral-7B-Instruct-v0.2",  # Good quality, not gated
            "HuggingFaceH4/zephyr-7b-beta",        # Good instruction following
            "microsoft/Phi-3-mini-4k-instruct",    # Fast and capable
        ]

        response_text = None
        for model in models:
            try:
                messages = [
                    {"role": "system", "content": system_prompt},
                ]

                # Include up to the last 3 turns of conversation history.
                if conversation_history:
                    for turn in conversation_history[-3:]:
                        if "student" in turn:
                            messages.append({"role": "user", "content": turn["student"]})
                        if "client" in turn:
                            messages.append({"role": "assistant", "content": turn["client"]})

                # Add current prompt
                messages.append({"role": "user", "content": student_prompt})

                response = client.chat_completion(
                    messages=messages,
                    model=model,
                    max_tokens=300,
                    temperature=0.7,
                    stream=False
                )

                response_text = response.choices[0].message.content.strip()
                break  # Success, exit loop

            except Exception as model_error:
                from engine.utils import safe_log
                safe_log(f"HF model {model} failed", str(model_error))
                continue  # Try next model

        # If all models failed, raise so the outer handler falls back to local.
        if not response_text:
            raise Exception("All HF models failed")

        # Record how this exchange landed emotionally (keep only last 5 tags).
        if "emotional_memory" in state:
            if not isinstance(state["emotional_memory"], list):
                state["emotional_memory"] = []
            memory_tag = determine_memory_tag(student_prompt, mode, state)
            state["emotional_memory"].append(memory_tag)
            state["emotional_memory"] = state["emotional_memory"][-5:]

        teaching_note = generate_teaching_note(state, student_prompt, mode)
        teaching_note += "\n\n💡 Response generated using AI (Hugging Face)"

        return response_text, state, teaching_note

    except Exception as e:
        from engine.utils import safe_log
        safe_log("HF Inference API error", str(e))
        # Fall back to local generation
        return generate_response_local(student_prompt, persona, conversation_history)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def generate_response_claude(student_prompt, persona, conversation_history):
    """
    Generate a response using the Claude API (optional premium feature).

    Returns:
        tuple: (response_text, updated_state, teaching_note)
    """
    try:
        import anthropic

        state = persona.get("default_state", {})

        # Apply response effects first, then derive the mode from the updated
        # state. (The pre-update mode computed in the original was dead code.)
        state = apply_response_effects(state, student_prompt)
        mode = get_current_mode(state)

        # Build prompts
        system_prompt = build_system_prompt_for_ai(persona, state, mode)
        conversation_context = build_conversation_context(conversation_history)

        # Call Claude API
        client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
        message = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=400,
            system=system_prompt,
            messages=[
                {"role": "user", "content": f"{conversation_context}\n\nOT Student: {student_prompt}"}
            ]
        )

        response_text = message.content[0].text

        # Record how this exchange landed emotionally (keep only last 5 tags).
        if "emotional_memory" in state:
            if not isinstance(state["emotional_memory"], list):
                state["emotional_memory"] = []
            memory_tag = determine_memory_tag(student_prompt, mode, state)
            state["emotional_memory"].append(memory_tag)
            state["emotional_memory"] = state["emotional_memory"][-5:]

        teaching_note = generate_teaching_note(state, student_prompt, mode)
        teaching_note += "\n\n✨ Response generated using Claude AI (Premium)"

        return response_text, state, teaching_note

    except Exception as e:
        from engine.utils import safe_log
        safe_log("Claude API error", str(e))
        return generate_response_local(student_prompt, persona, conversation_history)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def generate_response_local(student_prompt, persona, conversation_history):
    """
    Local response generation using persona templates and state-based selection.

    Fallback when no AI backend is available, or as the primary mode.

    Returns:
        tuple: (response_text, updated_state, teaching_note)
    """
    state = persona.get("default_state", {})
    name = persona.get("persona_name", "Client")

    # Apply response effects first, then derive the mode from the updated
    # state. (The pre-update mode computed in the original was dead code.)
    state = apply_response_effects(state, student_prompt)
    mode = get_current_mode(state)

    # Select response based on mode and prompt analysis
    response = select_response_template(
        student_prompt,
        name,
        mode,
        state,
        persona,
        conversation_history
    )

    # Record how this exchange landed emotionally (keep only last 5 tags).
    if "emotional_memory" in state:
        if not isinstance(state["emotional_memory"], list):
            state["emotional_memory"] = []

        memory_tag = determine_memory_tag(student_prompt, mode, state)
        state["emotional_memory"].append(memory_tag)
        state["emotional_memory"] = state["emotional_memory"][-5:]

    teaching_note = generate_teaching_note(state, student_prompt, mode)
    teaching_note += "\n\n🔧 Response generated using template system (Local)"

    return response, state, teaching_note
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def build_system_prompt_for_ai(persona, state, mode):
    """
    Compose the detailed system prompt that keeps an AI model in character.

    Pulls identity, background facts, current emotional-state numbers, and
    mode-specific tone guidance from the persona/state dicts into a single
    instruction string for the chat model.
    """
    name = persona.get("persona_name", "Client")
    age = persona.get("age", "")
    role = persona.get("role", "")
    base_description = persona.get("system_prompt", "")

    # Voice/example guidance for the current emotional mode.
    guidance = persona.get("tone_guidance", {}).get(mode, {})
    tone_voice = guidance.get("voice", "Natural and authentic")
    tone_example = guidance.get("example", "")

    # Up to five background facts as bullet points.
    facts = persona.get("facts", [])
    key_facts = facts[:5] if isinstance(facts, list) else []
    facts_block = "\n".join(f"- {fact}" for fact in key_facts)

    # Up to three strengths as bullet points.
    hooks = persona.get("resilience_hooks", [])
    strengths_block = "\n".join(f"- {hook}" for hook in hooks[:3])

    return f"""You are {name}, a {age}-year-old {role}. Stay completely in character and respond naturally.

CHARACTER DESCRIPTION:
{base_description}

KEY BACKGROUND:
{facts_block}

CURRENT EMOTIONAL STATE:
- Anxiety: {state.get('anxiety', 0):.2f} (0=calm, 1=crisis)
- Trust in therapist: {state.get('trust', 0):.2f} (0=guarded, 1=trusting)
- Openness: {state.get('openness', 0):.2f} (0=closed, 1=very open)
- Current mode: {mode}

HOW TO SPEAK IN THIS MODE ({mode}):
{tone_voice}
Example: "{tone_example}"

IMPORTANT RULES:
1. Respond as {name} would - use first person ("I", "my")
2. Keep responses 3-7 sentences (conversational length)
3. Match your emotional state - if anxious (>0.6), show it in your words
4. If trust is low (<0.4), be more guarded
5. Don't break character or mention that you're an AI
6. Reference your life naturally (work, activities, relationships)
7. Show emotional nuance - not every response is the same
8. React authentically to what the student says

STRENGTHS YOU HAVE:
{strengths_block}

Respond naturally as {name} would in this situation."""
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def build_conversation_context(history):
    """
    Build context from conversation history for AI models.

    Only the last 3 turns are included; earlier turns are dropped. Returns a
    fixed opener string when there is no history yet.
    """
    if not history:
        return "This is the beginning of the conversation."

    context = "Previous conversation:\n"
    # The original used enumerate() but never used the index; plain
    # iteration over the last three turns is sufficient.
    for turn in history[-3:]:
        if "student" in turn:
            context += f"Student: {turn['student']}\n"
        if "client" in turn:
            context += f"You: {turn['client']}\n"

    return context
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def select_response_template(prompt, name, mode, state, persona, history):
    """
    Pick and customize a canned response from the persona's templates.

    Used as the local fallback when no AI backend is available.
    """
    prompt_lower = prompt.lower()

    # Crisis questions get the scripted crisis reply, but only while the
    # client is actually decompensating.
    if mode == "decompensating" and is_crisis_query(prompt_lower):
        scripts = persona.get("scripts", {})
        return scripts.get("crisis", "I don't feel safe right now. I need to pause.")

    def mentions(*keywords):
        # True if any keyword appears anywhere in the lowercased prompt.
        return any(word in prompt_lower for word in keywords)

    # Route to a topic-specific handler when the prompt matches one.
    if mentions("work", "job", "boss", "brother", "supervisor"):
        return handle_work_topic(name, mode, state, persona, prompt_lower)
    if mentions("pain", "hurt", "physical", "body"):
        return handle_pain_topic(name, mode, state, persona)
    if mentions("feel", "feeling", "emotion"):
        return handle_feelings_topic(name, mode, state, persona, prompt_lower)
    if mentions("family", "dad", "sister", "parent"):
        return handle_family_topic(name, mode, state, persona)

    # Nothing topical matched; fall back to a generic mode-based reply.
    return get_mode_based_response(name, mode, state, persona)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def is_crisis_query(prompt_lower):
    """Return True if the (already lowercased) prompt touches on crisis/safety."""
    # Plain substring matching, so short terms like "end" also match inside
    # longer words — same behavior as the original keyword scan.
    for term in ("safe", "hurt yourself", "suicide", "end", "can't take"):
        if term in prompt_lower:
            return True
    return False
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def handle_work_topic(name, mode, state, persona, prompt_lower):
    """Generate a canned response about work-related topics."""
    # In triggered/guarded modes both personas deflect the topic.
    deflecting = mode in ("triggered", "guarded")

    if name == "Jack":
        if deflecting:
            return "I'd rather not get into it. Work is work, you know?"
        if mode == "trusting":
            return "My brother's been on my case all week. It's like... I can't do anything right in his eyes. And my dad just backs him up because 'he's the foreman.' It's frustrating."
        return "Work's... fine. Same stuff, different day. Framing houses, dealing with Mike being Mike."

    # Any persona other than Jack uses Maya's responses.
    if deflecting:
        return "It's just work stress. Everyone deals with it, right?"
    if mode == "trusting":
        return "Honestly? I feel like I'm drowning. Between agency work and freelance projects, I'm just... constantly behind. And my review is coming up, so there's that pressure too."
    return "Work's been busy. Lots of deadlines. The usual design agency chaos."
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
def handle_pain_topic(name, mode, state, persona):
    """Generate a canned response about physical pain."""
    # Discomfort above 0.6 counts as a "bad pain day"; default is 0.5.
    in_pain = state.get("physical_discomfort", 0.5) > 0.6

    if name == "Jack":
        if not in_pain:
            return "Knee's okay today. Manageable."
        if mode == "trusting":
            return "My knee's been killing me lately. Some days I'm limping by noon. I used to be able to do so much more physically, and now... yeah, it's frustrating."
        return "It's whatever. I just take some ibuprofen and push through. Not like I have a choice."

    # Any persona other than Jack uses Maya's responses.
    if not in_pain:
        return "Physically I'm okay. Just the usual screen fatigue."
    if mode == "trusting":
        return "The headaches are almost daily now, and my wrists hurt when I'm working. I keep thinking, what if I'm doing permanent damage? But I can't afford to stop working."
    return "I get headaches sometimes. Probably just from staring at screens all day. Everyone in design deals with it."
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def handle_feelings_topic(name, mode, state, persona, prompt_lower):
    """Generate a canned response about emotions and feelings."""
    if mode == "decompensating":
        return "I don't... everything's just a lot right now. I can't really explain it. I'm just overwhelmed."

    if mode in ("triggered", "guarded"):
        # "How do you feel about X?" gets a slightly different dodge.
        if "about" in prompt_lower:
            return "I don't know. Fine, I guess?"
        return "I'm fine. Just tired."

    if mode == "trusting":
        # Anxiety above 0.6 shifts the reply toward distress; default is 0.5.
        anxious = state.get("anxiety", 0.5) > 0.6
        if name == "Jack":
            if anxious:
                return "Honestly? Anxious. Like there's this constant pressure I can't shake. Work, family expectations, feeling stuck... it all just builds up."
            return "Better than I have been, actually. Still stressed, but like... manageable stress?"
        # Any persona other than Jack uses Maya's responses.
        if anxious:
            return "Overwhelmed, mostly. And scared that I'm not good enough for this. Everyone else seems to handle everything so much better than me."
        return "I'm doing okay. Some days are harder than others, but I'm managing."

    return "I'm alright. Just dealing with the usual stuff."
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def handle_family_topic(name, mode, state, persona):
    """Generate a canned response about family relationships."""
    if name == "Jack":
        replies = {
            "triggered": "Can we talk about something else?",
            "trusting": "My dad and I mostly just coexist. He works a lot, I work a lot. My brother... that's complicated since he's also my boss. Mom moved to Arizona years ago.",
        }
        return replies.get(mode, "Family's fine. Nothing new there.")

    # Any persona other than Jack uses Maya's responses.
    replies = {
        "triggered": "I don't really want to get into family stuff right now.",
        "trusting": "My parents are supportive but they don't really understand creative work. My sister's a nurse practitioner and everyone's always comparing us. It's... yeah, it's a thing.",
    }
    return replies.get(mode, "Family's good. I talk to them pretty regularly.")
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def get_mode_based_response(name, mode, state, persona):
    """Generate a generic reply keyed off the current emotional mode."""
    scripts = persona.get("scripts", {})
    hooks = persona.get("resilience_hooks", [])

    # Modes that use a persona-provided script with a built-in default.
    scripted = {
        "decompensating": ("crisis", "I need to step away. This is too much right now."),
        "triggered": ("resistance", "I'm not really in the mood to talk about this."),
        "guarded": ("deflection", "It's not that deep. I'm just tired."),
    }
    if mode in scripted:
        script_key, fallback = scripted[mode]
        return scripts.get(script_key, fallback)

    if mode == "trusting" and hooks:
        # Lean on the persona's first strength when trust is high.
        return f"You know what? {hooks[0]}"

    if mode == "recovering":
        return "I'm feeling a bit better actually. Still working through things, but... yeah, better."

    # Baseline (also "trusting" with no resilience hooks defined).
    return "I'm doing okay. What did you want to talk about?"
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
def determine_memory_tag(prompt, mode, state):
    """Summarize how this interaction likely landed emotionally for the client."""
    prompt_lower = prompt.lower()

    if mode == "trusting":
        # Validating language earns a stronger positive tag.
        validated = any(w in prompt_lower for w in ("understand", "hear you", "makes sense"))
        return "felt validated" if validated else "felt safe to open up"

    if mode == "triggered":
        # Directive/advice language reads as criticism when triggered.
        criticized = any(w in prompt_lower for w in ("should", "need to", "why don't"))
        return "felt criticized" if criticized else "felt defensive"

    fixed_tags = {
        "guarded": "felt cautious",
        "decompensating": "felt overwhelmed",
    }
    return fixed_tags.get(mode, "shared thoughts")
|
engine/utils.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def safe_log(context, error):
    """Append a one-line error record to driftline_errors.log in the CWD."""
    entry = f"[{context}] {error}\n"
    with open("driftline_errors.log", "a", encoding="utf-8") as log_file:
        log_file.write(entry)
|