# LitDigitalTwin — engine/responder.py
# Last change: "Update engine/responder.py" by jmisak (commit 16284d7, verified)
import json
import os
from engine.drift import get_current_mode, apply_response_effects, generate_teaching_note
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load the local fallback model once at module import.
# NOTE(review): this eagerly downloads/loads microsoft/phi-2 even when the
# HF Inference API or Claude paths will be used — consider lazy loading;
# confirm the startup cost is acceptable.
local_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
local_model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype="auto",  # let transformers pick the dtype for the device
    device_map="auto"    # place weights on GPU when available, else CPU
)
# NOTE(review): this import appears redundant — generate_response_hf also
# imports InferenceClient locally.
from huggingface_hub import InferenceClient
# Shared text-generation pipeline used by generate_response_local().
local_pipeline = pipeline("text-generation", model=local_model, tokenizer=local_tokenizer)
# Hugging Face Inference API
def generate_response(student_prompt, persona, conversation_history, force_mode=None, use_fast_mode=False):
    """Route a student prompt to the best available generation backend.

    Preference order: explicit fast mode -> Hugging Face Inference API
    (when HF_TOKEN is set) -> Claude (when ANTHROPIC_API_KEY is set) ->
    local model. Any unexpected failure is logged and falls back to the
    local generator so the student always gets a reply.

    Returns whatever the chosen backend returns:
    a (response_text, state, teaching_note) tuple.
    """
    try:
        if use_fast_mode:
            return generate_response_local(student_prompt, persona, conversation_history, force_mode=force_mode)
        if os.getenv("HF_TOKEN"):
            return generate_response_hf(student_prompt, persona, conversation_history, force_mode=force_mode)
        if os.getenv("ANTHROPIC_API_KEY"):
            return generate_response_claude(student_prompt, persona, conversation_history, force_mode=force_mode)
        return generate_response_local(student_prompt, persona, conversation_history, force_mode=force_mode)
    except Exception as e:
        from engine.utils import safe_log
        safe_log("Response generation error", str(e))
        # The local model is the safety net for every backend failure.
        return generate_response_local(student_prompt, persona, conversation_history, force_mode=force_mode)
def generate_response_hf(student_prompt, persona, conversation_history, force_mode=None):
    """Generate response using Hugging Face Inference API (free, non-gated models).

    Returns a (response_text, state, teaching_note) tuple; on any failure
    it falls back to generate_response_local().
    """
    try:
        from huggingface_hub import InferenceClient

        model_id = "microsoft/Phi-3-mini-4k-instruct"

        # Copy so repeated calls never mutate the persona's stored default state.
        state = persona.get("default_state", {}).copy()
        if force_mode:
            state["mode"] = force_mode
        mode = get_current_mode(state)
        state = apply_response_effects(state, student_prompt)
        mode = get_current_mode(state)

        system_prompt = build_system_prompt_for_ai(persona, state, mode)
        name = persona.get("persona_name", "Client")

        # Only the last three turns are sent, to stay within the token budget.
        messages = [{"role": "system", "content": system_prompt}]
        for turn in conversation_history[-3:]:
            if "student" in turn:
                messages.append({"role": "user", "content": turn["student"]})
            if "client" in turn:
                messages.append({"role": "assistant", "content": turn["client"]})
        messages.append({"role": "user", "content": student_prompt})

        client = InferenceClient(token=os.getenv("HF_TOKEN"))
        try:
            response = client.chat_completion(
                messages=messages,
                model=model_id,
                max_tokens=150,
                temperature=0.7,
                stream=False,
                # BUG FIX: chat_completion takes `stop`, not `stop_sequences`;
                # the wrong keyword raised TypeError on every call and always
                # forced the local fallback.
                stop=[f"{name}:", "Student:", "Interviewer:"],
            )
            response_text = response.choices[0].message.content.strip()
        except Exception as model_error:
            from engine.utils import safe_log
            safe_log("HF model Phi-3-mini failed", str(model_error))
            response_text = None

        if not response_text:
            raise Exception("All HF models failed")

        # Keep a rolling window of the last five emotional-memory tags.
        if "emotional_memory" in state:
            if not isinstance(state["emotional_memory"], list):
                state["emotional_memory"] = []
            # BUG FIX: determine_memory_tag is not defined anywhere in this
            # module; tag_emotional_memory is the sibling helper with the
            # same (prompt, mode, state) signature.
            memory_tag = tag_emotional_memory(student_prompt, mode, state)
            state["emotional_memory"].append(memory_tag)
            state["emotional_memory"] = state["emotional_memory"][-5:]

        teaching_note = generate_teaching_note(state, student_prompt, mode)
        # BUG FIX: this previously referenced an undefined name `model`,
        # raising NameError after a successful generation.
        teaching_note += f"\n\n💡 Response generated using {model_id}"
        return response_text, state, teaching_note
    except Exception as e:
        from engine.utils import safe_log
        safe_log("HF Inference API error", str(e))
        return generate_response_local(student_prompt, persona, conversation_history, force_mode=force_mode)
def generate_response_claude(student_prompt, persona, conversation_history, force_mode=None):
    """
    Generate response using Claude API (optional premium feature).

    Mirrors generate_response_hf: returns (response_text, state,
    teaching_note) and falls back to the local generator on any error.
    """
    try:
        import anthropic

        # BUG FIX: copy the default state (as the HF/local paths do) so this
        # call does not mutate the persona dict in place.
        state = persona.get("default_state", {}).copy()
        # BUG FIX: force_mode was silently ignored on the Claude path, making
        # it inconsistent with the HF and local generators.
        if force_mode:
            state["mode"] = force_mode
        mode = get_current_mode(state)
        # Apply response effects to state
        state = apply_response_effects(state, student_prompt)
        mode = get_current_mode(state)

        # Build prompts
        system_prompt = build_system_prompt_for_ai(persona, state, mode)
        conversation_context = build_conversation_context(conversation_history)

        # Call Claude API
        client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
        message = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=400,
            system=system_prompt,
            messages=[
                {"role": "user", "content": f"{conversation_context}\n\nOT Student: {student_prompt}"}
            ]
        )
        response_text = message.content[0].text

        # Keep a rolling window of the last five emotional-memory tags.
        if "emotional_memory" in state:
            if not isinstance(state["emotional_memory"], list):
                state["emotional_memory"] = []
            # BUG FIX: determine_memory_tag does not exist in this module;
            # use the local tag_emotional_memory helper instead.
            memory_tag = tag_emotional_memory(student_prompt, mode, state)
            state["emotional_memory"].append(memory_tag)
            state["emotional_memory"] = state["emotional_memory"][-5:]

        teaching_note = generate_teaching_note(state, student_prompt, mode)
        teaching_note += "\n\n✨ Response generated using Claude AI (Premium)"
        return response_text, state, teaching_note
    except Exception as e:
        from engine.utils import safe_log
        safe_log("Claude API error", str(e))
        return generate_response_local(student_prompt, persona, conversation_history, force_mode=force_mode)
def generate_fallback_response(prompt, name, mode, state, persona):
    """Minimal fallback response using emotional state and persona scripts."""
    scripts = persona.get("scripts", {})
    hooks = persona.get("resilience_hooks", [])
    example_line = persona.get("tone_guidance", {}).get(mode, {}).get("example", "")

    # Modes that map straight to a persona script (with a canned default).
    scripted_modes = {
        "decompensating": ("crisis", "I need to step away. This is too much right now."),
        "triggered": ("resistance", "I will not speak of that."),
        "guarded": ("deflection", "It’s not something I want to talk about."),
    }
    if mode in scripted_modes:
        script_key, default_line = scripted_modes[mode]
        return scripts.get(script_key, default_line)

    if mode == "trusting":
        # A resilience hook takes precedence over the generic breakthrough line.
        if hooks:
            return hooks[0]
        return scripts.get("breakthrough", example_line or "I think I’m ready to say more.")

    if mode == "recovering":
        return "I’m still sorting through things. But I’m here."

    # Baseline fallback
    return example_line or "I’m doing okay. What did you want to talk about?"
def generate_response_local(student_prompt, persona, conversation_history, force_mode=None):
    """
    Local response generation using Transformers pipeline.
    Used for Fast Mode or fallback when no external API is available.

    Returns a (response_text, state, teaching_note) tuple.
    """
    # Work on a copy so the persona's stored default state is never mutated.
    state = persona.get("default_state", {}).copy()
    if force_mode:
        state["mode"] = force_mode
    mode = get_current_mode(state)
    name = persona.get("persona_name", "Client")

    # Fold the student's prompt into the emotional state, then re-read the mode.
    state = apply_response_effects(state, student_prompt)
    mode = get_current_mode(state)

    # Assemble the generation prompt: system framing + recent turns + new turn.
    system_prompt = build_system_prompt_for_ai(persona, state, mode)
    context = build_conversation_context(conversation_history)
    full_prompt = f"{system_prompt}\n\n{context}\nStudent: {student_prompt}\n{name}:"

    generated = local_pipeline(
        full_prompt,
        max_new_tokens=250,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )
    # The pipeline echoes the prompt; keep only the text after the final
    # "<name>:" marker — that is the character's reply.
    response = generated[0]["generated_text"].split(f"{name}:")[-1].strip()

    # Keep a rolling window of the last five emotional-memory tags.
    if "emotional_memory" in state:
        if not isinstance(state["emotional_memory"], list):
            state["emotional_memory"] = []
        state["emotional_memory"].append(tag_emotional_memory(student_prompt, mode, state))
        state["emotional_memory"] = state["emotional_memory"][-5:]

    teaching_note = generate_teaching_note(state, student_prompt, mode)
    teaching_note += "\n\n⚡ Response generated using local AI model"
    return response, state, teaching_note
def build_system_prompt_for_ai(persona, state, mode):
    """
    Build a rich system prompt for AI models to generate emotionally grounded, in-character responses.
    """
    name = persona.get("persona_name", "Client")
    age = persona.get("age", "")
    role = persona.get("role", "")
    overview = persona.get("system_prompt", "")

    # Tone guidance keyed by the current emotional mode.
    tone_info = persona.get("tone_guidance", {}).get(mode, {})
    voice = tone_info.get("voice", "Natural and authentic")
    example = tone_info.get("example", f"{name} speaks with emotional nuance and restraint.")

    # Cap background detail so the prompt stays compact.
    fact_block = "\n".join(f"- {fact}" for fact in persona.get("facts", [])[:5])
    hook_block = "\n".join(f"- {hook}" for hook in persona.get("resilience_hooks", [])[:3])

    # Most recent emotional-memory tag, if any have been recorded.
    memory = state.get("emotional_memory", [])
    latest_memory = memory[-1] if memory else None

    prompt = f"""You are {name}, a {age}-year-old {role}. Stay fully in character and respond naturally.
CHARACTER OVERVIEW:
{overview}
KEY BACKGROUND:
{fact_block}
CURRENT EMOTIONAL STATE:
- Anxiety: {state.get('anxiety', 0):.2f} (0 = calm, 1 = crisis)
- Trust: {state.get('trust', 0):.2f} (0 = guarded, 1 = trusting)
- Openness: {state.get('openness', 0):.2f} (0 = closed, 1 = very open)
- Mode: {mode}
"""
    if latest_memory:
        prompt += f"\nRECENT EMOTIONAL MEMORY:\n- {latest_memory}\n"
    prompt += f"""
HOW TO SPEAK IN THIS MODE ({mode}):
- Tone: {voice}
- Example: "{example}"
STRENGTHS TO DRAW FROM:
{hook_block}
RESPONSE GUIDELINES:
1. Speak as {name} — use first person ("I", "my")
2. Keep responses 3–7 sentences (natural conversation length)
3. Match your emotional state — show anxiety, guardedness, or openness as appropriate
4. Do not mention that you are an AI or break character
5. Reference your life naturally — relationships, work, memories
6. React authentically to what the student says
7. Avoid generic or repetitive phrasing
8. Show emotional nuance — not every response is the same
Respond as {name} would in this moment.
"""
    return prompt
def build_conversation_context(history):
    """
    Build a brief, emotionally relevant context from recent conversation turns.
    """
    if not history:
        return "This is the beginning of the conversation."

    pieces = ["Recent conversation:\n"]
    # Only the last three turns — enough to carry the emotional thread.
    for turn in history[-3:]:
        student_text = turn.get("student", "").strip()
        client_text = turn.get("client", "").strip()
        if student_text:
            pieces.append(f"Student: {student_text}\n")
        if client_text:
            pieces.append(f"{turn.get('persona_name', 'Client')}: {client_text}\n")
    return "".join(pieces)
def handle_emotional_tension_topic(name, mode, state, persona, prompt_lower):
    """Generate responses about emotional tension and dramatic pressure."""
    if mode == "decompensating":
        return "This truth... it weighs heavier than I imagined. I cannot bear it."
    if mode in ("triggered", "guarded"):
        return "I am composed. Do not mistake silence for weakness."
    if mode == "trusting":
        # High tension reads as inner turmoil; lower tension as an uneasy calm.
        if state.get("emotional_tension", 0.5) > 0.6:
            return "There is a storm inside me. I act, then think — or worse, I think and never act."
        return "I am steady, for now. But the ground beneath me is never still."
    return "I am as calm as one can be in a world ruled by fate."
def handle_relationship_topic(name, mode, state, persona):
    """Generate responses about family or key relationships."""
    # Per-character lines keyed by persona name; each entry is a
    # (triggered, trusting, otherwise) triple.
    lines_by_name = {
        "Oedipus": (
            "Do not speak of my bloodline. That path is cursed.",
            "I loved Jocasta as a wife, not knowing she was my mother. The gods are cruel.",
            "My family is a riddle I should never have solved.",
        ),
        "Jocasta": (
            "Enough. Some truths should remain buried.",
            "I tried to protect him — my son, my husband. I tried to stop the prophecy.",
            "I did what I could to hold our world together.",
        ),
        "Creon": (
            "I am loyal to the crown, not to chaos.",
            "I never sought power. I only wanted peace for Thebes.",
            "Family matters little when the city is at stake.",
        ),
        "Tiresias": (
            "You question me, yet you fear the truth I carry.",
            "I have watched generations rise and fall. My bond is with the gods, not with men.",
            "I speak what must be spoken. Relationships are fleeting — prophecy endures.",
        ),
        "Hamlet": (
            "My mother betrayed my father. What more is there to say?",
            "I loved Ophelia. I did. But love is a casualty in this war of ghosts.",
            "Family is a stage. Everyone plays their part, even in grief.",
        ),
        "Gertrude": (
            "You do not understand the choices I had to make.",
            "I married Claudius because I feared the silence. I feared being alone.",
            "I am a mother, a queen, a widow. None of those roles are simple.",
        ),
        "Laertes": (
            "Speak not of my sister. Her death is on Hamlet’s hands.",
            "Ophelia was gentle, too gentle for this world. I failed to protect her.",
            "Family is honor. And honor demands justice.",
        ),
        "Ophelia": (
            "I would give you some violets, but they withered all when my father died.",
            "Laertes was kind. Hamlet was... something else. I loved them both, in different ways.",
            "There’s rosemary, that’s for remembrance.",
        ),
        "Eveline": (
            "I will not speak of that.",
            "My mother asked me to keep the house together. I try. I do.",
            "They need me. I know they do.",
        ),
        "John Keegan": (
            "I don’t talk about family. I protect them. That’s enough.",
            "Pauline made me better. Chrissy and Cara keep me grounded. Johnny... he’s still figuring me out.",
            "Family’s complicated. I do the job. That’s what I know.",
        ),
        "Arianna Nunez": (
            "I’m not here to be anyone’s daughter. I earned my place.",
            "Chrissy asked if I was scared. I told her fear’s not the enemy — silence is.",
            "I respect Keegan. Doesn’t mean I want to be him.",
        ),
        "Jimmy": (
            "Family? You mean the people who taught me how to lie?",
            "My brother used to cover for me. I still owe him for that.",
            "I keep my distance. It’s safer that way.",
        ),
        "Sean": (
            "I don’t owe anyone explanations. Blood doesn’t mean loyalty.",
            "Brendan’s the only one who ever really saw me. That counts for something.",
            "Family’s a story I stopped telling.",
        ),
        "Brendan": (
            "Sean’s got his demons. I’ve got mine. We don’t mix well.",
            "He’s my brother. I’d take a bullet for him. Doesn’t mean I like him.",
            "We grew up fast. Too fast to stay close.",
        ),
        "Dave": (
            "I don’t talk about my dad. Not unless you want a broken nose.",
            "My sister used to sing to me when I couldn’t sleep. I miss that.",
            "Family’s noise. I prefer silence.",
        ),
        "Karl Lavin": (
            "Keegan’s like a brick wall. You lean on him, you break your ribs.",
            "He’s my partner. I’ve seen him bleed for people he barely knows. That’s family.",
            "We don’t hug. We solve murders. That’s our bond.",
        ),
        "Joel": (
            "I lost my daughter. Don’t ask me to lose another.",
            "Ellie’s not just cargo. She’s... she’s everything now.",
            "Family’s what you protect. Even when it breaks you.",
        ),
        "Ellie": (
            "Everyone I’ve ever cared about either died or left me. So yeah, I’ve got trust issues.",
            "Joel’s stubborn, grumpy, and kind of a pain. But he’s mine. He’s family.",
            "I don’t know what family means anymore. But I know what it feels like to fight for someone.",
        ),
        "Uncle Ben": (
            "I tried to teach him. I did. But you can’t always stop what’s coming.",
            "Peter’s got a good heart. He just needs to remember that with great power...",
            "Family’s not about blood. It’s about responsibility.",
        ),
        "The Lady from The Yellow Wallpaper": (
            "He says I must rest. That I must not think. But I see her — behind the paper.",
            "John is my husband. He means well. But he does not see me.",
            "They call it care. I call it confinement. I am not what they believe.",
        ),
    }

    character_lines = lines_by_name.get(name)
    if character_lines is None:
        # Unknown persona: generic literary fallback.
        return "Relationships are threads in a tapestry — some fray, some bind."

    triggered_line, trusting_line, default_line = character_lines
    if mode == "triggered":
        return triggered_line
    if mode == "trusting":
        return trusting_line
    return default_line
def get_dramatic_mode_response(name, mode, state, persona):
    """Generate generic dramatic response based on current mode."""
    scripts = persona.get("scripts", {})
    hooks = persona.get("resilience_hooks", [])

    # Script-backed (or canned) lines for the non-trusting modes.
    mode_lines = {
        "decompensating": scripts.get("collapse", "I cannot continue. The truth has undone me."),
        "triggered": scripts.get("defensive", "You tread dangerous ground."),
        "guarded": scripts.get("reserved", "I will not speak of that."),
        "recovering": "I see more clearly now. The pain has not vanished, but I walk forward.",
    }
    if mode in mode_lines:
        return mode_lines[mode]

    if mode == "trusting" and hooks:
        # Surface the persona's first resilience hook as a moment of openness.
        return f"You wish to understand? Then know this: {hooks[0]}"

    return "Ask what you will. I am listening."
def tag_emotional_memory(prompt, mode, state):
    """Generate a literary emotional memory tag based on the interaction."""
    text = prompt.lower()

    def mentions(*cues):
        # True when any cue word appears in the student's prompt.
        return any(cue in text for cue in cues)

    if mode == "trusting":
        return "revealed vulnerability" if mentions("why", "how", "tell me") else "shared guarded truth"
    if mode == "triggered":
        return "felt attacked" if mentions("accuse", "blame", "should") else "felt exposed"
    if mode == "guarded":
        return "withheld emotion"
    if mode == "decompensating":
        return "collapsed under pressure"
    return "engaged in reflection"