# Source: Agentic-Reliability-Framework-API / utils / psychology_layer_enhanced.py
# History: renamed from utils/psychology_layer.py (commit 65776b9, verified).
# NOTE: the original scrape captured Hugging Face blob-viewer chrome here
# ("raw / history / blame / 29.3 kB"); it was page furniture, not module code,
# and is preserved only as this comment so the file remains valid Python.
"""
Enhanced Psychology Layer with Prospect Theory Mathematics
PhD-Level Psychological Optimization for Investor Demos
"""
import random
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
class PsychologicalPrinciple(Enum):
    """Psychological principles with mathematical implementations"""
    # Each member's value is the snake_case tag used to select a persuasion
    # strategy; the string values are part of the public contract — do not
    # rename them without auditing callers.
    LOSS_AVERSION = "loss_aversion"    # losses loom larger than equivalent gains
    PROSPECT_THEORY = "prospect_theory"  # Kahneman & Tversky value/weighting math
    SOCIAL_PROOF = "social_proof"      # credibility via peer adoption
    SCARCITY = "scarcity"              # urgency from limited time/availability
    AUTHORITY = "authority"            # trust via certifications/endorsements
    ANCHORING = "anchoring"            # reference-price framing
@dataclass
class ProspectTheoryParameters:
    """Kahneman & Tversky's Prospect Theory parameters.

    Defaults are the median estimates from Tversky & Kahneman (1992).

    Raises:
        ValueError: if any parameter is outside its valid range.
    """
    alpha: float = 0.88         # Risk aversion for gains (0 < α ≤ 1)
    beta: float = 0.88          # Risk seeking for losses (0 < β ≤ 1)
    lambda_param: float = 2.25  # Loss aversion coefficient (λ > 1)
    gamma: float = 0.61         # Probability weighting exponent for gains
    delta: float = 0.69         # Probability weighting exponent for losses

    def __post_init__(self) -> None:
        """Validate parameters.

        FIX: uses explicit ``raise ValueError`` instead of ``assert`` so that
        validation is not stripped away when Python runs with ``-O``.
        """
        if not 0 < self.alpha <= 1:
            raise ValueError("Alpha must be between 0 and 1")
        if not 0 < self.beta <= 1:
            raise ValueError("Beta must be between 0 and 1")
        if not self.lambda_param > 1:
            raise ValueError("Lambda must be greater than 1")
        if not 0 < self.gamma <= 1:
            raise ValueError("Gamma must be between 0 and 1")
        if not 0 < self.delta <= 1:
            raise ValueError("Delta must be between 0 and 1")
class ProspectTheoryEngine:
"""Mathematical implementation of Kahneman & Tversky's Prospect Theory"""
def __init__(self, params: ProspectTheoryParameters = None):
self.params = params or ProspectTheoryParameters()
def value_function(self, x: float) -> float:
"""
Kahneman & Tversky's value function:
v(x) = { x^α if x ≥ 0, -λ(-x)^β if x < 0 }
For risk scores (always positive loss domain):
perceived_loss = risk_score^α * λ
"""
if x >= 0:
# Gains domain (not typically used for risk)
return x ** self.params.alpha
else:
# Loss domain (risk is always positive loss)
return -self.params.lambda_param * ((-x) ** self.params.beta)
def probability_weighting(self, p: float, is_gain: bool = False) -> float:
"""
Probability weighting function π(p)
Overweights small probabilities, underweights large probabilities
π(p) = p^γ / (p^γ + (1-p)^γ)^(1/γ) for gains
π(p) = p^δ / (p^δ + (1-p)^δ)^(1/δ) for losses
"""
if p == 0:
return 0
if p == 1:
return 1
gamma = self.params.gamma if is_gain else self.params.delta
numerator = p ** gamma
denominator = (p ** gamma + (1 - p) ** gamma) ** (1 / gamma)
return numerator / denominator
def weighted_perceived_risk(self, risk_score: float) -> float:
"""
Calculate prospect-theory weighted perceived risk
Combines value function with probability weighting
"""
# Loss domain (risk is always positive loss)
base_value = self.value_function(-risk_score) # Negative because it's a loss
# Probability weighting for losses
weighted_prob = self.probability_weighting(risk_score, is_gain=False)
# Combine
perceived_risk = abs(base_value) * weighted_prob
return min(1.0, perceived_risk)
def calculate_psychological_impact(self, risk_score: float, license_tier: str) -> Dict[str, Any]:
"""
Multi-dimensional psychological impact calculation
Based on Prospect Theory with tier-specific adjustments
"""
# Base perceived risk using Prospect Theory
perceived_risk = self.weighted_perceived_risk(risk_score)
# License-tier anxiety multiplier (enterprise reduces anxiety)
anxiety_multipliers = {
'oss': 1.3, # Higher anxiety without protection
'trial': 1.0, # Balanced with temporary protection
'starter': 0.9, # Some protection
'professional': 0.8, # Good protection
'enterprise': 0.7 # Full protection
}
final_anxiety = perceived_risk * anxiety_multipliers.get(license_tier, 1.0)
# Conversion probability based on anxiety and tier (sigmoid function)
# Higher anxiety → higher conversion probability up to a point
conversion_probability = self._sigmoid_conversion(final_anxiety, license_tier)
# Urgency score (derivative of anxiety)
urgency_score = min(1.0, final_anxiety * 1.2)
# Loss aversion weight (tier-specific)
loss_aversion_weight = self.params.lambda_param * (1 + (license_tier == 'oss') * 0.5)
return {
'perceived_risk': round(perceived_risk, 3),
'anxiety_level': round(final_anxiety, 3),
'conversion_probability': round(conversion_probability, 3),
'urgency_score': round(urgency_score, 3),
'loss_aversion_weight': round(loss_aversion_weight, 2),
'psychological_impact_category': self._categorize_impact(final_anxiety),
'prospect_theory_parameters': {
'alpha': self.params.alpha,
'beta': self.params.beta,
'lambda': self.params.lambda_param,
'gamma': self.params.gamma,
'delta': self.params.delta
}
}
def _sigmoid_conversion(self, anxiety: float, license_tier: str) -> float:
"""Sigmoid function for conversion probability"""
# Base conversion curve
x = (anxiety - 0.5) * 3 # Center at 0.5 anxiety, scale by 3
# Sigmoid with tier-specific adjustments
base_sigmoid = 1 / (1 + np.exp(-x))
# Tier multipliers (enterprise users convert more easily)
tier_multipliers = {
'oss': 0.6,
'trial': 0.8,
'starter': 0.85,
'professional': 0.9,
'enterprise': 0.95
}
multiplier = tier_multipliers.get(license_tier, 0.8)
converted = base_sigmoid * multiplier
# Add minimum conversion probability
return min(0.95, max(0.1, converted))
def _categorize_impact(self, anxiety: float) -> str:
"""Categorize psychological impact"""
if anxiety > 0.8:
return "CRITICAL_IMPACT"
elif anxiety > 0.6:
return "HIGH_IMPACT"
elif anxiety > 0.4:
return "MODERATE_IMPACT"
elif anxiety > 0.2:
return "LOW_IMPACT"
else:
return "MINIMAL_IMPACT"
class BayesianSocialProofEngine:
    """Bayesian social proof optimization with credibility updating.

    Each proof type carries a Beta-distribution prior; a user-type-specific
    likelihood performs a pseudo-Bayesian update, and the proof with the
    highest posterior mean credibility is selected and formatted.
    """

    def __init__(self):
        # Beta distribution priors for different proof types:
        # α = successes + 1, β = failures + 1
        self.priors = {
            'fortune_500': (9, 2),        # α=9, β=2 → 82% prior credibility
            'scaleup': (7, 4),            # α=7, β=4 → 64% prior credibility
            'developer_count': (8, 3),    # α=8, β=3 → 73% prior credibility
            'savings': (10, 1),           # α=10, β=1 → 91% prior credibility
            'incident_reduction': (9, 2), # 82% prior credibility
            'compliance': (8, 2),         # 80% prior credibility
        }
        # User type profiles with likelihood weights (how persuasive each
        # proof type is expected to be for that audience).
        self.user_profiles = {
            'engineer': {
                'fortune_500': 0.6,
                'scaleup': 0.8,
                'developer_count': 0.9,
                'savings': 0.7,
                'incident_reduction': 0.95,
                'compliance': 0.5
            },
            'executive': {
                'fortune_500': 0.9,
                'savings': 0.95,
                'scaleup': 0.7,
                'incident_reduction': 0.85,
                'compliance': 0.9,
                'developer_count': 0.4
            },
            'investor': {
                'savings': 0.9,
                'fortune_500': 0.85,
                'growth': 0.8,  # NOTE: no matching prior exists, so this key is skipped
                'incident_reduction': 0.75,
                'compliance': 0.7,
                'scaleup': 0.6
            },
            'compliance_officer': {
                'compliance': 0.95,
                'fortune_500': 0.8,
                'incident_reduction': 0.85,
                'savings': 0.6,
                'developer_count': 0.3,
                'scaleup': 0.4
            }
        }
        # Display templates for each proof type.
        self.proof_templates = {
            'fortune_500': {
                'title': '🏢 Trusted by Fortune 500',
                'message': 'Deployed at 50+ Fortune 500 companies including FAANG',
                'icon': '🏢',
                'credibility_baseline': 0.85
            },
            'scaleup': {
                'title': '🚀 Scale-up Proven',
                'message': 'Trusted by 200+ high-growth tech scale-ups',
                'icon': '🚀',
                'credibility_baseline': 0.75
            },
            'developer_count': {
                'title': '👨‍💻 Developer Love',
                'message': 'Join 1,000+ active developers using ARF for AI safety',
                'icon': '👨‍💻',
                'credibility_baseline': 0.8
            },
            'savings': {
                'title': '💰 Proven Savings',
                'message': 'Average $3.9M breach cost prevented, 92% incident reduction',
                'icon': '💰',
                'credibility_baseline': 0.9
            },
            'incident_reduction': {
                'title': '🛡️ Risk Reduction',
                'message': '92% of incidents prevented with mechanical gates',
                'icon': '🛡️',
                'credibility_baseline': 0.88
            },
            'compliance': {
                'title': '📋 Compliance Ready',
                'message': 'SOC 2, GDPR, ISO 27001 certified with zero findings',
                'icon': '📋',
                'credibility_baseline': 0.82
            }
        }

    def get_optimized_proof(self, user_type: str, license_tier: str,
                            risk_context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Get psychologically optimized social proof using Bayesian updating.

        Unknown user types fall back to the 'engineer' profile. Returns a
        formatted proof dict (see _format_proof) or a default proof when no
        posterior could be computed.
        """
        user_type = user_type if user_type in self.user_profiles else 'engineer'
        user_profile = self.user_profiles[user_type]
        # Compute a posterior credibility for each proof type with both a
        # prior and a likelihood for this user profile.
        posteriors = {}
        for proof_type, (alpha_prior, beta_prior) in self.priors.items():
            if proof_type not in user_profile:
                continue
            likelihood = user_profile[proof_type]
            # Pseudo-Bayesian update: treat the likelihood as 10 Bernoulli
            # observations — Posterior = Beta(α + successes, β + failures).
            successes = likelihood * 10
            failures = (1 - likelihood) * 10
            posterior_alpha = alpha_prior + successes
            posterior_beta = beta_prior + failures
            posterior_mean = posterior_alpha / (posterior_alpha + posterior_beta)
            posterior_variance = (posterior_alpha * posterior_beta) / \
                ((posterior_alpha + posterior_beta) ** 2 * \
                 (posterior_alpha + posterior_beta + 1))
            posteriors[proof_type] = {
                'credibility': posterior_mean,
                'confidence': 1 - posterior_variance,
                'alpha': posterior_alpha,
                'beta': posterior_beta,
                'likelihood': likelihood
            }
        if not posteriors:
            return self._get_default_proof(license_tier)
        # Select the proof type with the highest posterior mean credibility.
        best_proof_type = max(posteriors.items(), key=lambda x: x[1]['credibility'])[0]
        best_proof_data = posteriors[best_proof_type]
        return self._format_proof(
            best_proof_type,
            best_proof_data,
            user_type,
            license_tier,
            risk_context
        )

    def _format_proof(self, proof_type: str, proof_data: Dict[str, Any],
                      user_type: str, license_tier: str,
                      risk_context: Dict[str, Any]) -> Dict[str, Any]:
        """Format social proof with credibility metrics."""
        template = self.proof_templates.get(
            proof_type,
            self.proof_templates['developer_count']
        )
        # Tier-specific call-to-action appended to the proof message.
        tier_adjustments = {
            'trial': "Start your free trial today",
            'starter': "Upgrade to Starter for mechanical gates",
            'professional': "Professional includes 24/7 support",
            'enterprise': "Enterprise includes dedicated support"
        }
        tier_note = tier_adjustments.get(license_tier)
        # FIX: tiers without an adjustment (e.g. 'oss') previously produced a
        # dangling '. ' suffix; only append the CTA when one exists.
        if tier_note:
            adjusted_message = f"{template['message']}. {tier_note}"
        else:
            adjusted_message = template['message']
        return {
            **template,
            'message': adjusted_message,
            'proof_type': proof_type,
            'credibility': round(proof_data['credibility'], 3),
            'confidence': round(proof_data['confidence'], 3),
            'credibility_interval': self._calculate_credibility_interval(
                proof_data['alpha'], proof_data['beta']
            ),
            'optimized_for': user_type,
            'recommended_for_tier': license_tier,
            'risk_context_match': self._assess_risk_context_match(proof_type, risk_context),
            'bayesian_parameters': {
                'prior_alpha': self.priors[proof_type][0],
                'prior_beta': self.priors[proof_type][1],
                'posterior_alpha': proof_data['alpha'],
                'posterior_beta': proof_data['beta'],
                'likelihood': proof_data['likelihood']
            }
        }

    def _calculate_credibility_interval(self, alpha: float, beta: float,
                                        confidence: float = 0.95) -> Tuple[float, float]:
        """Calculate an approximate credibility interval for a Beta distribution.

        Uses a normal approximation (mean ± 1.96·σ) clamped to [0, 1].
        NOTE(review): the ``confidence`` parameter is currently unused — the
        z-score is fixed at 1.96 (≈95%); wire it up or drop it.
        """
        mean = alpha / (alpha + beta)
        variance = (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
        std_dev = np.sqrt(variance)
        lower = max(0, mean - 1.96 * std_dev)
        upper = min(1, mean + 1.96 * std_dev)
        return round(lower, 3), round(upper, 3)

    def _assess_risk_context_match(self, proof_type: str, risk_context: Dict[str, Any]) -> float:
        """Score how well a proof type matches the risk category (0.7 default)."""
        risk_score = risk_context.get('risk_score', 0.5)
        risk_category = risk_context.get('risk_category', 'MEDIUM')
        # Proof effectiveness by risk level.
        effectiveness = {
            'fortune_500': {'LOW': 0.7, 'MEDIUM': 0.8, 'HIGH': 0.9, 'CRITICAL': 0.95},
            'savings': {'LOW': 0.6, 'MEDIUM': 0.8, 'HIGH': 0.9, 'CRITICAL': 0.95},
            'incident_reduction': {'LOW': 0.5, 'MEDIUM': 0.7, 'HIGH': 0.85, 'CRITICAL': 0.9},
            'compliance': {'LOW': 0.6, 'MEDIUM': 0.7, 'HIGH': 0.8, 'CRITICAL': 0.85},
            'developer_count': {'LOW': 0.8, 'MEDIUM': 0.7, 'HIGH': 0.6, 'CRITICAL': 0.5},
            'scaleup': {'LOW': 0.7, 'MEDIUM': 0.75, 'HIGH': 0.8, 'CRITICAL': 0.7}
        }
        return effectiveness.get(proof_type, {}).get(risk_category, 0.7)

    def _get_default_proof(self, license_tier: str) -> Dict[str, Any]:
        """Fallback proof used when no posterior could be computed."""
        return {
            'title': '👨‍💻 Developer Trusted',
            'message': 'Join 1,000+ developers using ARF for AI safety',
            'icon': '👨‍💻',
            'credibility': 0.8,
            'confidence': 0.7,
            'proof_type': 'default',
            'optimized_for': 'default',
            'recommended_for_tier': license_tier,
            'risk_context_match': 0.7,
            'credibility_interval': (0.72, 0.88)
        }
class EnhancedPsychologyEngine:
"""Complete psychology engine combining all principles"""
def __init__(self):
self.prospect_theory = ProspectTheoryEngine()
self.social_proof = BayesianSocialProofEngine()
# Loss aversion scenarios with financial impact
self.loss_scenarios = {
"CRITICAL": [
{"text": "Data breach ($3.9M average cost)", "impact": 3900000},
{"text": "Service disruption ($300k/hour)", "impact": 7200000},
{"text": "Compliance fines (up to $20M)", "impact": 20000000},
{"text": "Reputational damage (6+ months recovery)", "impact": 5000000}
],
"HIGH": [
{"text": "Data corruption (24h recovery)", "impact": 1000000},
{"text": "Performance degradation (50% slower)", "impact": 500000},
{"text": "Security vulnerability exposure", "impact": 750000},
{"text": "Customer churn (15% increase)", "impact": 1500000}
],
"MEDIUM": [
{"text": "Increased operational overhead", "impact": 250000},
{"text": "Manual review delays (2+ hours)", "impact": 150000},
{"text": "Team productivity loss (20%)", "impact": 300000},
{"text": "Audit findings & remediation", "impact": 200000}
],
"LOW": [
{"text": "Minor configuration drift", "impact": 50000},
{"text": "Documentation gaps", "impact": 25000},
{"text": "Process inefficiencies", "impact": 75000},
{"text": "Training requirements", "impact": 100000}
]
}
# Scarcity messaging with mathematical decay
self.scarcity_patterns = {
"trial": {
"base_urgency": 0.8,
"decay_rate": 0.07, # per day
"messages": [
"⏳ {days} days remaining in free trial",
"🎁 Trial ends in {days} days - upgrade to keep mechanical gates",
"⚠️ Free access expires in {days} days"
]
},
"starter": {
"base_urgency": 0.6,
"decay_rate": 0.05,
"messages": [
"💰 Special pricing ends in {days} days",
"👥 Limited seats at current price",
"⏰ Quarterly offer expires soon"
]
}
}
# Authority signals with credibility scores
self.authority_signals = [
{"text": "SOC 2 Type II Certified", "credibility": 0.95, "audience": ["executive", "compliance"]},
{"text": "GDPR & CCPA Compliant", "credibility": 0.9, "audience": ["compliance", "executive"]},
{"text": "ISO 27001 Certified", "credibility": 0.92, "audience": ["executive", "compliance"]},
{"text": "99.9% SLA Guarantee", "credibility": 0.88, "audience": ["engineer", "executive"]},
{"text": "24/7 Dedicated Support", "credibility": 0.85, "audience": ["engineer", "executive"]},
{"text": "On-prem Deployment Available", "credibility": 0.87, "audience": ["executive", "compliance"]},
{"text": "Fortune 500 Deployed", "credibility": 0.93, "audience": ["executive", "investor"]},
{"text": "Venture Backed", "credibility": 0.8, "audience": ["investor", "executive"]}
]
def generate_comprehensive_insights(self, risk_score: float, risk_category: str,
license_tier: str, user_type: str = "engineer",
days_remaining: int = 14) -> Dict[str, Any]:
"""
Generate comprehensive psychological insights for investor demos
"""
# Prospect Theory impact
prospect_impact = self.prospect_theory.calculate_psychological_impact(
risk_score, license_tier
)
# Social proof optimization
social_proof = self.social_proof.get_optimized_proof(
user_type, license_tier,
{"risk_score": risk_score, "risk_category": risk_category}
)
# Loss aversion framing
loss_aversion = self._generate_loss_aversion_framing(risk_category, risk_score)
# Scarcity messaging
scarcity = self._generate_scarcity_message(license_tier, days_remaining)
# Authority signals
authority = self._generate_authority_signals(user_type)
# Anchoring effect (reference pricing)
anchoring = self._generate_anchoring_effect(license_tier)
# Conversion prediction
conversion_prediction = self._predict_conversion(
prospect_impact['anxiety_level'],
social_proof['credibility'],
scarcity.get('urgency', 0.5),
license_tier
)
return {
"prospect_theory_impact": prospect_impact,
"optimized_social_proof": social_proof,
"loss_aversion_framing": loss_aversion,
"scarcity_signaling": scarcity,
"authority_signals": authority,
"anchoring_effects": anchoring,
"conversion_prediction": conversion_prediction,
"psychological_summary": self._generate_psychological_summary(
prospect_impact, social_proof, loss_aversion
),
"user_type": user_type,
"license_tier": license_tier,
"risk_context": {
"score": risk_score,
"category": risk_category,
"perceived_impact": prospect_impact['perceived_risk']
}
}
def _generate_loss_aversion_framing(self, risk_category: str, risk_score: float) -> Dict[str, Any]:
"""Generate loss aversion framing with financial impact"""
scenarios = self.loss_scenarios.get(risk_category, self.loss_scenarios["MEDIUM"])
# Select scenarios based on risk score
num_scenarios = min(3, int(risk_score * 4) + 1)
selected = random.sample(scenarios, min(num_scenarios, len(scenarios)))
# Calculate total potential impact
total_impact = sum(s["impact"] for s in selected)
return {
"title": f"🚨 Without Enterprise protection, you risk:",
"scenarios": [s["text"] for s in selected],
"total_potential_impact": f"${total_impact:,.0f}",
"average_scenario_impact": f"${total_impact/len(selected):,.0f}",
"risk_category": risk_category,
"psychological_impact": "HIGH" if risk_category in ["CRITICAL", "HIGH"] else "MODERATE"
}
def _generate_scarcity_message(self, license_tier: str, days_remaining: int) -> Dict[str, Any]:
"""Generate scarcity messaging with mathematical urgency"""
if license_tier not in self.scarcity_patterns:
return {"message": "", "urgency": 0.0}
pattern = self.scarcity_patterns[license_tier]
# Calculate urgency with decay
urgency = pattern["base_urgency"] * (1 - pattern["decay_rate"] * (14 - days_remaining))
urgency = max(0.1, min(0.95, urgency))
# Select message
message_template = random.choice(pattern["messages"])
message = message_template.format(days=days_remaining)
return {
"message": message,
"urgency": round(urgency, 2),
"days_remaining": days_remaining,
"urgency_category": "HIGH" if urgency > 0.7 else "MEDIUM" if urgency > 0.4 else "LOW"
}
def _generate_authority_signals(self, user_type: str, count: int = 3) -> List[Dict[str, Any]]:
"""Generate authority signals optimized for user type"""
# Filter signals for user type
relevant_signals = [
s for s in self.authority_signals
if user_type in s["audience"]
]
# Sort by credibility
relevant_signals.sort(key=lambda x: x["credibility"], reverse=True)
# Select top signals
selected = relevant_signals[:count]
return [
{
"text": s["text"],
"credibility": s["credibility"],
"relevance_to_user": "HIGH" if user_type in s["audience"] else "MEDIUM",
"formatted": f"✓ {s['text']} ({s['credibility']:.0%} credibility)"
}
for s in selected
]
def _generate_anchoring_effect(self, current_tier: str) -> Dict[str, Any]:
"""Generate anchoring effects for pricing"""
tier_prices = {
"oss": 0,
"trial": 0,
"starter": 2000,
"professional": 5000,
"enterprise": 15000
}
current_price = tier_prices.get(current_tier, 0)
# Generate reference prices (anchors)
anchors = []
for tier, price in tier_prices.items():
if price > current_price:
discount = ((price - current_price) / price) * 100
anchors.append({
"reference_tier": tier,
"reference_price": price,
"discount_percentage": round(discount, 1),
"anchor_strength": "STRONG" if discount > 50 else "MODERATE"
})
# Select strongest anchor
if anchors:
strongest_anchor = max(anchors, key=lambda x: x["discount_percentage"])
else:
strongest_anchor = {
"reference_tier": "enterprise",
"reference_price": 15000,
"discount_percentage": 100.0,
"anchor_strength": "MAXIMUM"
}
return {
"current_tier": current_tier,
"current_price": current_price,
"anchors": anchors,
"strongest_anchor": strongest_anchor,
"perceived_value": f"{strongest_anchor['discount_percentage']:.0f}% discount vs {strongest_anchor['reference_tier']}",
"anchoring_effect_strength": strongest_anchor["anchor_strength"]
}
def _predict_conversion(self, anxiety: float, social_credibility: float,
scarcity_urgency: float, license_tier: str) -> Dict[str, Any]:
"""Predict conversion probability using multiple factors"""
# Base conversion probability
base_prob = anxiety * 0.6 + social_credibility * 0.3 + scarcity_urgency * 0.1
# Tier adjustment
tier_multipliers = {
'oss': 1.0,
'trial': 1.2,
'starter': 1.1,
'professional': 1.0,
'enterprise': 0.8
}
adjusted_prob = base_prob * tier_multipliers.get(license_tier, 1.0)
adjusted_prob = min(0.95, max(0.05, adjusted_prob))
# Confidence interval
std_error = np.sqrt(adjusted_prob * (1 - adjusted_prob) / 100) # Assuming 100 samples
ci_lower = max(0, adjusted_prob - 1.96 * std_error)
ci_upper = min(1, adjusted_prob + 1.96 * std_error)
return {
"conversion_probability": round(adjusted_prob, 3),
"confidence_interval": (round(ci_lower, 3), round(ci_upper, 3)),
"confidence_width": round(ci_upper - ci_lower, 3),
"key_factors": {
"anxiety_contribution": round(anxiety * 0.6, 3),
"social_proof_contribution": round(social_credibility * 0.3, 3),
"scarcity_contribution": round(scarcity_urgency * 0.1, 3)
},
"prediction_quality": "HIGH" if (ci_upper - ci_lower) < 0.2 else "MODERATE"
}
def _generate_psychological_summary(self, prospect_impact: Dict,
social_proof: Dict, loss_aversion: Dict) -> str:
"""Generate psychological summary for investors"""
anxiety = prospect_impact.get('anxiety_level', 0.5)
credibility = social_proof.get('credibility', 0.7)
if anxiety > 0.7 and credibility > 0.8:
return "HIGH CONVERSION POTENTIAL: Strong anxiety combined with credible social proof creates ideal conversion conditions."
elif anxiety > 0.5:
return "GOOD CONVERSION POTENTIAL: Moderate anxiety levels with supporting social proof suggest healthy conversion rates."
elif credibility > 0.85:
return "STRONG SOCIAL PROOF: High credibility signals will drive conversions even with lower anxiety levels."
else:
return "BASIC CONVERSION SETUP: Standard psychological triggers in place. Consider increasing urgency or social proof."