petter2025 committed on
Commit
65776b9
·
verified ·
1 Parent(s): ad1c27f

Rename utils/psychology_layer.py to utils/psychology_layer_enhanced.py

Browse files
utils/psychology_layer.py DELETED
@@ -1,191 +0,0 @@
1
- """
2
- Psychological Persuasion Layer
3
- Implements: Loss aversion, social proof, scarcity, authority
4
- """
5
-
6
- import random
7
- from typing import Dict, List
8
-
9
class PsychologyEngine:
    """Applies psychological persuasion principles to demo presentation.

    Covers four levers: loss aversion, social proof, scarcity, and authority.
    All message pools are fixed at construction time; generator methods draw
    from them via the stdlib ``random`` module, so output varies per call
    unless the caller seeds the RNG.
    """

    def __init__(self):
        # Loss-aversion scenario pools, keyed by risk bucket.
        self.loss_scenarios = {
            "high": [
                "Data breach ($3.9M average cost)",
                "Service disruption ($300k/hour)",
                "Compliance fines (up to $20M)"
            ],
            "medium": [
                "Data corruption (24h recovery time)",
                "Performance degradation (50% slower)",
                "Security vulnerability exposure"
            ],
            "low": [
                "Minor configuration drift",
                "Increased operational overhead",
                "Manual review delays"
            ]
        }

        # Social-proof statements, keyed by license tier.
        self.social_proofs = {
            "oss": [
                "92% of Enterprise users report reduced incidents",
                "Fortune 500 companies save $2.3M annually with mechanical gates",
                "Developers report 15 minutes saved per decision"
            ],
            "trial": [
                "Join 1,000+ developers using ARF",
                "50+ companies started with trial and upgraded",
                "Average user prevents 3 high-risk actions weekly"
            ],
            "professional": [
                "Trusted by 200+ scale-ups",
                "Teams report 92% faster incident response",
                "40% reduction in on-call alerts"
            ],
            "enterprise": [
                "Deployed at 50+ Fortune 500 companies",
                "99.9% reliability across 1M+ decisions",
                "SOC 2 certified with zero findings"
            ]
        }

        # Authority/credibility badges shown alongside recommendations.
        self.authority_signals = [
            "SOC 2 Type II Certified",
            "GDPR & CCPA Compliant",
            "ISO 27001 Certified",
            "99.9% SLA Guarantee",
            "24/7 Dedicated Support",
            "On-prem Deployment Available"
        ]

        # Scarcity message templates ("{days}" is filled in later), keyed by tier.
        self.scarcity_messages = {
            "trial": [
                "⏳ Limited time: {days} days remaining in trial",
                "🎁 Free trial ends soon - upgrade to keep mechanical gates",
                "⚠️ Trial license expires in {days} days"
            ],
            "starter": [
                "💰 Special pricing: First 3 months at 50% off",
                "👥 Limited seats available at current price",
                "⏰ Offer ends this quarter"
            ]
        }

    def generate_loss_aversion_message(self, risk_score: float) -> Dict:
        """Frame *risk_score* as a set of concrete potential losses."""
        # Thresholds: >0.7 → high, >0.4 → medium, otherwise low.
        bucket = "high" if risk_score > 0.7 else "medium" if risk_score > 0.4 else "low"
        pool = self.loss_scenarios[bucket]
        chosen = random.sample(pool, min(3, len(pool)))
        return {
            "title": "🚨 Without Enterprise, you risk:",
            "points": chosen,
            "category": bucket,
            "risk_score": risk_score
        }

    def generate_social_proof(self, license_tier: str) -> str:
        """Pick one social-proof statement for the tier (oss pool as fallback)."""
        pool = self.social_proofs.get(license_tier, self.social_proofs["oss"])
        return random.choice(pool)

    def generate_scarcity_message(self, license_tier: str, days_remaining: int = 14) -> str:
        """Return a formatted scarcity line for tiers that have one, else ''."""
        templates = self.scarcity_messages.get(license_tier)
        if templates is None:
            return ""
        return random.choice(templates).format(days=days_remaining)

    def generate_authority_signals(self, count: int = 3) -> List[str]:
        """Draw up to *count* distinct authority badges."""
        pool = self.authority_signals
        return random.sample(pool, min(count, len(pool)))

    def apply_prospect_theory(self, risk_score: float) -> float:
        """Transform *risk_score* per Kahneman & Tversky's Prospect Theory.

        Losses loom larger than gains (λ ≈ 2.25); the value function is
        concave for gains and convex for losses.  Result is clamped to [0, 1].
        """
        if risk_score > 0:
            # Loss domain: diminishing sensitivity (exponent 0.88), then
            # scale by the loss-aversion coefficient λ = 2.25.
            perceived = (risk_score ** 0.88) * 2.25
        else:
            # Gain domain: concave weighting of the (negated) score.
            perceived = -((-risk_score) ** 0.88)
        return min(1.0, max(0.0, perceived))

    def generate_psychological_insights(self, risk_score: float, recommendation: str, license_tier: str) -> Dict:
        """Bundle every persuasion angle into one dict for the demo UI."""
        return {
            "loss_aversion": self.generate_loss_aversion_message(risk_score),
            "social_proof": self.generate_social_proof(license_tier),
            "scarcity": self.generate_scarcity_message(license_tier),
            "authority": self.generate_authority_signals(2),
            "perceived_risk": self.apply_prospect_theory(risk_score),
            "recommendation_impact": self._assess_recommendation_impact(recommendation),
            "tier_motivation": self._generate_tier_motivation(license_tier, risk_score)
        }

    def _assess_recommendation_impact(self, recommendation: str) -> str:
        """Map a recommendation string onto an expected emotional response."""
        if "BLOCKED" in recommendation or "HIGH RISK" in recommendation:
            return "high_anxiety"
        if "REQUIRES APPROVAL" in recommendation or "MODERATE RISK" in recommendation:
            return "moderate_concern"
        return "low_concern"

    def _generate_tier_motivation(self, current_tier: str, risk_score: float) -> Dict:
        """Describe the upgrade pitch from *current_tier* to the next tier up."""
        if current_tier == "oss":
            return {
                "target_tier": "trial",
                "value_prop": "Get mechanical gates for free",
                "motivation": "fear_of_loss",
                # Higher risk makes the free-trial pitch more urgent.
                "urgency": "high" if risk_score > 0.5 else "medium"
            }
        if current_tier == "trial":
            return {
                "target_tier": "starter",
                "value_prop": "Keep mechanical gates after trial",
                "motivation": "fear_of_loss",
                "urgency": "high"
            }
        if current_tier == "starter":
            return {
                "target_tier": "professional",
                "value_prop": "Get 24/7 support and advanced gates",
                "motivation": "aspiration",
                "urgency": "medium"
            }
        if current_tier == "professional":
            return {
                "target_tier": "enterprise",
                "value_prop": "Enterprise features and dedicated support",
                "motivation": "authority",
                "urgency": "low"
            }
        # Top tier (or unknown tier): nothing left to sell.
        return {
            "target_tier": None,
            "value_prop": "You have the highest tier",
            "motivation": None,
            "urgency": None
        }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
utils/psychology_layer_enhanced.py ADDED
@@ -0,0 +1,689 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced Psychology Layer with Prospect Theory Mathematics
3
+ PhD-Level Psychological Optimization for Investor Demos
4
+ """
5
+
6
+ import random
7
+ import numpy as np
8
+ from typing import Dict, List, Tuple, Any
9
+ from dataclasses import dataclass
10
+ from enum import Enum
11
+
12
class PsychologicalPrinciple(Enum):
    """Closed set of psychological principles with mathematical implementations."""

    LOSS_AVERSION = "loss_aversion"
    PROSPECT_THEORY = "prospect_theory"
    SOCIAL_PROOF = "social_proof"
    SCARCITY = "scarcity"
    AUTHORITY = "authority"
    ANCHORING = "anchoring"
20
+
21
@dataclass
class ProspectTheoryParameters:
    """Kahneman & Tversky's Prospect Theory parameters.

    Defaults are the estimates from Tversky & Kahneman (1992).

    Attributes:
        alpha: Risk aversion exponent for gains (0 < α ≤ 1).
        beta: Risk seeking exponent for losses (0 < β ≤ 1).
        lambda_param: Loss aversion coefficient (λ > 1).
        gamma: Probability weighting exponent for gains.
        delta: Probability weighting exponent for losses.

    Raises:
        ValueError: if any parameter is outside its valid range.
    """
    alpha: float = 0.88        # Risk aversion for gains (0 ≤ α ≤ 1)
    beta: float = 0.88         # Risk seeking for losses (0 ≤ β ≤ 1)
    lambda_param: float = 2.25 # Loss aversion coefficient (λ > 1)
    gamma: float = 0.61        # Probability weighting for gains
    delta: float = 0.69        # Probability weighting for losses

    def __post_init__(self):
        """Validate parameters.

        Uses explicit ``raise`` rather than ``assert`` so validation
        still runs when Python is started with ``-O``.
        """
        if not 0 < self.alpha <= 1:
            raise ValueError("Alpha must be between 0 and 1")
        if not 0 < self.beta <= 1:
            raise ValueError("Beta must be between 0 and 1")
        if not self.lambda_param > 1:
            raise ValueError("Lambda must be greater than 1")
        if not 0 < self.gamma <= 1:
            raise ValueError("Gamma must be between 0 and 1")
        if not 0 < self.delta <= 1:
            raise ValueError("Delta must be between 0 and 1")
37
+
38
class ProspectTheoryEngine:
    """Mathematical implementation of Kahneman & Tversky's Prospect Theory."""

    def __init__(self, params: ProspectTheoryParameters = None):
        # Fall back to the canonical published parameters when none are given.
        self.params = params or ProspectTheoryParameters()

    def value_function(self, x: float) -> float:
        """Kahneman & Tversky value function.

        v(x) = x^α for gains (x ≥ 0); v(x) = -λ(-x)^β for losses (x < 0).
        For risk scores (always in the loss domain) the perceived loss is
        risk_score^β scaled by λ.
        """
        if x < 0:
            # Loss domain: convex, scaled by the loss-aversion coefficient λ.
            return -self.params.lambda_param * ((-x) ** self.params.beta)
        # Gain domain (not typically used for risk): concave.
        return x ** self.params.alpha

    def probability_weighting(self, p: float, is_gain: bool = False) -> float:
        """Probability weighting function π(p).

        Overweights small probabilities and underweights large ones:
        π(p) = p^γ / (p^γ + (1-p)^γ)^(1/γ), with γ for gains and δ for losses.
        """
        # Endpoints are fixed points of π by definition.
        if p == 0:
            return 0
        if p == 1:
            return 1

        exponent = self.params.gamma if is_gain else self.params.delta
        top = p ** exponent
        bottom = (p ** exponent + (1 - p) ** exponent) ** (1 / exponent)
        return top / bottom

    def weighted_perceived_risk(self, risk_score: float) -> float:
        """Prospect-theory weighted perceived risk in [0, 1].

        Combines the value function (loss domain) with the loss-side
        probability weighting.
        """
        # Negate: a risk score is a loss, so it enters the value function as < 0.
        raw_value = self.value_function(-risk_score)
        loss_weight = self.probability_weighting(risk_score, is_gain=False)
        return min(1.0, abs(raw_value) * loss_weight)

    def calculate_psychological_impact(self, risk_score: float, license_tier: str) -> Dict[str, Any]:
        """Multi-dimensional psychological impact for a risk score and tier.

        Returns perceived risk, anxiety, conversion probability, urgency,
        loss-aversion weight, an impact category, and the theory parameters.
        """
        perceived_risk = self.weighted_perceived_risk(risk_score)

        # Tier-specific anxiety multiplier: more protection → less anxiety.
        anxiety_multipliers = {
            'oss': 1.3,          # Higher anxiety without protection
            'trial': 1.0,        # Balanced with temporary protection
            'starter': 0.9,      # Some protection
            'professional': 0.8, # Good protection
            'enterprise': 0.7    # Full protection
        }
        anxiety = perceived_risk * anxiety_multipliers.get(license_tier, 1.0)

        # Sigmoid-shaped conversion probability: anxiety drives conversion
        # up to a tier-dependent ceiling.
        conversion = self._sigmoid_conversion(anxiety, license_tier)

        # Urgency tracks anxiety with a 1.2x amplification, capped at 1.
        urgency = min(1.0, anxiety * 1.2)

        # OSS users get a 1.5x loss-aversion weighting (no protection at all).
        tier_boost = 1.5 if license_tier == 'oss' else 1.0
        loss_weight = self.params.lambda_param * tier_boost

        return {
            'perceived_risk': round(perceived_risk, 3),
            'anxiety_level': round(anxiety, 3),
            'conversion_probability': round(conversion, 3),
            'urgency_score': round(urgency, 3),
            'loss_aversion_weight': round(loss_weight, 2),
            'psychological_impact_category': self._categorize_impact(anxiety),
            'prospect_theory_parameters': {
                'alpha': self.params.alpha,
                'beta': self.params.beta,
                'lambda': self.params.lambda_param,
                'gamma': self.params.gamma,
                'delta': self.params.delta
            }
        }

    def _sigmoid_conversion(self, anxiety: float, license_tier: str) -> float:
        """Sigmoid conversion probability, centered at 0.5 anxiety."""
        shifted = (anxiety - 0.5) * 3  # center at 0.5, steepness 3
        base = 1 / (1 + np.exp(-shifted))

        # Tier multipliers: users already paying convert more easily.
        tier_multipliers = {
            'oss': 0.6,
            'trial': 0.8,
            'starter': 0.85,
            'professional': 0.9,
            'enterprise': 0.95
        }
        scaled = base * tier_multipliers.get(license_tier, 0.8)

        # Clamp into [0.1, 0.95]: never zero, never certain.
        return min(0.95, max(0.1, scaled))

    def _categorize_impact(self, anxiety: float) -> str:
        """Bucket an anxiety level into a named impact category."""
        thresholds = (
            (0.8, "CRITICAL_IMPACT"),
            (0.6, "HIGH_IMPACT"),
            (0.4, "MODERATE_IMPACT"),
            (0.2, "LOW_IMPACT"),
        )
        for cutoff, label in thresholds:
            if anxiety > cutoff:
                return label
        return "MINIMAL_IMPACT"
175
+
176
class BayesianSocialProofEngine:
    """Bayesian social-proof optimization with credibility updating.

    Each proof type carries a Beta-distribution prior; a user-type
    likelihood updates it to a posterior, and the most credible proof
    for that audience is selected and formatted.
    """

    def __init__(self):
        # Beta distribution priors per proof type (α = successes + 1, β = failures + 1).
        self.priors = {
            'fortune_500': (9, 2),       # α=9, β=2 → 82% prior credibility
            'scaleup': (7, 4),           # α=7, β=4 → 64% prior credibility
            'developer_count': (8, 3),   # α=8, β=3 → 73% prior credibility
            'savings': (10, 1),          # α=10, β=1 → 91% prior credibility
            'incident_reduction': (9, 2),# 82% prior credibility
            'compliance': (8, 2),        # 80% prior credibility
        }

        # Likelihood weights: how much each audience trusts each proof type.
        self.user_profiles = {
            'engineer': {
                'fortune_500': 0.6,
                'scaleup': 0.8,
                'developer_count': 0.9,
                'savings': 0.7,
                'incident_reduction': 0.95,
                'compliance': 0.5
            },
            'executive': {
                'fortune_500': 0.9,
                'savings': 0.95,
                'scaleup': 0.7,
                'incident_reduction': 0.85,
                'compliance': 0.9,
                'developer_count': 0.4
            },
            'investor': {
                'savings': 0.9,
                'fortune_500': 0.85,
                'growth': 0.8,
                'incident_reduction': 0.75,
                'compliance': 0.7,
                'scaleup': 0.6
            },
            'compliance_officer': {
                'compliance': 0.95,
                'fortune_500': 0.8,
                'incident_reduction': 0.85,
                'savings': 0.6,
                'developer_count': 0.3,
                'scaleup': 0.4
            }
        }

        # Display templates per proof type.
        self.proof_templates = {
            'fortune_500': {
                'title': '🏢 Trusted by Fortune 500',
                'message': 'Deployed at 50+ Fortune 500 companies including FAANG',
                'icon': '🏢',
                'credibility_baseline': 0.85
            },
            'scaleup': {
                'title': '🚀 Scale-up Proven',
                'message': 'Trusted by 200+ high-growth tech scale-ups',
                'icon': '🚀',
                'credibility_baseline': 0.75
            },
            'developer_count': {
                'title': '👨‍💻 Developer Love',
                'message': 'Join 1,000+ active developers using ARF for AI safety',
                'icon': '👨‍💻',
                'credibility_baseline': 0.8
            },
            'savings': {
                'title': '💰 Proven Savings',
                'message': 'Average $3.9M breach cost prevented, 92% incident reduction',
                'icon': '💰',
                'credibility_baseline': 0.9
            },
            'incident_reduction': {
                'title': '🛡️ Risk Reduction',
                'message': '92% of incidents prevented with mechanical gates',
                'icon': '🛡️',
                'credibility_baseline': 0.88
            },
            'compliance': {
                'title': '📋 Compliance Ready',
                'message': 'SOC 2, GDPR, ISO 27001 certified with zero findings',
                'icon': '📋',
                'credibility_baseline': 0.82
            }
        }

    def get_optimized_proof(self, user_type: str, license_tier: str,
                            risk_context: Dict[str, Any]) -> Dict[str, Any]:
        """Select the most credible social proof for *user_type* via Bayesian updating."""
        # Unknown audiences fall back to the engineer profile.
        if user_type not in self.user_profiles:
            user_type = 'engineer'
        profile = self.user_profiles[user_type]

        # Posterior credibility per proof type.  NOTE(review): only proof
        # types with a prior are considered — profile-only keys (e.g.
        # investor 'growth') are skipped by construction.
        posteriors = {}
        for proof_type, (prior_a, prior_b) in self.priors.items():
            if proof_type not in profile:
                continue

            likelihood = profile[proof_type]

            # Bayesian update: treat the likelihood as 10 pseudo-observations.
            wins = likelihood * 10
            losses = (1 - likelihood) * 10
            post_a = prior_a + wins
            post_b = prior_b + losses

            mean = post_a / (post_a + post_b)
            variance = (post_a * post_b) / \
                       ((post_a + post_b) ** 2 * (post_a + post_b + 1))

            posteriors[proof_type] = {
                'credibility': mean,
                'confidence': 1 - variance,
                'alpha': post_a,
                'beta': post_b,
                'likelihood': likelihood
            }

        if not posteriors:
            return self._get_default_proof(license_tier)

        # Pick the proof type with the highest posterior mean.
        winner = max(posteriors, key=lambda name: posteriors[name]['credibility'])
        return self._format_proof(
            winner,
            posteriors[winner],
            user_type,
            license_tier,
            risk_context
        )

    def _format_proof(self, proof_type: str, proof_data: Dict[str, Any],
                      user_type: str, license_tier: str,
                      risk_context: Dict[str, Any]) -> Dict[str, Any]:
        """Render the chosen proof with its credibility metrics attached."""
        template = self.proof_templates.get(
            proof_type,
            self.proof_templates['developer_count']
        )

        # Append a tier-specific call to action to the base message.
        tier_adjustments = {
            'trial': "Start your free trial today",
            'starter': "Upgrade to Starter for mechanical gates",
            'professional': "Professional includes 24/7 support",
            'enterprise': "Enterprise includes dedicated support"
        }
        adjusted_message = f"{template['message']}. {tier_adjustments.get(license_tier, '')}"

        return {
            **template,
            'message': adjusted_message,
            'proof_type': proof_type,
            'credibility': round(proof_data['credibility'], 3),
            'confidence': round(proof_data['confidence'], 3),
            'credibility_interval': self._calculate_credibility_interval(
                proof_data['alpha'], proof_data['beta']
            ),
            'optimized_for': user_type,
            'recommended_for_tier': license_tier,
            'risk_context_match': self._assess_risk_context_match(proof_type, risk_context),
            'bayesian_parameters': {
                'prior_alpha': self.priors[proof_type][0],
                'prior_beta': self.priors[proof_type][1],
                'posterior_alpha': proof_data['alpha'],
                'posterior_beta': proof_data['beta'],
                'likelihood': proof_data['likelihood']
            }
        }

    def _calculate_credibility_interval(self, alpha: float, beta: float,
                                        confidence: float = 0.95) -> Tuple[float, float]:
        """Approximate 95% credibility interval for a Beta(alpha, beta) distribution.

        Uses a normal approximation (mean ± 1.96σ) — fine for demo purposes.
        """
        mean = alpha / (alpha + beta)
        variance = (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
        sigma = np.sqrt(variance)

        low = max(0, mean - 1.96 * sigma)
        high = min(1, mean + 1.96 * sigma)
        return round(low, 3), round(high, 3)

    def _assess_risk_context_match(self, proof_type: str, risk_context: Dict[str, Any]) -> float:
        """Score how well *proof_type* fits the current risk category (0..1)."""
        risk_score = risk_context.get('risk_score', 0.5)
        risk_category = risk_context.get('risk_category', 'MEDIUM')

        # Hand-tuned effectiveness of each proof type per risk level.
        effectiveness = {
            'fortune_500': {'LOW': 0.7, 'MEDIUM': 0.8, 'HIGH': 0.9, 'CRITICAL': 0.95},
            'savings': {'LOW': 0.6, 'MEDIUM': 0.8, 'HIGH': 0.9, 'CRITICAL': 0.95},
            'incident_reduction': {'LOW': 0.5, 'MEDIUM': 0.7, 'HIGH': 0.85, 'CRITICAL': 0.9},
            'compliance': {'LOW': 0.6, 'MEDIUM': 0.7, 'HIGH': 0.8, 'CRITICAL': 0.85},
            'developer_count': {'LOW': 0.8, 'MEDIUM': 0.7, 'HIGH': 0.6, 'CRITICAL': 0.5},
            'scaleup': {'LOW': 0.7, 'MEDIUM': 0.75, 'HIGH': 0.8, 'CRITICAL': 0.7}
        }
        return effectiveness.get(proof_type, {}).get(risk_category, 0.7)

    def _get_default_proof(self, license_tier: str) -> Dict[str, Any]:
        """Fallback proof when no posterior could be computed."""
        return {
            'title': '👨‍💻 Developer Trusted',
            'message': 'Join 1,000+ developers using ARF for AI safety',
            'icon': '👨‍💻',
            'credibility': 0.8,
            'confidence': 0.7,
            'proof_type': 'default',
            'optimized_for': 'default',
            'recommended_for_tier': license_tier,
            'risk_context_match': 0.7,
            'credibility_interval': (0.72, 0.88)
        }
404
+
405
class EnhancedPsychologyEngine:
    """Complete psychology engine combining all persuasion principles.

    Composes a Prospect Theory engine and a Bayesian social-proof engine,
    then adds loss-aversion framing, scarcity decay, authority signals,
    price anchoring, and a conversion prediction.
    """

    def __init__(self):
        self.prospect_theory = ProspectTheoryEngine()
        self.social_proof = BayesianSocialProofEngine()

        # Loss-aversion scenarios with estimated financial impact (USD).
        self.loss_scenarios = {
            "CRITICAL": [
                {"text": "Data breach ($3.9M average cost)", "impact": 3900000},
                {"text": "Service disruption ($300k/hour)", "impact": 7200000},
                {"text": "Compliance fines (up to $20M)", "impact": 20000000},
                {"text": "Reputational damage (6+ months recovery)", "impact": 5000000}
            ],
            "HIGH": [
                {"text": "Data corruption (24h recovery)", "impact": 1000000},
                {"text": "Performance degradation (50% slower)", "impact": 500000},
                {"text": "Security vulnerability exposure", "impact": 750000},
                {"text": "Customer churn (15% increase)", "impact": 1500000}
            ],
            "MEDIUM": [
                {"text": "Increased operational overhead", "impact": 250000},
                {"text": "Manual review delays (2+ hours)", "impact": 150000},
                {"text": "Team productivity loss (20%)", "impact": 300000},
                {"text": "Audit findings & remediation", "impact": 200000}
            ],
            "LOW": [
                {"text": "Minor configuration drift", "impact": 50000},
                {"text": "Documentation gaps", "impact": 25000},
                {"text": "Process inefficiencies", "impact": 75000},
                {"text": "Training requirements", "impact": 100000}
            ]
        }

        # Scarcity messaging with a linear per-day urgency decay.
        self.scarcity_patterns = {
            "trial": {
                "base_urgency": 0.8,
                "decay_rate": 0.07,  # per day
                "messages": [
                    "⏳ {days} days remaining in free trial",
                    "🎁 Trial ends in {days} days - upgrade to keep mechanical gates",
                    "⚠️ Free access expires in {days} days"
                ]
            },
            "starter": {
                "base_urgency": 0.6,
                "decay_rate": 0.05,
                "messages": [
                    "💰 Special pricing ends in {days} days",
                    "👥 Limited seats at current price",
                    "⏰ Quarterly offer expires soon"
                ]
            }
        }

        # Authority signals with credibility scores and target audiences.
        self.authority_signals = [
            {"text": "SOC 2 Type II Certified", "credibility": 0.95, "audience": ["executive", "compliance"]},
            {"text": "GDPR & CCPA Compliant", "credibility": 0.9, "audience": ["compliance", "executive"]},
            {"text": "ISO 27001 Certified", "credibility": 0.92, "audience": ["executive", "compliance"]},
            {"text": "99.9% SLA Guarantee", "credibility": 0.88, "audience": ["engineer", "executive"]},
            {"text": "24/7 Dedicated Support", "credibility": 0.85, "audience": ["engineer", "executive"]},
            {"text": "On-prem Deployment Available", "credibility": 0.87, "audience": ["executive", "compliance"]},
            {"text": "Fortune 500 Deployed", "credibility": 0.93, "audience": ["executive", "investor"]},
            {"text": "Venture Backed", "credibility": 0.8, "audience": ["investor", "executive"]}
        ]

    def generate_comprehensive_insights(self, risk_score: float, risk_category: str,
                                        license_tier: str, user_type: str = "engineer",
                                        days_remaining: int = 14) -> Dict[str, Any]:
        """Generate the full psychological insight bundle for investor demos."""
        # Prospect Theory impact (perceived risk, anxiety, conversion, ...).
        impact = self.prospect_theory.calculate_psychological_impact(
            risk_score, license_tier
        )

        # Bayesian-optimized social proof for this audience and risk context.
        proof = self.social_proof.get_optimized_proof(
            user_type, license_tier,
            {"risk_score": risk_score, "risk_category": risk_category}
        )

        framing = self._generate_loss_aversion_framing(risk_category, risk_score)
        scarcity = self._generate_scarcity_message(license_tier, days_remaining)
        authority = self._generate_authority_signals(user_type)
        anchoring = self._generate_anchoring_effect(license_tier)

        # Combine anxiety, proof credibility and urgency into one prediction.
        prediction = self._predict_conversion(
            impact['anxiety_level'],
            proof['credibility'],
            scarcity.get('urgency', 0.5),
            license_tier
        )

        return {
            "prospect_theory_impact": impact,
            "optimized_social_proof": proof,
            "loss_aversion_framing": framing,
            "scarcity_signaling": scarcity,
            "authority_signals": authority,
            "anchoring_effects": anchoring,
            "conversion_prediction": prediction,
            "psychological_summary": self._generate_psychological_summary(
                impact, proof, framing
            ),
            "user_type": user_type,
            "license_tier": license_tier,
            "risk_context": {
                "score": risk_score,
                "category": risk_category,
                "perceived_impact": impact['perceived_risk']
            }
        }

    def _generate_loss_aversion_framing(self, risk_category: str, risk_score: float) -> Dict[str, Any]:
        """Frame the risk as dollar-denominated potential losses."""
        pool = self.loss_scenarios.get(risk_category, self.loss_scenarios["MEDIUM"])

        # Higher risk score → more scenarios shown (1 to 3).
        wanted = min(3, int(risk_score * 4) + 1)
        picks = random.sample(pool, min(wanted, len(pool)))

        total = sum(item["impact"] for item in picks)

        return {
            "title": "🚨 Without Enterprise protection, you risk:",
            "scenarios": [item["text"] for item in picks],
            "total_potential_impact": f"${total:,.0f}",
            "average_scenario_impact": f"${total/len(picks):,.0f}",
            "risk_category": risk_category,
            "psychological_impact": "HIGH" if risk_category in ["CRITICAL", "HIGH"] else "MODERATE"
        }

    def _generate_scarcity_message(self, license_tier: str, days_remaining: int) -> Dict[str, Any]:
        """Produce a scarcity line whose urgency decays linearly per elapsed day."""
        pattern = self.scarcity_patterns.get(license_tier)
        if pattern is None:
            return {"message": "", "urgency": 0.0}

        # Urgency decays from the base as days elapse (relative to a 14-day window),
        # clamped to [0.1, 0.95].
        raw = pattern["base_urgency"] * (1 - pattern["decay_rate"] * (14 - days_remaining))
        urgency = max(0.1, min(0.95, raw))

        line = random.choice(pattern["messages"]).format(days=days_remaining)

        return {
            "message": line,
            "urgency": round(urgency, 2),
            "days_remaining": days_remaining,
            "urgency_category": "HIGH" if urgency > 0.7 else "MEDIUM" if urgency > 0.4 else "LOW"
        }

    def _generate_authority_signals(self, user_type: str, count: int = 3) -> List[Dict[str, Any]]:
        """Return the top-*count* authority badges relevant to *user_type*."""
        candidates = [
            sig for sig in self.authority_signals
            if user_type in sig["audience"]
        ]
        # Most credible first; stable sort keeps original order on ties.
        top = sorted(candidates, key=lambda sig: sig["credibility"], reverse=True)[:count]

        return [
            {
                "text": sig["text"],
                "credibility": sig["credibility"],
                "relevance_to_user": "HIGH" if user_type in sig["audience"] else "MEDIUM",
                "formatted": f"✓ {sig['text']} ({sig['credibility']:.0%} credibility)"
            }
            for sig in top
        ]

    def _generate_anchoring_effect(self, current_tier: str) -> Dict[str, Any]:
        """Build pricing anchors: every tier priced above the current one."""
        tier_prices = {
            "oss": 0,
            "trial": 0,
            "starter": 2000,
            "professional": 5000,
            "enterprise": 15000
        }
        current_price = tier_prices.get(current_tier, 0)

        # Each pricier tier becomes a reference point ("anchor").
        anchors = []
        for tier, price in tier_prices.items():
            if price <= current_price:
                continue
            pct = ((price - current_price) / price) * 100
            anchors.append({
                "reference_tier": tier,
                "reference_price": price,
                "discount_percentage": round(pct, 1),
                "anchor_strength": "STRONG" if pct > 50 else "MODERATE"
            })

        if anchors:
            strongest = max(anchors, key=lambda a: a["discount_percentage"])
        else:
            # Already at (or above) the top tier: synthesize a maximal anchor.
            strongest = {
                "reference_tier": "enterprise",
                "reference_price": 15000,
                "discount_percentage": 100.0,
                "anchor_strength": "MAXIMUM"
            }

        return {
            "current_tier": current_tier,
            "current_price": current_price,
            "anchors": anchors,
            "strongest_anchor": strongest,
            "perceived_value": f"{strongest['discount_percentage']:.0f}% discount vs {strongest['reference_tier']}",
            "anchoring_effect_strength": strongest["anchor_strength"]
        }

    def _predict_conversion(self, anxiety: float, social_credibility: float,
                            scarcity_urgency: float, license_tier: str) -> Dict[str, Any]:
        """Predict conversion probability from anxiety, proof, and scarcity."""
        # Weighted blend: anxiety dominates, then social proof, then scarcity.
        blended = anxiety * 0.6 + social_credibility * 0.3 + scarcity_urgency * 0.1

        tier_multipliers = {
            'oss': 1.0,
            'trial': 1.2,
            'starter': 1.1,
            'professional': 1.0,
            'enterprise': 0.8
        }
        prob = blended * tier_multipliers.get(license_tier, 1.0)
        prob = min(0.95, max(0.05, prob))

        # Normal-approximation CI assuming a nominal sample of 100.
        se = np.sqrt(prob * (1 - prob) / 100)
        low = max(0, prob - 1.96 * se)
        high = min(1, prob + 1.96 * se)

        return {
            "conversion_probability": round(prob, 3),
            "confidence_interval": (round(low, 3), round(high, 3)),
            "confidence_width": round(high - low, 3),
            "key_factors": {
                "anxiety_contribution": round(anxiety * 0.6, 3),
                "social_proof_contribution": round(social_credibility * 0.3, 3),
                "scarcity_contribution": round(scarcity_urgency * 0.1, 3)
            },
            "prediction_quality": "HIGH" if (high - low) < 0.2 else "MODERATE"
        }

    def _generate_psychological_summary(self, prospect_impact: Dict,
                                        social_proof: Dict, loss_aversion: Dict) -> str:
        """One-line investor-facing summary of conversion potential."""
        anxiety = prospect_impact.get('anxiety_level', 0.5)
        credibility = social_proof.get('credibility', 0.7)

        if anxiety > 0.7 and credibility > 0.8:
            return "HIGH CONVERSION POTENTIAL: Strong anxiety combined with credible social proof creates ideal conversion conditions."
        elif anxiety > 0.5:
            return "GOOD CONVERSION POTENTIAL: Moderate anxiety levels with supporting social proof suggest healthy conversion rates."
        elif credibility > 0.85:
            return "STRONG SOCIAL PROOF: High credibility signals will drive conversions even with lower anxiety levels."
        else:
            return "BASIC CONVERSION SETUP: Standard psychological triggers in place. Consider increasing urgency or social proof."