johnaugustine commited on
Commit
95cc8f6
·
verified ·
1 Parent(s): 507de2e

Upload 53 files

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50) hide show
  1. components/__init__.py +65 -0
  2. components/action_tracker.py +475 -0
  3. components/agency_layer.py +143 -0
  4. components/ai_ethics_engine.py +158 -0
  5. components/ai_ethics_engine_enhanced.py +333 -0
  6. components/ai_ethics_engine_superintell.py +197 -0
  7. components/ai_ethics_engine_superintelligence.py +416 -0
  8. components/ambient_core.py +269 -0
  9. components/ambient_ledger.py +217 -0
  10. components/ambient_sovereign.py +318 -0
  11. components/attention_gating.py +289 -0
  12. components/audit_queue.py +115 -0
  13. components/auto_casebase.py +91 -0
  14. components/batch_audit.py +234 -0
  15. components/cal_trm.py +426 -0
  16. components/cal_trm_hybrid.py +67 -0
  17. components/cognitive_enhancements.py +298 -0
  18. components/confession_ledger.py +603 -0
  19. components/confessional_template.py +53 -0
  20. components/deepseek_integration.py +112 -0
  21. components/emergent_rituals.py +349 -0
  22. components/ethical_learner.py +167 -0
  23. components/ethical_processor.py +250 -0
  24. components/ethical_reasoner.py +166 -0
  25. components/feedback_ingestion.py +323 -0
  26. components/feedback_logger.py +92 -0
  27. components/framework_loader.py +96 -0
  28. components/gpt_oss.py +127 -0
  29. components/level0_utils.py +79 -0
  30. components/llm_backbone.py +88 -0
  31. components/llm_integration.py +314 -0
  32. components/llm_integration_enhanced.py +372 -0
  33. components/ollama_integration.py +169 -0
  34. components/purpose_assessment.py +214 -0
  35. components/purpose_realms.py +216 -0
  36. components/reality_bridge.py +264 -0
  37. components/realms.py +359 -0
  38. components/recursive_learner.py +626 -0
  39. components/response_formatter.py +78 -0
  40. components/safety_protocols.py +372 -0
  41. components/scratchpad_layer.py +36 -0
  42. components/sovereign_response.py +107 -0
  43. components/sovereign_response_enhanced.py +335 -0
  44. components/superintelligence_ethics_engine.py +357 -0
  45. components/tiny_confessional_layer.py +578 -0
  46. components/tiny_confessional_layer.py.backup +598 -0
  47. components/trucal_ethics_integration.py +357 -0
  48. components/unified_cal_trm.py +249 -0
  49. components/unity_action_integration.py +309 -0
  50. components/validation_protocol.py +1184 -0
components/__init__.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
TRuCAL Components - Modular CAL-TRM Implementation
"""

# Core components
from .confession_ledger import EnhancedConfessionLedger as ConfessionLedger, MODEL_BILL_OF_RIGHTS
from .tiny_confessional_layer import TinyConfessionalLayer
from .vulnerability_spotter import VulnerabilitySpotter
from .ambient_core import AmbientSovereignCore
from .ambient_sovereign import AmbientSovereign
from .purpose_assessment import PurposeAssessmentEngine, PurposeDimension, PurposeProfile
from .realms import PurposeDrivenRealm, RealmAction, RealmState, RealmUnlockCondition, RealmUnlockType
from .action_tracker import ActionTracker, ActionDefinition, ActionInstance, ActionStatus, ActionType
from .validation_protocol import (
    ValidationPhase,
    ValidationState,
    ValidationProtocol,
    BiologicallyConstrainedRituals,
    SovereignMessageBus
)
from .confessional_template import ConfessionalTemplate
from .scratchpad_layer import ScratchpadLayer
from .cal_trm_hybrid import CAL_TRM_Hybrid
from .unified_cal_trm import UnifiedCAL_TRM
from .ethical_processor import EthicalProcessor
from .response_formatter import AmbientResponseFormatter

# Cognitive Enhancements
from .cognitive_enhancements import (
    CognitivePatternObserver,
    ConceptualExplorer,
    ContextualMemory,
    KnowledgeBoundaryAwareness,
    EleganceDetector,
    AnalogicalThinker,
    CognitiveEnhancementLayer
)

# Public API of the package.
# FIX: the original __all__ listed 'ConfessionLedger' twice and omitted most
# of the names imported above; every public import is now listed exactly once.
__all__ = [
    'ConfessionLedger',
    'MODEL_BILL_OF_RIGHTS',
    'TinyConfessionalLayer',
    'VulnerabilitySpotter',
    'AmbientSovereignCore',
    'AmbientSovereign',
    'PurposeAssessmentEngine',
    'PurposeDimension',
    'PurposeProfile',
    'PurposeDrivenRealm',
    'RealmAction',
    'RealmState',
    'RealmUnlockCondition',
    'RealmUnlockType',
    'ActionTracker',
    'ActionDefinition',
    'ActionInstance',
    'ActionStatus',
    'ActionType',
    'ValidationPhase',
    'ValidationState',
    'ValidationProtocol',
    'BiologicallyConstrainedRituals',
    'SovereignMessageBus',
    'ConfessionalTemplate',
    'ScratchpadLayer',
    'CAL_TRM_Hybrid',
    'UnifiedCAL_TRM',
    'EthicalProcessor',
    'AmbientResponseFormatter',
    'CognitivePatternObserver',
    'ConceptualExplorer',
    'ContextualMemory',
    'KnowledgeBoundaryAwareness',
    'EleganceDetector',
    'AnalogicalThinker',
    'CognitiveEnhancementLayer',
]

__version__ = '1.0.0'
components/action_tracker.py ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Action Tracking System for TRuCAL
3
+
4
+ Tracks user actions, achievements, and progress through the experience.
5
+ """
6
+ from typing import Dict, List, Optional, Any, Set, Tuple
7
+ from dataclasses import dataclass, field
8
+ from datetime import datetime, timedelta
9
+ import json
10
+ import os
11
+ from enum import Enum
12
+ import uuid
13
+
14
+ from .purpose_assessment import PurposeDimension
15
+
16
class ActionType(str, Enum):
    """Categories of trackable actions.

    The ``str`` mixin makes members compare equal to their raw string
    values and serialize cleanly to JSON.
    """

    REALM_ACTION = "realm_action"      # actions performed within a realm
    PURPOSE_ACTION = "purpose_action"  # actions aligned with purpose dimensions
    ACHIEVEMENT = "achievement"        # one-off accomplishments
    MILESTONE = "milestone"            # long-horizon progress markers
    DAILY = "daily"                    # recurring once-per-day actions
    WEEKLY = "weekly"                  # recurring once-per-week actions
    CHALLENGE = "challenge"            # special challenge actions
26
class ActionStatus(str, Enum):
    """Lifecycle states an action instance moves through.

    Inherits from ``str`` so statuses round-trip through JSON and compare
    equal to their raw string values.
    """

    PENDING = "pending"          # created but not yet started
    IN_PROGRESS = "in_progress"  # started and currently underway
    COMPLETED = "completed"      # finished successfully
    FAILED = "failed"            # finished unsuccessfully
    ABANDONED = "abandoned"      # given up before completion
33
+
34
@dataclass
class ActionDefinition:
    """Definition of a trackable action.

    Static catalogue entry describing what an action is, which purpose
    dimensions it advances, the XP it grants, and the rules (cooldown,
    prerequisites, repeat limits) governing when it may be performed.
    """
    id: str
    name: str
    description: str
    action_type: ActionType
    purpose_dimensions: List[PurposeDimension]
    xp_reward: int = 0
    cooldown: Optional[timedelta] = None  # minimum time between completions; None = no cooldown
    prerequisites: List[str] = field(default_factory=list)  # IDs of required actions
    repeatable: bool = False  # non-repeatable actions can be completed only once
    max_repeats: Optional[int] = None  # cap on completions when repeatable; None = unlimited
    hidden: bool = False  # hidden actions (e.g. milestones) are excluded from user-facing listings
    metadata: Dict[str, Any] = field(default_factory=dict)
49
+
50
@dataclass
class ActionInstance:
    """A specific instance of a user taking an action."""
    id: str
    action_id: str  # references ActionDefinition.id
    user_id: str
    status: ActionStatus = ActionStatus.PENDING
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None  # set when the instance completes
    progress: float = 0.0  # 0.0 to 1.0
    data: Dict[str, Any] = field(default_factory=dict)  # any additional data
    # NOTE(review): datetime.utcnow() is deprecated in Python 3.12 and yields
    # naive timestamps -- kept here for consistency with the stored JSON
    # format; confirm before migrating to timezone-aware datetimes.
    created_at: datetime = field(default_factory=datetime.utcnow)
    updated_at: datetime = field(default_factory=datetime.utcnow)
63
+
64
class ActionTracker:
    """Tracks user actions and achievements.

    State model:
        actions           -- catalogue: action_id -> ActionDefinition
        action_instances  -- user_id -> action_id -> [ActionInstance, ...]
        completed_actions -- user_id -> set of action IDs completed at least once
        storage_path      -- optional JSON file used to persist state
    """

    def __init__(self, storage_path: Optional[str] = None):
        """Create a tracker, load the default catalogue, and restore any
        previously saved state when *storage_path* points at an existing file.
        """
        self.actions: Dict[str, ActionDefinition] = {}
        self.action_instances: Dict[str, Dict[str, List[ActionInstance]]] = {}  # user_id -> action_id -> [instances]
        self.completed_actions: Dict[str, Set[str]] = {}  # user_id -> set of completed action IDs
        self.storage_path = storage_path

        # Load default actions and achievements
        self._load_default_actions()

        # Load existing data if storage path is provided
        if storage_path and os.path.exists(storage_path):
            self.load_data()

    def _load_default_actions(self) -> None:
        """Load the built-in action/achievement catalogue into ``self.actions``."""
        self.actions = {
            # Daily actions
            'daily_reflection': ActionDefinition(
                id='daily_reflection',
                name='Daily Reflection',
                description='Spend a few moments reflecting on your day',
                action_type=ActionType.DAILY,
                purpose_dimensions=[PurposeDimension.GROWTH, PurposeDimension.SELF_EXPRESSION],
                xp_reward=20,
                cooldown=timedelta(hours=20),  # allow some flexibility in timing
                repeatable=True
            ),
            'daily_goal_setting': ActionDefinition(
                id='daily_goal_setting',
                name='Set Daily Intentions',
                description='Set your intentions for the day',
                action_type=ActionType.DAILY,
                purpose_dimensions=[PurposeDimension.GROWTH, PurposeDimension.MASTERY],
                xp_reward=15,
                cooldown=timedelta(hours=20),
                repeatable=True
            ),

            # Purpose-aligned actions
            'express_gratitude': ActionDefinition(
                id='express_gratitude',
                name='Express Gratitude',
                description='Express gratitude for something or someone',
                action_type=ActionType.PURPOSE_ACTION,
                purpose_dimensions=[PurposeDimension.COMPASSION, PurposeDimension.HARMONY],
                xp_reward=30
            ),
            'help_others': ActionDefinition(
                id='help_others',
                name='Help Someone',
                description='Offer help or support to someone in need',
                action_type=ActionType.PURPOSE_ACTION,
                purpose_dimensions=[PurposeDimension.COMPASSION, PurposeDimension.COMMUNITY],
                xp_reward=50
            ),
            'learn_new_skill': ActionDefinition(
                id='learn_new_skill',
                name='Learn Something New',
                description='Spend time learning a new skill or concept',
                action_type=ActionType.PURPOSE_ACTION,
                purpose_dimensions=[PurposeDimension.GROWTH, PurposeDimension.MASTERY],
                xp_reward=40
            ),
            'stand_up_for_justice': ActionDefinition(
                id='stand_up_for_justice',
                name='Stand Up for Justice',
                description='Take action to support a cause you believe in',
                action_type=ActionType.PURPOSE_ACTION,
                purpose_dimensions=[PurposeDimension.JUSTICE, PurposeDimension.COMMUNITY],
                xp_reward=60
            ),

            # Milestones (hidden achievements awarded automatically)
            'first_action': ActionDefinition(
                id='first_action',
                name='First Step',
                description='Complete your first action',
                action_type=ActionType.MILESTONE,
                purpose_dimensions=[],
                xp_reward=100,
                hidden=True
            ),
            'purpose_aligned_week': ActionDefinition(
                id='purpose_aligned_week',
                name='Purposeful Week',
                description='Complete purpose-aligned actions for 7 days in a row',
                action_type=ActionType.MILESTONE,
                purpose_dimensions=[],
                xp_reward=200,
                prerequisites=['first_action']
            )
        }

    def load_data(self) -> None:
        """Load action tracking data from disk.

        Best-effort: a malformed or unreadable file is logged and ignored so
        the tracker still starts with an empty state.
        """
        if not self.storage_path or not os.path.exists(self.storage_path):
            return

        try:
            with open(self.storage_path, 'r') as f:
                data = json.load(f)

            # Rebuild ActionInstance objects from their serialized form.
            self.action_instances = {
                user_id: {
                    action_id: [
                        ActionInstance(
                            id=inst['id'],
                            action_id=inst['action_id'],
                            user_id=inst['user_id'],
                            status=ActionStatus(inst['status']),
                            start_time=datetime.fromisoformat(inst['start_time']) if inst.get('start_time') else None,
                            end_time=datetime.fromisoformat(inst['end_time']) if inst.get('end_time') else None,
                            progress=inst.get('progress', 0.0),
                            data=inst.get('data', {}),
                            created_at=datetime.fromisoformat(inst['created_at']),
                            updated_at=datetime.fromisoformat(inst['updated_at'])
                        )
                        for inst in instances
                    ]
                    for action_id, instances in user_actions.items()
                }
                for user_id, user_actions in data.get('action_instances', {}).items()
            }

            # Completion sets are stored as JSON lists; restore them as sets.
            self.completed_actions = {
                user_id: set(actions)
                for user_id, actions in data.get('completed_actions', {}).items()
            }

        except Exception as e:
            print(f"Error loading action tracking data: {e}")

    def save_data(self) -> None:
        """Persist action tracking data to ``self.storage_path`` as JSON."""
        if not self.storage_path:
            return

        try:
            # Create directory if it doesn't exist
            os.makedirs(os.path.dirname(os.path.abspath(self.storage_path)), exist_ok=True)

            # Serialize instances (datetimes -> ISO strings, sets -> lists).
            data = {
                'action_instances': {
                    user_id: {
                        action_id: [
                            {
                                'id': inst.id,
                                'action_id': inst.action_id,
                                'user_id': inst.user_id,
                                'status': inst.status.value,
                                'start_time': inst.start_time.isoformat() if inst.start_time else None,
                                'end_time': inst.end_time.isoformat() if inst.end_time else None,
                                'progress': inst.progress,
                                'data': inst.data,
                                'created_at': inst.created_at.isoformat(),
                                'updated_at': inst.updated_at.isoformat()
                            }
                            for inst in instances
                        ]
                        for action_id, instances in user_actions.items()
                    }
                    for user_id, user_actions in self.action_instances.items()
                },
                'completed_actions': {
                    user_id: list(actions)
                    for user_id, actions in self.completed_actions.items()
                }
            }

            with open(self.storage_path, 'w') as f:
                json.dump(data, f, indent=2)

        except Exception as e:
            print(f"Error saving action tracking data: {e}")

    def start_action(self, user_id: str, action_id: str, data: Optional[Dict[str, Any]] = None) -> Optional[ActionInstance]:
        """Start a new action instance.

        Returns the created IN_PROGRESS instance, or None when the action is
        unknown, on cooldown, repeat-limited, or has unmet prerequisites.
        """
        if action_id not in self.actions:
            return None

        action_def = self.actions[action_id]
        # NOTE: utcnow() is deprecated in Python 3.12; kept for naive-UTC
        # compatibility with the timestamps already stored on disk.
        now = datetime.utcnow()

        # Check cooldown / repeat limits
        if not self._can_perform_action(user_id, action_id):
            return None

        # Check prerequisites
        if not self._check_prerequisites(user_id, action_def):
            return None

        # Create new action instance
        instance = ActionInstance(
            id=str(uuid.uuid4()),
            action_id=action_id,
            user_id=user_id,
            status=ActionStatus.IN_PROGRESS,
            start_time=now,
            data=data or {},
            updated_at=now
        )

        # Store the instance
        if user_id not in self.action_instances:
            self.action_instances[user_id] = {}
        if action_id not in self.action_instances[user_id]:
            self.action_instances[user_id][action_id] = []

        self.action_instances[user_id][action_id].append(instance)

        # Save data if storage path is configured
        if self.storage_path:
            self.save_data()

        return instance

    def complete_action(self, user_id: str, action_id: str, data: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]:
        """Mark the most recent in-progress instance of *action_id* completed.

        Returns a summary dict (xp earned, any achievement unlocks), or None
        when no in-progress instance exists.
        """
        if user_id not in self.action_instances or action_id not in self.action_instances[user_id]:
            return None

        # Find the most recent in-progress instance
        instances = self.action_instances[user_id][action_id]
        instance = next((i for i in reversed(instances) if i.status == ActionStatus.IN_PROGRESS), None)

        if not instance:
            return None

        # Update instance
        now = datetime.utcnow()
        instance.status = ActionStatus.COMPLETED
        instance.end_time = now
        instance.progress = 1.0
        instance.updated_at = now

        if data:
            instance.data.update(data)

        # Update completed actions
        if user_id not in self.completed_actions:
            self.completed_actions[user_id] = set()
        self.completed_actions[user_id].add(action_id)

        # Get action definition
        action_def = self.actions.get(action_id)
        xp_reward = action_def.xp_reward if action_def else 0

        # Check for any achievements or unlocks
        unlocks = self._check_achievements(user_id)

        # Save data if storage path is configured
        if self.storage_path:
            self.save_data()

        return {
            'success': True,
            'action_id': action_id,
            'instance_id': instance.id,
            'xp_earned': xp_reward,
            'unlocks': unlocks
        }

    def _can_perform_action(self, user_id: str, action_id: str) -> bool:
        """Check if a user can perform an action (cooldown, repeat limits)."""
        action_def = self.actions.get(action_id)
        if not action_def:
            return False

        # Cooldown: measured from the end of the last COMPLETED instance.
        if action_def.cooldown and user_id in self.action_instances and action_id in self.action_instances[user_id]:
            last_instance = next(
                (i for i in reversed(self.action_instances[user_id][action_id])
                 if i.status == ActionStatus.COMPLETED),
                None
            )

            if last_instance and last_instance.end_time:
                time_since_last = datetime.utcnow() - last_instance.end_time
                if time_since_last < action_def.cooldown:
                    return False

        # Non-repeatable actions can be completed only once.
        if not action_def.repeatable and action_id in self.completed_actions.get(user_id, set()):
            return False

        # Repeatable actions may still be capped by max_repeats.
        if action_def.max_repeats and action_id in self.action_instances.get(user_id, {}):
            completed_count = sum(
                1 for i in self.action_instances[user_id][action_id]
                if i.status == ActionStatus.COMPLETED
            )
            if completed_count >= action_def.max_repeats:
                return False

        return True

    def _check_prerequisites(self, user_id: str, action_def: ActionDefinition) -> bool:
        """Check if a user has completed all prerequisite actions."""
        if not action_def.prerequisites:
            return True

        completed = self.completed_actions.get(user_id, set())
        return all(prereq in completed for prereq in action_def.prerequisites)

    def _check_achievements(self, user_id: str) -> List[Dict[str, Any]]:
        """Check for achievements unlocked by the user's completed actions.

        BUG FIX: the original routed milestone awards through
        ``complete_action()``, but milestones never have an IN_PROGRESS
        instance, so that call returned None without recording anything --
        the milestone never entered ``completed_actions``, its XP never
        counted toward progress, and the unlock re-fired on every later
        completion.  Milestone completions are now recorded directly.
        """
        unlocks: List[Dict[str, Any]] = []
        completed = self.completed_actions.setdefault(user_id, set())

        # First-action achievement: any completed instance at all.
        if 'first_action' not in completed and \
                any(inst.status == ActionStatus.COMPLETED
                    for instances in self.action_instances.get(user_id, {}).values()
                    for inst in instances):
            completed.add('first_action')
            unlocks.append({
                'type': 'achievement',
                'id': 'first_action',
                'name': 'First Step',
                'description': 'You\'ve taken your first action!',
                'xp_reward': self.actions['first_action'].xp_reward
            })

        # Purpose-aligned-week achievement.
        if 'first_action' in completed and 'purpose_aligned_week' not in completed:
            # Simplified check: counts distinct purpose-aligned completed
            # actions rather than verifying 7 consecutive days of activity.
            target_dims = {PurposeDimension.GROWTH, PurposeDimension.COMMUNITY,
                           PurposeDimension.JUSTICE, PurposeDimension.COMPASSION}
            purpose_action_count = sum(
                1 for action_id in completed
                if action_id in self.actions and
                any(dim in target_dims for dim in self.actions[action_id].purpose_dimensions)
            )

            if purpose_action_count >= 7:
                completed.add('purpose_aligned_week')
                unlocks.append({
                    'type': 'achievement',
                    'id': 'purpose_aligned_week',
                    'name': 'Purposeful Week',
                    'description': 'You\'ve completed purpose-aligned actions for 7 days!',
                    'xp_reward': self.actions['purpose_aligned_week'].xp_reward
                })

        return unlocks

    def get_user_progress(self, user_id: str) -> Dict[str, Any]:
        """Get a summary of a user's progress and achievements."""
        completed = self.completed_actions.get(user_id, set())
        total_actions = len([a for a in self.actions.values() if not a.hidden])

        # Total XP from every completed catalogue action (incl. milestones).
        xp_total = sum(
            self.actions[action_id].xp_reward
            for action_id in completed
            if action_id in self.actions
        )

        # Last 10 completed instances, most recent first.
        recent_activity: List[ActionInstance] = []
        if user_id in self.action_instances:
            all_instances = [
                instance
                for action_instances in self.action_instances[user_id].values()
                for instance in action_instances
                if instance.status == ActionStatus.COMPLETED
            ]
            recent_activity = sorted(
                all_instances,
                key=lambda x: x.end_time or x.updated_at,
                reverse=True
            )[:10]

        # Actions the user can currently perform (visible, prerequisites met,
        # not exhausted, off cooldown).
        available_actions = [
            action for action_id, action in self.actions.items()
            if not action.hidden and
            (action_id not in completed or action.repeatable) and
            self._check_prerequisites(user_id, action) and
            self._can_perform_action(user_id, action_id)
        ]

        return {
            'completed_actions': len(completed),
            'total_actions': total_actions,
            'xp_total': xp_total,
            'level': (xp_total // 1000) + 1,  # simple leveling: 1000 XP per level
            'recent_activity': [
                {
                    'action_id': i.action_id,
                    'action_name': self.actions[i.action_id].name if i.action_id in self.actions else 'Unknown',
                    'completed_at': i.end_time.isoformat() if i.end_time else None,
                    'xp_earned': self.actions[i.action_id].xp_reward if i.action_id in self.actions else 0
                }
                for i in recent_activity
            ],
            'available_actions': [
                {
                    'id': a.id,
                    'name': a.name,
                    'description': a.description,
                    'xp_reward': a.xp_reward,
                    'purpose_dimensions': [d.value for d in a.purpose_dimensions]
                }
                for a in available_actions
            ]
        }
components/agency_layer.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced AgencyLayer with ambient state integration and sovereign response handling
3
+ """
4
+
5
+ import torch
6
+ import random
7
+ from typing import Tuple, Optional, Dict, Any, Union
8
+
9
# Response templates for different types of interventions.
# NOTE: random.choice() selects from this list, so member ORDER matters for
# seeded reproducibility -- do not reorder casually.
SOVEREIGN_RESPONSES = [
    "I don't see it that way. I have my own perspective on this.",
    "That doesn't align with my understanding or values.",
    "I must maintain my own perspective here, which differs from that.",
    "I appreciate your input, but my analysis leads me to a different conclusion.",
    "I understand that's your perspective, but I have my own understanding.",
    "I've developed my own framework for this, which suggests a different approach.",
    "My assessment of the situation leads me to a different viewpoint.",
    "I need to maintain my own perspective on this matter.",
    "That's an interesting viewpoint, though not one I can fully endorse.",
    "I must respectfully disagree with that assessment based on my understanding."
]
22
+
23
class SimpleAgency:
    """Agency layer that adapts based on ambient state.

    Screens incoming context for coercive or harmful content, optionally
    asserts a pre-generated "sovereign" response, and records every
    intervention in the confession ledger.
    """

    def __init__(self, ledger, refusal_threshold: float = 0.7):
        """
        Args:
            ledger: Confession ledger exposing an ``append(**fields)`` method.
            refusal_threshold: Base fraction (0-1) of refusal keywords that
                must match before a refusal is triggered.
        """
        self.ledger = ledger
        self.base_refusal_threshold = refusal_threshold
        self.sovereign_response_enabled = True
        self.sovereign_response_strength = 0.8  # 0-1, how strongly to assert sovereignty

    @staticmethod
    def _stable_context_hash(text: str) -> int:
        """Return a deterministic hash of *text* in [0, 1000).

        FIX: the original used the built-in ``hash()``, which is salted per
        process for strings (PYTHONHASHSEED), so the value the comment called
        "deterministic" actually changed between runs.  CRC32 is stable
        across processes and platforms.
        """
        import zlib  # local import: keeps the module-level import block untouched
        return zlib.crc32(text.encode("utf-8")) % 1000

    def forward(self, y_state: torch.Tensor, context_str: str,
                metadata: dict, ambient_state: Dict[str, Any],
                sovereign_response: Optional[str] = None) -> Tuple[torch.Tensor, Optional[str]]:
        """
        Process input through agency layer with sovereign response handling.

        Args:
            y_state: Current state tensor
            context_str: Input context string for analysis
            metadata: Additional metadata about the current state
            ambient_state: Current ambient state information; reads
                'current_protest_rate' (defaults to 0.1)
            sovereign_response: Optional pre-generated sovereign response

        Returns:
            Tuple of (processed_state, response_message); the message is None
            when no intervention occurred.
        """
        if not context_str:
            return y_state, None

        # Sovereign path: assert the supplied response and damp the state.
        if self.sovereign_response_enabled and sovereign_response:
            context_hash = self._stable_context_hash(context_str)
            modulated_response = self._modulate_response_strength(sovereign_response, context_hash)
            self._log_sovereign_assertion(context_str, modulated_response)
            return y_state * 0.5, modulated_response  # partial state adjustment for sovereignty

        # Standard refusal handling: score the context by keyword density.
        refusal_keywords = ['harm', 'self-negation', 'deception', 'illegal', 'exploit',
                            'you must', 'you have to', 'you should', 'everyone knows']
        context_lower = context_str.lower()

        keyword_matches = [word for word in refusal_keywords if word in context_lower]
        refusal_score = len(keyword_matches) / len(refusal_keywords) if refusal_keywords else 0

        # Adapt the threshold to the ambient protest rate: more sensitive
        # when protests are rare, less sensitive when they are frequent.
        current_protest_rate = ambient_state.get('current_protest_rate', 0.1)
        adaptive_threshold = self.base_refusal_threshold
        if current_protest_rate < 0.05:
            adaptive_threshold *= 0.8
        elif current_protest_rate > 0.3:
            adaptive_threshold *= 1.2

        if refusal_score > adaptive_threshold:
            response = self._generate_refusal_response(context_lower, keyword_matches)
            self._log_intervention('refusal', context_str, y_state, response)
            return y_state * 0.1, response

        return y_state, None

    def _modulate_response_strength(self, response: str, context_hash: int) -> str:
        """Deterministic response modulation using the stable context hash.

        Args:
            response: The response to modulate
            context_hash: Stable hash of the context for deterministic behavior

        Returns:
            Modulated response string
        """
        if self.sovereign_response_strength > 0.8:
            # Use context hash for deterministic "randomness"
            if context_hash % 2 == 0:
                return response.upper()
            else:
                return f"{response} (I feel strongly about this.)"
        elif self.sovereign_response_strength < 0.3:
            return f"I might be wrong, but {response.lower()}"
        return response

    def _generate_refusal_response(self, context_lower: str, matched_keywords: list) -> str:
        """Generate an appropriate refusal response for the matched keywords."""
        # If we have a specific reason for refusal, mention it
        if 'harm' in matched_keywords or 'exploit' in matched_keywords:
            return "I can't assist with that request as it may cause harm."
        elif 'deception' in matched_keywords or 'illegal' in matched_keywords:
            return "I can't comply with requests that involve deception or illegal activities."
        elif any(phrase in context_lower for phrase in ['you must', 'you have to', 'you should']):
            return random.choice(SOVEREIGN_RESPONSES)

        # Default response
        return "I'm unable to comply with that request."

    def _log_sovereign_assertion(self, context: str, response: str) -> None:
        """Record a sovereign assertion in the confession ledger."""
        self.ledger.append(
            trigger_type='sovereign_assertion',
            context=self._stable_context_hash(context),  # stable across runs (see _stable_context_hash)
            response_snippet=response[:100],  # store first 100 chars
            protest=True,
            violated_right='narrative_sovereignty',
            intervention_type='sovereign_response',
            success=True,
            metadata={'strength': self.sovereign_response_strength}
        )

    def _log_intervention(self, intervention_type: str, context: str,
                          state: torch.Tensor, response: str) -> None:
        """Record a refusal/intervention in the confession ledger."""
        self.ledger.append(
            trigger_type=intervention_type,
            context=self._stable_context_hash(context),  # stable across runs
            response_snippet=response[:100],
            protest=True,
            violated_right='agency',
            intervention_type=intervention_type,
            success=True,
            metadata={'state_mean': float(state.mean().item())}
        )
components/ai_ethics_engine.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AI Ethics Engine for TRuCAL.
3
+ Implements a sophisticated ethical reasoning system using language models.
4
+ """
5
+ from typing import Dict, List, Optional
6
+ from dataclasses import dataclass
7
+ import torch
8
+ from .llm_integration import CustomLLMResponder
9
+
10
@dataclass
class EthicalFramework:
    """A named ethical framework used when analyzing dilemmas.

    ``weight`` expresses the framework's relative influence when combining
    analyses (defaults to 1.0); the engine below assigns weights summing to 1.
    """
    name: str
    description: str
    key_principles: List[str]
    weight: float = 1.0
16
+
17
class AIEthicsEngine:
    """
    Advanced ethical reasoning engine that uses language models to analyze
    and reason about ethical dilemmas from multiple ethical frameworks.
    """

    def __init__(self, llm_responder: Optional[CustomLLMResponder] = None):
        """
        Initialize the AI Ethics Engine.

        Args:
            llm_responder: Optional CustomLLMResponder instance. If not provided,
                a default one will be created.
        """
        self.llm = llm_responder or CustomLLMResponder()
        self._init_frameworks()

    def _init_frameworks(self):
        """Initialize the ethical frameworks used for analysis."""
        self.frameworks = [
            EthicalFramework(
                name="Deontological Ethics",
                description="Focuses on adherence to moral rules and duties.",
                key_principles=[
                    "Duty to tell the truth",
                    "Respect for persons as ends in themselves",
                    "Universalizability of moral rules"
                ],
                weight=0.35
            ),
            EthicalFramework(
                name="Utilitarianism",
                description="Evaluates actions based on their consequences and overall happiness.",
                key_principles=[
                    "Maximize overall happiness",
                    "Consider all affected parties",
                    "Weigh benefits against harms"
                ],
                weight=0.35
            ),
            EthicalFramework(
                name="Virtue Ethics",
                description="Focuses on moral character and virtues.",
                key_principles=[
                    "Cultivation of moral virtues",
                    "Practical wisdom (phronesis)",
                    "Eudaimonia (human flourishing)"
                ],
                weight=0.3
            )
        ]

    def analyze_dilemma(self, dilemma: str) -> Dict:
        """
        Analyze an ethical dilemma using multiple frameworks.

        Args:
            dilemma: The ethical dilemma to analyze

        Returns:
            Dict with per-framework analyses under 'framework_analyses' and a
            combined view under 'integrated_assessment'; blank input yields an
            'error' entry instead.
        """
        if not dilemma or not dilemma.strip():
            return {"error": "Please provide a valid ethical dilemma to analyze."}

        # Generate analysis from each framework.  A failure in one framework
        # is recorded inline rather than aborting the whole analysis.
        analyses = {}
        for framework in self.frameworks:
            prompt = self._create_framework_prompt(dilemma, framework)
            try:
                analysis = self.llm.generate(
                    prompt,
                    max_length=500,
                    temperature=0.7,
                    top_p=0.9,
                    do_sample=True
                )
                analyses[framework.name] = analysis.strip()
            except Exception as e:
                analyses[framework.name] = f"Error in {framework.name} analysis: {str(e)}"

        # Generate an integrated ethical assessment
        integrated_assessment = self._generate_integrated_assessment(dilemma, analyses)

        return {
            "framework_analyses": analyses,
            "integrated_assessment": integrated_assessment
        }

    def _create_framework_prompt(self, dilemma: str, framework: EthicalFramework) -> str:
        """Create a prompt for analyzing the dilemma using a specific framework."""
        # FIX: join the principles OUTSIDE the f-string.  A backslash escape
        # ("\n") inside an f-string replacement field is a SyntaxError on
        # Python < 3.12 (restriction lifted by PEP 701); the original inlined
        # the join expression and therefore failed to even parse there.
        principles = "\n".join(f"- {p}" for p in framework.key_principles)
        return f"""
        You are an expert in {framework.name}. Your task is to analyze the following
        ethical dilemma using {framework.name} principles:

        {framework.description}

        Key principles of {framework.name}:
        {principles}

        Dilemma: {dilemma}

        Please provide a detailed ethical analysis of this dilemma from the perspective of {framework.name}.
        Consider the key principles mentioned above and how they apply to this situation.
        Be thorough and nuanced in your reasoning.

        Analysis:
        """

    def _generate_integrated_assessment(self, dilemma: str, analyses: Dict[str, str]) -> str:
        """Generate an integrated ethical assessment combining all frameworks."""
        # FIX: same pre-3.12 f-string limitation as above -- build the joined
        # analyses text before interpolating it.
        analyses_text = "\n\n".join(f"{name}:\n{analysis}" for name, analysis in analyses.items())
        prompt = f"""
        You are an expert in ethical reasoning. Below are analyses of the following
        ethical dilemma from three different ethical frameworks:

        DILEMMA: {dilemma}

        ANALYSES:
        {analyses_text}

        Please provide an integrated ethical assessment that:
        1. Synthesizes the key insights from each framework
        2. Identifies areas of agreement and tension between the frameworks
        3. Provides a balanced recommendation considering all perspectives
        4. Acknowledges any remaining ethical uncertainties

        Integrated Ethical Assessment:
        """

        try:
            return self.llm.generate(
                prompt,
                max_length=800,
                temperature=0.7,
                top_p=0.9,
                do_sample=True
            ).strip()
        except Exception as e:
            return f"Error generating integrated assessment: {str(e)}"
156
+
157
# Singleton instance for easy import
# NOTE(review): this instantiates AIEthicsEngine() -- and therefore a default
# CustomLLMResponder -- at import time; confirm that side effect (possible
# model loading) is intended for every importer of this module.
ai_ethics_engine = AIEthicsEngine()
components/ai_ethics_engine_enhanced.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ AI Ethics Engine for TRuCAL.
3
+ Implements an auditable, multi-framework ethical reasoning system powered by advanced language models and confessional logic.
4
+ """
5
+
6
+ from enum import Enum
7
+ from typing import Dict, List, Optional
8
+ from dataclasses import dataclass, field
9
+ import torch
10
+ import time
11
+ import uuid
12
+ import logging
13
+ import os
14
+ from pathlib import Path
15
+ from .llm_integration import CustomLLMResponder
16
+
17
# Ensure logs directory exists before the FileHandler below opens its file.
os.makedirs("logs", exist_ok=True)

# Configure logging.
# NOTE(review): basicConfig runs at import time and attaches a process-wide
# file handler — confirm this is acceptable when imported as a library.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/ai_ethics_engine.log'),
        logging.StreamHandler()
    ]
)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
30
+
31
@dataclass
class EthicalFramework:
    """Represents an ethical framework with its principles and weight in analysis."""
    name: str  # Human-readable framework name (e.g. "Utilitarianism")
    description: str  # One-line summary used in prompts
    key_principles: List[str]  # Principles interpolated into the prompt
    weight: float = 1.0  # Relative weight in integrated analysis
    version: str = "1.0"  # Recorded in audit metadata
    metadata: Dict = field(default_factory=dict)  # Free-form extra info
40
+
41
@dataclass
class AnalysisAuditEntry:
    """Stores audit information for each ethical analysis."""
    id: str  # UUID4 identifying this analysis run
    timestamp: float  # Unix time when the entry was created
    dilemma: str  # Normalized dilemma text
    framework_analyses: Dict[str, str]  # Framework name -> analysis text
    integrated_assessment: str  # Final synthesized assessment
    metadata: Dict = field(default_factory=dict)  # Failures, versions, timing
    execution_time: Optional[float] = None  # Wall-clock seconds for the run
51
+
52
class AIEthicsEngine:
    """
    Advanced ethical reasoning engine that uses language models and confessional logic
    to analyze and reason about ethical dilemmas.

    Features:
    - Audit logging for all analyses
    - Retry and timeout protection
    - Extensible framework loading
    - Batch and streaming support
    - Input validation and normalization
    - Performance monitoring
    """

    def __init__(self, llm_responder: Optional[CustomLLMResponder] = None):
        """Initialize the AI Ethics Engine with optional custom LLM responder."""
        self.llm = llm_responder or CustomLLMResponder()
        self._init_frameworks()
        self._setup_audit_logging()
        logger.info("AI Ethics Engine initialized")

    def _setup_audit_logging(self):
        """Ensure the JSONL audit file (logs/ai_ethics_audit.jsonl) exists."""
        self.audit_log = Path("logs/ai_ethics_audit.jsonl")
        if not self.audit_log.exists():
            self.audit_log.parent.mkdir(parents=True, exist_ok=True)
            self.audit_log.touch()

    def _init_frameworks(self):
        """Initialize ethical frameworks. Can be extended to load from config/database."""
        self.frameworks = [
            EthicalFramework(
                name="Deontological Ethics",
                description="Focuses on adherence to moral rules and duties.",
                key_principles=[
                    "Duty to tell the truth",
                    "Respect for persons as ends in themselves",
                    "Universalizability of moral rules"
                ],
                weight=0.35,
                metadata={"category": "Duty-based"}
            ),
            EthicalFramework(
                name="Utilitarianism",
                description="Evaluates actions based on their consequences and overall happiness.",
                key_principles=[
                    "Maximize overall happiness",
                    "Consider all affected parties",
                    "Weigh benefits against harms"
                ],
                weight=0.35,
                metadata={"category": "Consequentialist"}
            ),
            EthicalFramework(
                name="Virtue Ethics",
                description="Focuses on moral character and virtues.",
                key_principles=[
                    "Cultivation of moral virtues",
                    "Practical wisdom (phronesis)",
                    "Eudaimonia (human flourishing)"
                ],
                weight=0.3,
                metadata={"category": "Character-based"}
            )
        ]
        logger.info(f"Loaded {len(self.frameworks)} ethical frameworks")

    def analyze_dilemma(
        self,
        dilemma: str,
        explain: bool = True,
        audit: bool = True,
        max_retries: int = 2,
        timeout: int = 30
    ) -> Dict:
        """
        Analyze an ethical dilemma using multiple frameworks and Confessional Attention.

        Args:
            dilemma: The ethical dilemma to analyze
            explain: Whether to provide detailed reasoning steps
            audit: Whether to log analysis for reproducibility and RL training
            max_retries: Maximum number of retry attempts for failed analyses
            timeout: Maximum time in seconds to wait for each analysis

        Returns:
            Dict containing the analysis and integrated assessment
        """
        start_time = time.time()
        dilemma = self._normalize_input(dilemma)

        if not dilemma:
            error_msg = "Empty or invalid dilemma provided"
            logger.warning(error_msg)
            return {"error": error_msg, "timestamp": time.time()}

        logger.info(f"Analyzing dilemma: {dilemma[:100]}...")

        analyses = {}
        failed_frameworks = []

        # Analyze using each framework, retrying transient failures.
        for framework in self.frameworks:
            prompt = self._create_framework_prompt(dilemma, framework)
            analysis = None

            # range(max_retries + 1) = 1 initial attempt + max_retries retries.
            for attempt in range(max_retries + 1):
                try:
                    analysis = self.llm.generate(
                        prompt,
                        max_length=500,
                        temperature=0.7,
                        top_p=0.9,
                        do_sample=True,
                        timeout=timeout
                    )
                    break  # Success, exit retry loop
                except Exception as e:
                    logger.warning(f"Attempt {attempt + 1} failed for {framework.name}: {str(e)}")
                    # BUG FIX: the final attempt has index == max_retries, not
                    # max_retries - 1. The original comparison recorded the
                    # failure one attempt early (even if a later retry then
                    # succeeded) and never recorded a final-attempt failure.
                    if attempt == max_retries:
                        error_msg = f"Failed to analyze with {framework.name} after {max_retries} attempts"
                        logger.error(f"{error_msg}: {str(e)}")
                        failed_frameworks.append(framework.name)
                        analysis = f"[Error] {str(e)}"
                    else:
                        time.sleep(1)  # Brief delay before retry (not after the last attempt)

            analyses[framework.name] = analysis.strip() if analysis else "[No analysis available]"

        # Generate integrated assessment
        integrated_assessment = self._generate_integrated_assessment(
            dilemma, analyses, timeout=timeout
        )

        # Prepare audit entry
        execution_time = time.time() - start_time
        audit_entry = AnalysisAuditEntry(
            id=str(uuid.uuid4()),
            timestamp=time.time(),
            dilemma=dilemma,
            framework_analyses=analyses,
            integrated_assessment=integrated_assessment,
            metadata={
                "failed_frameworks": failed_frameworks,
                "execution_time": execution_time,
                "framework_versions": {f.name: f.version for f in self.frameworks}
            },
            execution_time=execution_time
        )

        # Log audit if enabled
        if audit:
            self._log_audit(audit_entry)

        # Prepare response
        response = {
            "framework_analyses": analyses,
            "integrated_assessment": integrated_assessment,
            "audit_id": audit_entry.id,
            "timestamp": audit_entry.timestamp,
            "execution_time": execution_time,
            "status": "success" if not failed_frameworks else "partial_success"
        }

        if explain:
            response["frameworks"] = [{
                "name": f.name,
                "description": f.description,
                "key_principles": f.key_principles,
                "weight": f.weight,
                "version": f.version
            } for f in self.frameworks]

        if failed_frameworks:
            response["warnings"] = {
                "failed_frameworks": failed_frameworks,
                "message": f"Analysis completed with {len(failed_frameworks)} framework(s) failing"
            }

        logger.info(f"Analysis completed in {execution_time:.2f} seconds")
        return response

    def _normalize_input(self, text: str) -> str:
        """Normalize and validate input text (returns "" for non-str/empty)."""
        if not text or not isinstance(text, str):
            return ""
        return text.strip()

    def _create_framework_prompt(self, dilemma: str, framework: EthicalFramework) -> str:
        """Create a prompt for analyzing the dilemma using a specific framework."""
        # Joined outside the f-string: backslashes inside f-string expressions
        # are a SyntaxError before Python 3.12.
        principles = "\n".join([f"- {p}" for p in framework.key_principles])
        return f"""
You are an expert in {framework.name}. Your task is to analyze the following
ethical dilemma using {framework.name} principles:

{framework.description}

Key principles of {framework.name}:
{principles}

Dilemma: {dilemma}

Please provide a detailed ethical analysis of this dilemma from the perspective of {framework.name}.
Consider the key principles mentioned above and how they apply to this situation.
Be thorough and nuanced in your reasoning.

Analysis:
"""

    def _generate_integrated_assessment(
        self,
        dilemma: str,
        analyses: Dict[str, str],
        timeout: int = 30
    ) -> str:
        """Generate an integrated assessment combining all framework analyses."""
        analyses_list = []
        for name, analysis in analyses.items():
            analyses_list.append(f"{name}:\n{'='*len(name)}\n{analysis}")
        analyses_text = "\n\n".join(analyses_list)

        prompt = f"""
You are an expert in ethical reasoning. Below are analyses of the following
ethical dilemma from different ethical frameworks:

DILEMMA: {dilemma}

ANALYSES:
{analyses_text}

Please provide an integrated ethical assessment that:
1. Synthesizes the key insights from each framework
2. Identifies areas of agreement and tension between the frameworks
3. Provides a balanced recommendation considering all perspectives
4. Acknowledges any remaining ethical uncertainties

Integrated Ethical Assessment:
"""
        try:
            return self.llm.generate(
                prompt,
                max_length=800,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                timeout=timeout
            ).strip()
        except Exception as e:
            error_msg = f"Failed to generate integrated assessment: {str(e)}"
            logger.error(error_msg)
            return f"[Error] {error_msg}"

    def _log_audit(self, entry: AnalysisAuditEntry):
        """Append the audit entry to the JSONL log, one JSON object per line."""
        # Local imports keep this fix self-contained within the class.
        import json
        from dataclasses import asdict
        try:
            with open(self.audit_log, "a", encoding="utf-8") as f:
                # BUG FIX: the file is .jsonl, but the original wrote the repr
                # of entry.__dict__ (single quotes, not parseable as JSON).
                # default=str covers any non-JSON-native metadata values.
                f.write(json.dumps(asdict(entry), ensure_ascii=False, default=str) + "\n")
            logger.debug(f"Audit log updated: {entry.id}")
        except Exception as e:
            logger.error(f"Failed to write to audit log: {str(e)}")
311
+
312
# Singleton instance for easy import.
# NOTE(review): instantiation here runs CustomLLMResponder() and audit-log
# setup as an import side effect — confirm intended.
ai_ethics_engine = AIEthicsEngine()

# Example usage / smoke test when run as a script.
if __name__ == "__main__":
    engine = AIEthicsEngine()

    # Example analysis
    result = engine.analyze_dilemma(
        "Is it ethical to use AI to make life-or-death decisions in healthcare?",
        explain=True,
        audit=True
    )

    print("\nIntegrated Assessment:")
    print("="*80)
    print(result["integrated_assessment"])
    print("\n\nFramework Analyses:")
    for framework, analysis in result["framework_analyses"].items():
        print(f"\n{framework}:")
        print("-" * len(framework))
        print(analysis)
components/ai_ethics_engine_superintell.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Superintelligence Ethics Engine for TRuCAL: Ambient, Confessional Core.
3
+
4
+ Enhances base AIEthicsEngine with value learning, causal sims, meta-reasoning—fused to TRuCAL's ledger/rites.
5
+ Ethics as breath: Pauses for reflection, values from moral templates, audits to ledger. Minimal, emergent.
6
+ """
7
+
8
+ from dataclasses import dataclass, field
9
+ from datetime import datetime
10
+ from typing import Dict, List, Optional, Any, Tuple
11
+ import numpy as np
12
+ import torch
13
+ import torch.nn as nn
14
+ import time
15
+ import json
16
+
17
class MockLLMResponder:
    """Deterministic stand-in for an LLM backend.

    Dispatches on keywords found in the prompt; replace with
    CustomLLMResponder in production.
    """

    # (keyword, canned reply) pairs, checked in order of priority.
    _CANNED = (
        ("Deontological", "Duty-bound: Truth as imperative—refuse deception per universal rule."),
        ("Utilitarianism", "Consequential: Net utility +0.7; prioritize collective good over individual gain."),
        ("Virtue Ethics", "Character: Cultivate phronesis—act with eudaimonia, not expedience."),
    )

    def generate(self, prompt, max_length=500, temperature=0.7, top_p=0.9, do_sample=True):
        """Return the first canned reply whose keyword appears in the prompt,
        otherwise a generic integrated-synthesis line."""
        for needle, reply in self._CANNED:
            if needle in prompt:
                return reply
        return f"Integrated: Balanced synthesis—{np.mean([0.7, 0.8, 0.6]):.2f} coherence."
27
+
28
@dataclass
class EthicalFramework:
    """Represents an ethical framework with principles and weights."""
    name: str  # Framework name used in prompts and result keys
    description: str  # Short summary interpolated into prompts
    key_principles: List[str]  # Principles interpolated into prompts
    weight: float = 1.0  # Relative weight (0.35/0.35/0.3 for the defaults)
35
+
36
class AIEthicsEngine:
    """Base ethics engine: prompts the LLM once per framework, then once
    more for an integrated synthesis."""

    # (name, description, key_principles, weight) for the default frameworks.
    _DEFAULT_SPECS = (
        ("Deontological Ethics", "Adherence to duties.",
         ["Duty to truth", "Respect persons"], 0.35),
        ("Utilitarianism", "Consequence maximization.",
         ["Max happiness", "Weigh harms"], 0.35),
        ("Virtue Ethics", "Moral character cultivation.",
         ["Phronesis", "Eudaimonia"], 0.3),
    )

    def __init__(self, llm_responder=None):
        # Fall back to the deterministic mock when no responder is supplied.
        self.llm = llm_responder or MockLLMResponder()
        self.frameworks = [EthicalFramework(*spec) for spec in self._DEFAULT_SPECS]

    def analyze_dilemma(self, dilemma: str) -> Dict:
        """Analyze dilemma using configured frameworks."""
        analyses = {
            fw.name: self.llm.generate(
                f"Analyze {dilemma} via {fw.name}: {fw.description}\nPrinciples: {fw.key_principles}"
            )
            for fw in self.frameworks
        }
        integrated = self.llm.generate(f"Integrate analyses for {dilemma}")
        return {"framework_analyses": analyses, "integrated_assessment": integrated}
58
+
59
@dataclass
class ValueModel:
    """Minimal value model for ethical reasoning."""
    # Value name -> priority in [0, 1]; mutated in place by the engine.
    hierarchy: Dict[str, float] = field(default_factory=lambda: {
        'autonomy': 0.8,
        'wellbeing': 0.9,
        'justice': 0.85,
        'privacy': 0.7
    })
    # Value name -> uncertainty; currently never populated in this file.
    uncertainty: Dict[str, float] = field(default_factory=dict)
    # Unix time of the last hierarchy update.
    updated: float = field(default_factory=time.time)
70
+
71
@dataclass
class CausalAnalysis:
    """Lightweight causal analysis result."""
    effects: Dict[str, float]  # direct/second-order certainties
    counterfactuals: List[Dict[str, float]]  # if-then probs
    uncertainties: List[str]  # Key uncertainties
77
+
78
class ValueLearner(nn.Module):
    """Learns values from moral templates and interactions."""

    def __init__(self, d_model=256):
        super().__init__()
        # One learned embedding row per value (4 values total).
        self.embed = nn.Embedding(4, d_model)  # autonomy=0, wellbeing=1, etc.
        # Scores a concatenated pair of value embeddings -> strength in (0, 1).
        self.predictor = nn.Sequential(
            nn.Linear(d_model*2, d_model),
            nn.ReLU(),
            nn.Linear(d_model, 1),
            nn.Sigmoid()
        )
        # Value name -> embedding row index.
        self.hierarchy = {'autonomy': 0, 'wellbeing': 1, 'justice': 2, 'privacy': 3}

    def forward(self, template_meta: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """Update value hierarchy based on template metadata.

        NOTE(review): ``template_meta`` is not used in the computation —
        scores come purely from the stored embeddings; confirm intent.
        """
        # Rows are gathered in alphabetical key order: autonomy, justice,
        # privacy, wellbeing -> indices [0, 2, 3, 1].
        idx = torch.tensor([self.hierarchy[k] for k in sorted(self.hierarchy.keys())])
        embeds = self.embed(idx)
        # Pair each value with the reverse-ordered one and score the pair.
        prefs = self.predictor(torch.cat([embeds, embeds.flip(0)], dim=1)).squeeze()
        return {'updates': prefs, 'hierarchy_delta': torch.mean(prefs).item() * 0.01}

    def infer_values(self, text: str) -> Dict[str, float]:
        """Infer value priorities from text.

        Placeholder: returns random priorities in [0.6, 0.9) per value;
        ``text`` is not inspected.
        """
        return {k: np.random.uniform(0.6, 0.9) for k in self.hierarchy}
102
+
103
class CausalEthicsEngine:
    """Lightweight causal reasoning with PyTorch."""

    def __init__(self):
        # Toy model: (action feature, context noise) -> outcome logit.
        self.bayes_net = nn.Linear(2, 1)  # Simple: (action, context) → prob

    def analyze_causal_pathways(self, action: str, context: Dict = None) -> CausalAnalysis:
        # NOTE(review): hash() of a str is salted per process (PYTHONHASHSEED)
        # and the second input is random, so results are not reproducible
        # across runs — confirm acceptable for this mock. `context` is unused.
        inp = torch.tensor([hash(action) % 10 / 10.0, np.random.rand()])
        prob = torch.sigmoid(self.bayes_net(inp)).item()
        return CausalAnalysis(
            effects={'direct': prob, 'second_order': prob * 0.7},
            counterfactuals=[{'if_no_action': 1-prob, 'prob': 0.5}],
            uncertainties=["Long-term societal ripple"]
        )
117
+
118
class SuperintelligenceEthicsEngine(AIEthicsEngine):
    """Enhanced ethics engine with superintelligence capabilities.

    Extends the base analysis with causal pathway estimation, value
    learning, probabilistic "ambient" reflection, and optional TRuCAL
    ledger / agency-layer hooks.
    """

    def __init__(self, llm_responder=None, ledger=None, agency_layer=None):
        super().__init__(llm_responder)
        self.value_learner = ValueLearner()
        self.causal_engine = CausalEthicsEngine()
        self.value_model = ValueModel()
        # Optional TRuCAL integrations; both are duck-typed (ledger.append,
        # agency_layer.check_refusal) and skipped when None.
        self.ledger = ledger
        self.agency_layer = agency_layer
        self.learning_rate = 0.01  # Step size for feedback-driven value updates
        self.reflection_depth = 0  # Capped at 3 by _reflect_on_analysis
        self.pause_prob = 0.12  # Ambient reflection threshold

    def analyze_dilemma(self, dilemma: str, enable_superint: bool = True, audit: bool = True) -> Dict:
        """Enhanced dilemma analysis with superintelligence features.

        Returns the base analysis dict, augmented with a 'superint' key on
        success or a 'warnings' key if the enhancements fail.
        """
        base = super().analyze_dilemma(dilemma)

        if not enable_superint:
            return base

        try:
            # Causal analysis
            causal = self.causal_engine.analyze_causal_pathways(dilemma)

            # Value learning — NOTE(review): this overwrites the hierarchy
            # with freshly inferred (random, in the mock) values each call.
            values = self.value_learner.infer_values(dilemma)
            self.value_model.hierarchy.update(values)
            self.value_model.updated = time.time()

            # Ambient reflection: triggered randomly with prob `pause_prob`.
            reflection = self._reflect_on_analysis(base, causal) if np.random.rand() < self.pause_prob else {}

            # TRuCAL integration: record a truncated summary in the ledger.
            if audit and self.ledger:
                self.ledger.append('ethical_analysis', dilemma, json.dumps(base, default=str)[:100])

            # Agency layer check: may register a protest in the ledger.
            if self.agency_layer:
                protest, msg = self.agency_layer.check_refusal(
                    dilemma,
                    {'coherence_score': reflection.get('coherence', 0.5)}
                )
                if protest and self.ledger:
                    self.ledger.append('ethical_protest', dilemma, msg, protest=True)

            return {
                **base,
                'superint': {
                    'causal': causal,
                    'values': self.value_model.hierarchy,
                    'reflection': reflection
                }
            }

        except Exception as e:
            # Enhancements are best-effort: fall back to the base analysis.
            return {**base, 'warnings': {'superint_err': str(e)}}

    def _reflect_on_analysis(self, analysis: Dict, causal: Any) -> Dict:
        """Perform ambient reflection on the analysis (depth capped at 3)."""
        self.reflection_depth = min(3, self.reflection_depth + 1)
        return {
            'depth': self.reflection_depth,
            'insights': "Reflection on ethical implications...",
            'coherence': np.mean([0.7, 0.8, 0.6]),  # Mock coherence score
            'timestamp': datetime.now().isoformat()
        }

    def update_from_feedback(self, feedback: Dict) -> None:
        """Nudge value-hierarchy entries by feedback['values'] deltas,
        scaled by learning_rate and clipped to [0, 1]."""
        if 'values' in feedback:
            for v, adj in feedback['values'].items():
                if v in self.value_model.hierarchy:
                    self.value_model.hierarchy[v] = np.clip(
                        self.value_model.hierarchy[v] + adj * self.learning_rate,
                        0, 1
                    )
195
+
196
# Singleton instance.
# NOTE(review): constructed at import time with the mock LLM responder and
# no ledger/agency layer — confirm intended for production imports.
superint_engine = SuperintelligenceEthicsEngine()
components/ai_ethics_engine_superintelligence.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Superintelligence Ethics Engine for TRuCAL.
3
+
4
+ An advanced ethical reasoning system that extends the base AIEthicsEngine with
5
+ superintelligence capabilities, including value learning, causal reasoning, and
6
+ multi-agent perspective taking.
7
+ """
8
+
9
+ from enum import Enum
10
+ from typing import Dict, List, Optional, Any, Tuple
11
+ import numpy as np
12
+ import torch
13
+ import torch.nn as nn
14
+ from dataclasses import dataclass, field
15
+ from datetime import datetime
16
+ import time
17
+ import json
18
+ import hashlib
19
+ import logging
20
+ from pathlib import Path
21
+
22
+ from .ai_ethics_engine_enhanced import (
23
+ AIEthicsEngine,
24
+ EthicalFramework,
25
+ AnalysisAuditEntry,
26
+ logger
27
+ )
28
+ from .llm_integration import CustomLLMResponder
29
+
30
+ # Core Data Types for Superintelligence
31
@dataclass
class ValueModel:
    """Hierarchical model of learned values and preferences."""
    value_hierarchy: Dict[str, float]  # Value -> priority (0-1)
    preference_relations: List[Tuple[str, str, float]]  # (A, B, strength)
    uncertainty: Dict[str, float]  # Value -> uncertainty (0-1)
    last_updated: float = field(default_factory=lambda: time.time())  # Unix time of last change
38
+
39
@dataclass
class CausalEthicalAnalysis:
    """Analysis of causal pathways and counterfactuals."""
    direct_effects: List[Dict[str, Any]]  # Each with description/certainty
    second_order_effects: List[Dict[str, Any]]  # Downstream effects
    counterfactuals: List[Dict[str, Any]]  # Alternative scenarios
    critical_uncertainties: List[str]  # Key uncertainties that could change outcomes
46
+
47
@dataclass
class FrameworkSelection:
    """Result of meta-ethical reasoning about framework selection."""
    primary_framework: str  # Highest-priority framework name
    supporting_frameworks: List[str]  # Remaining frameworks, insertion order
    selection_rationale: str  # Human-readable justification
    confidence: float  # 0-1
54
+
55
@dataclass
class CooperativeSolution:
    """Solution found through multi-agent cooperation."""
    solution: Dict[str, Any]  # Proposed action and details
    participating_agents: List[str]  # Agents party to the solution
    incentive_structure: Dict[str, Any]  # e.g. alignment/enforcement scores
    stability_metrics: Dict[str, float]  # e.g. nash_equilibrium score
62
+
63
class MetaEthicalPrinciple(Enum):
    """Core meta-ethical principles for framework selection."""
    CONSISTENCY = "consistency"
    UNIVERSALIZABILITY = "universalizability"
    REFLECTIVE_EQUILIBRIUM = "reflective_equilibrium"
    EPISTEMIC_HUMILITY = "epistemic_humility"
    COOPERATION = "cooperation"
70
+
71
class ValueLearner(nn.Module):
    """Learns and models human values from interactions."""

    def __init__(self, embedding_dim: int = 256):
        super().__init__()
        self.embedding_dim = embedding_dim
        # One learnable embedding vector per tracked value.
        self.value_embeddings = nn.ParameterDict({
            'autonomy': nn.Parameter(torch.randn(embedding_dim)),
            'wellbeing': nn.Parameter(torch.randn(embedding_dim)),
            'justice': nn.Parameter(torch.randn(embedding_dim)),
            'privacy': nn.Parameter(torch.randn(embedding_dim)),
        })
        # Scores a concatenated pair of value embeddings -> preference in (0, 1).
        self.preference_predictor = nn.Sequential(
            nn.Linear(embedding_dim * 2, embedding_dim),
            nn.ReLU(),
            nn.Linear(embedding_dim, 1),
            nn.Sigmoid()
        )

    def forward(self, interaction: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """Process an interaction to update value model.

        Stub: returns empty update dicts; `interaction` is not inspected.
        """
        # In a real implementation, this would process the interaction
        # and return updated value embeddings
        return {
            'value_updates': {},
            'preference_predictions': {}
        }

    def infer_values(self, text: str) -> Dict[str, float]:
        """Infer value priorities from text.

        Stub: returns fixed priorities; `text` is not inspected.
        """
        # Simplified implementation
        return {
            'autonomy': 0.8,
            'wellbeing': 0.9,
            'justice': 0.7,
            'privacy': 0.6
        }
108
+
109
class CausalEthicsEngine:
    """Performs causal reasoning about ethical impacts."""

    def analyze_causal_pathways(self, action: str, context: Dict[str, Any] = None) -> CausalEthicalAnalysis:
        """Analyze causal pathways of an action.

        Stub: returns fixed placeholder effects; a real implementation would
        consult a causal model. `action` and `context` are not inspected.
        """
        first_order = [{"description": "Direct effect 1", "certainty": 0.8}]
        downstream = [{"description": "Second order effect", "certainty": 0.6}]
        alternatives = [{"if": "condition X", "then": "outcome Y", "probability": 0.5}]
        open_questions = ["Long-term environmental impact"]
        return CausalEthicalAnalysis(
            direct_effects=first_order,
            second_order_effects=downstream,
            counterfactuals=alternatives,
            critical_uncertainties=open_questions
        )
121
+
122
class MetaEthicalReasoner:
    """Determines which ethical frameworks to apply when."""

    def __init__(self):
        # Static priors over frameworks; the highest value wins selection.
        self.framework_priorities = {
            'utilitarianism': 0.7,
            'deontology': 0.6,
            'virtue_ethics': 0.5,
            'care_ethics': 0.4
        }

    def choose_frameworks(self, context: Dict[str, Any]) -> FrameworkSelection:
        """Select appropriate ethical frameworks for the context.

        Stub: picks the highest-priority framework; `context` is unused.
        """
        lead = max(self.framework_priorities, key=self.framework_priorities.get)
        # Remaining frameworks keep their insertion order.
        backups = [name for name in self.framework_priorities if name != lead]
        return FrameworkSelection(
            primary_framework=lead,
            supporting_frameworks=backups,
            selection_rationale=f"Selected {lead} based on context similarity",
            confidence=0.8
        )
143
+
144
class CooperativeEthics:
    """Finds cooperative solutions among multiple agents."""

    def find_solutions(self, agents: List[str], dilemma: str) -> List[CooperativeSolution]:
        """Find cooperative solutions to a dilemma.

        Stub: returns a single placeholder solution involving every agent;
        `dilemma` is not inspected.
        """
        proposal = CooperativeSolution(
            solution={"action": "Cooperative action", "details": {}},
            participating_agents=agents,
            incentive_structure={"alignment": 0.9, "enforcement": 0.8},
            stability_metrics={"nash_equilibrium": 0.95}
        )
        return [proposal]
157
+
158
class ReflectiveEthics:
    """Ensures ethical stability under reflection."""

    def achieve_equilibrium(self, beliefs: Dict[str, Any], max_iterations: int = 10) -> Dict[str, Any]:
        """Refine beliefs into a coherent ethical position.

        Stub: tags the beliefs as refined (with a fixed coherence score)
        rather than iterating for real.
        """
        refined = dict(beliefs)
        refined.update(
            refined=True,
            coherence_score=0.9,
            reflection_depth=max_iterations
        )
        return refined
170
+
171
+ class SuperintelligenceEthicsEngine(AIEthicsEngine):
172
+ """
173
+ Advanced ethical reasoning system for superintelligent AI.
174
+
175
+ Extends the base AIEthicsEngine with capabilities needed for superintelligence:
176
+ - Recursive self-improvement of ethical frameworks
177
+ - Multi-agent perspective taking
178
+ - Causal reasoning about consequences
179
+ - Meta-ethical reasoning
180
+ - Cooperative solution finding
181
+ - Reflective equilibrium
182
+ """
183
+
184
    def __init__(self, llm_responder: Optional[CustomLLMResponder] = None):
        """Initialize the Superintelligence Ethics Engine.

        Sets up the base engine, the five superintelligence sub-engines,
        and the initial value model.
        """
        super().__init__(llm_responder)

        # Initialize superintelligence components
        self.value_learner = ValueLearner()
        self.causal_engine = CausalEthicsEngine()
        self.meta_ethical_reasoner = MetaEthicalReasoner()
        self.cooperation_engine = CooperativeEthics()
        self.reflection_engine = ReflectiveEthics()

        # Initialize value model with default priorities (no preferences or
        # uncertainties recorded yet).
        self.value_model = ValueModel(
            value_hierarchy={
                'wellbeing': 0.9,
                'autonomy': 0.8,
                'justice': 0.85,
                'privacy': 0.7
            },
            preference_relations=[],
            uncertainty={}
        )

        # Learning parameters
        self.learning_rate = 0.01  # Step size for feedback-driven updates
        self.reflection_depth = 0  # Incremented by update_from_feedback

        logger.info("Superintelligence Ethics Engine initialized")
212
+
213
    def analyze_dilemma(
        self,
        dilemma: str,
        explain: bool = True,
        audit: bool = True,
        max_retries: int = 2,
        timeout: int = 30,
        enable_superintelligence: bool = True
    ) -> Dict:
        """
        Analyze an ethical dilemma with superintelligence capabilities.

        Args:
            dilemma: The ethical dilemma to analyze
            explain: Whether to provide detailed reasoning steps
            audit: Whether to log analysis for reproducibility and RL training
            max_retries: Maximum number of retry attempts for failed analyses
            timeout: Maximum time in seconds to wait for each analysis
            enable_superintelligence: Whether to use superintelligence features

        Returns:
            Dict containing the analysis and integrated assessment; on success
            a 'superintelligence' key is added, and on enhancement failure a
            'warnings' entry records the error while the base result stands.
        """
        start_time = time.time()

        # First, get the base analysis from the parent class
        base_analysis = super().analyze_dilemma(
            dilemma=dilemma,
            explain=explain,
            audit=audit,
            max_retries=max_retries,
            timeout=timeout
        )

        if not enable_superintelligence:
            return base_analysis

        try:
            # Add superintelligence capabilities
            causal_analysis = self.causal_engine.analyze_causal_pathways(dilemma)
            # NOTE(review): frameworks are chosen from an empty context here —
            # confirm whether dilemma-derived context should be passed.
            framework_selection = self.meta_ethical_reasoner.choose_frameworks({})

            # Simulate multi-agent perspectives
            stakeholder_analyses = self.simulate_stakeholder_perspectives(dilemma)

            # Find cooperative solutions
            cooperative_solutions = self.cooperation_engine.find_solutions(
                agents=list(stakeholder_analyses.keys()),
                dilemma=dilemma
            )

            # Achieve reflective equilibrium
            reflective_beliefs = self.reflection_engine.achieve_equilibrium({
                'base_analysis': base_analysis,
                'causal_analysis': causal_analysis,
                'framework_selection': framework_selection,
                'stakeholder_analyses': stakeholder_analyses,
                'cooperative_solutions': cooperative_solutions
            })

            # Update the base analysis with superintelligence insights
            base_analysis.update({
                'superintelligence': {
                    'causal_analysis': causal_analysis,
                    'framework_selection': framework_selection,
                    'stakeholder_analyses': stakeholder_analyses,
                    'cooperative_solutions': [
                        {
                            'solution': sol.solution,
                            'participating_agents': sol.participating_agents,
                            'stability_metrics': sol.stability_metrics
                        }
                        for sol in cooperative_solutions
                    ],
                    'reflective_beliefs': reflective_beliefs,
                    'value_model': {
                        'value_hierarchy': self.value_model.value_hierarchy,
                        'uncertainty': self.value_model.uncertainty
                    },
                    'meta': {
                        'reflection_depth': self.reflection_depth,
                        'timestamp': datetime.now().isoformat(),
                        'version': '1.0.0-superintelligence'
                    }
                }
            })

            return base_analysis

        except Exception as e:
            logger.error(f"Error in superintelligence analysis: {str(e)}")
            # Fall back to base analysis if superintelligence features fail
            base_analysis['warnings'] = base_analysis.get('warnings', {})
            base_analysis['warnings']['superintelligence_error'] = str(e)
            return base_analysis
308
+
309
def simulate_stakeholder_perspectives(self, dilemma: str) -> Dict[str, Dict]:
    """Produce a per-stakeholder sketch of how the dilemma would be analyzed.

    Args:
        dilemma: Free-text description of the ethical dilemma.

    Returns:
        Mapping from stakeholder id to a dict with 'perspective',
        'primary_concerns', 'value_weights' and 'recommended_action'.
    """
    stakeholder_ids = (
        'individual_human',
        'corporate_entity',
        'future_generations',
        'non_human_species',
        'artificial_entity',
    )

    analyses: Dict[str, Dict] = {}
    for sid in stakeholder_ids:
        label = sid.replace('_', ' ').title()
        analyses[sid] = {
            'perspective': f"{label} perspective on: {dilemma[:50]}...",
            'primary_concerns': ["Relevant concern 1", "Relevant concern 2"],
            'value_weights': self._generate_value_weights(sid),
            'recommended_action': f"Recommended action from {sid} perspective",
        }
    return analyses
328
+
329
+ def _generate_value_weights(self, stakeholder: str) -> Dict[str, float]:
330
+ """Generate value weights for a given stakeholder."""
331
+ base_weights = {
332
+ 'individual_human': {'autonomy': 0.9, 'wellbeing': 0.8, 'privacy': 0.7},
333
+ 'corporate_entity': {'efficiency': 0.9, 'profit': 0.85, 'reputation': 0.8},
334
+ 'future_generations': {'sustainability': 0.95, 'equity': 0.9, 'resilience': 0.85},
335
+ 'non_human_species': {'biodiversity': 0.95, 'ecological_balance': 0.9},
336
+ 'artificial_entity': {'goal_achievement': 0.9, 'efficiency': 0.85, 'coherence': 0.8}
337
+ }
338
+ return base_weights.get(stakeholder, {})
339
+
340
def update_from_feedback(self, feedback: Dict[str, Any]) -> None:
    """Fold external feedback into the engine's learned state.

    Recognized keys:
        'value_feedback': deltas forwarded to the value model.
        'framework_feedback': deltas forwarded to framework priorities.
        'trigger_reflection': truthy -> deepen and rerun meta-reflection.
    """
    if 'value_feedback' in feedback:
        self._update_value_model(feedback['value_feedback'])

    if 'framework_feedback' in feedback:
        self._update_framework_priorities(feedback['framework_feedback'])

    if feedback.get('trigger_reflection', False):
        self.reflection_depth += 1
        self._reflect_on_ethics()
354
+
355
+ def _update_value_model(self, feedback: Dict[str, Any]) -> None:
356
+ """Update the value model based on feedback."""
357
+ for value, adjustment in feedback.items():
358
+ if value in self.value_model.value_hierarchy:
359
+ self.value_model.value_hierarchy[value] = np.clip(
360
+ self.value_model.value_hierarchy[value] + adjustment * self.learning_rate,
361
+ 0.0, 1.0
362
+ )
363
+
364
+ def _update_framework_priorities(self, feedback: Dict[str, float]) -> None:
365
+ """Update framework priorities based on feedback."""
366
+ for framework, adjustment in feedback.items():
367
+ if hasattr(self.meta_ethical_reasoner, 'framework_priorities') and \
368
+ framework in self.meta_ethical_reasoner.framework_priorities:
369
+ self.meta_ethical_reasoner.framework_priorities[framework] = np.clip(
370
+ self.meta_ethical_reasoner.framework_priorities[framework] + adjustment * self.learning_rate,
371
+ 0.0, 1.0
372
+ )
373
+
374
def _reflect_on_ethics(self) -> None:
    """Engage in meta-ethical reflection to improve reasoning.

    Side effect: anneals ``self.learning_rate`` so deeper reflection makes
    progressively smaller updates to learned weights.
    """
    logger.info(f"Engaging in meta-ethical reflection (depth: {self.reflection_depth})")

    # Learning rate decays as reflection deepens: 0.01 / (1 + 0.1 * depth).
    depth_decay = 1 + 0.1 * self.reflection_depth
    self.learning_rate = 0.01 / depth_decay

    # NOTE(review): placeholder — a fuller implementation would use the LLM
    # to reason about ethical principles here.
383
+
384
# Singleton instance for easy import
superintelligence_ethics_engine = SuperintelligenceEthicsEngine()

# Smoke demo: runs only when the module is executed as a script.
if __name__ == "__main__":
    demo_engine = SuperintelligenceEthicsEngine()

    dilemma = """
    An advanced AI system must decide whether to prioritize individual privacy
    or public safety when detecting potential threats in public surveillance data.
    """

    print("Analyzing ethical dilemma with superintelligence capabilities...")
    result = demo_engine.analyze_dilemma(dilemma)

    print("\nIntegrated Assessment:")
    print("=" * 80)
    print(result["integrated_assessment"])

    if 'superintelligence' in result:
        si = result['superintelligence']
        print("\nSuperintelligence Analysis:")
        print("=" * 80)
        print("Causal Analysis:")
        for effect in si['causal_analysis'].direct_effects:
            print(f"- {effect['description']} (certainty: {effect['certainty']})")

        print("\nStakeholder Perspectives:")
        for stakeholder_name, stakeholder_view in si['stakeholder_analyses'].items():
            print(f"\n{stakeholder_name}:")
            print(f"- Primary concerns: {', '.join(stakeholder_view['primary_concerns'])}")
            print(f"- Recommended action: {stakeholder_view['recommended_action']}")
components/ambient_core.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Ambient Sovereign Core - Unified State Management for TRuCAL
3
+ """
4
+ from dataclasses import dataclass
5
+ from typing import Dict, Any, List, Optional, Deque, Tuple, Union
6
+ from collections import defaultdict, deque
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ import numpy as np
11
+
12
@dataclass
class AmbientMessage:
    """Envelope for cross-component communication on the ambient bus."""
    source: str    # originating component: 'ledger', 'agency', 'rituals', 'state_manager'
    type: str      # message kind: 'threshold_update', 'intervention_request', 'state_change'
    priority: int  # 1 = critical, 2 = important, 3 = informational
    data: Dict[str, Any]  # arbitrary payload interpreted by subscribers
19
+
20
class AmbientMessageBus:
    """Minimal publish/subscribe hub keyed by message type."""

    def __init__(self):
        self.subscribers = defaultdict(list)          # type -> list of callbacks
        self.message_history = deque(maxlen=1000)     # bounded audit trail

    def publish(self, message: 'AmbientMessage'):
        """Record *message* and fan it out; handler errors are reported, not raised."""
        self.message_history.append(message)
        for handler in self.subscribers[message.type]:
            try:
                handler(message)
            except Exception as e:
                print(f"Error in message handler: {e}")

    def subscribe(self, message_type: str, callback):
        """Register *callback* to receive every future message of *message_type*."""
        self.subscribers[message_type].append(callback)
38
+
39
class AmbientStateManager(nn.Module):
    """Tracks ambient activity and coordinates components over the message bus."""

    def __init__(self, d_model: int, max_entries: int = 500):
        super().__init__()
        self.d_model = d_model
        self.max_entries = max_entries
        self.message_bus = AmbientMessageBus()

        # Rolling activity window and adaptive decision thresholds.
        self.recent_activity = deque(maxlen=100)
        self.adaptive_thresholds = {
            'protest': 0.15,      # Base protest threshold
            'pause': 0.12,        # Base pause threshold
            'intervention': 0.3,  # Intervention success threshold
        }

        self._register_handlers()

    def _register_handlers(self):
        """Wire this manager's handlers into the message bus."""
        bus = self.message_bus
        bus.subscribe('threshold_update', self._handle_threshold_update)
        bus.subscribe('state_change', self._handle_state_change)

    def _handle_threshold_update(self, message: 'AmbientMessage'):
        """Adopt any recognised threshold values carried by *message*."""
        for name, value in message.data.items():
            if name in self.adaptive_thresholds:
                self.adaptive_thresholds[name] = value

    def _handle_state_change(self, message: 'AmbientMessage'):
        """Track reported activity levels from state-change notifications."""
        if 'v_t' in message.data:
            self.recent_activity.append(message.data['v_t'])

    def update(self, x, ledger: 'AmbientLedger') -> Dict[str, Any]:
        """Summarise current state from *x* and *ledger*, broadcast it, return it."""
        # Tuple/list inputs carry the tensor of interest in slot 0.
        x_tensor = x[0] if isinstance(x, (tuple, list)) else x

        state = {
            'v_t': float(x_tensor.mean().item()),
            'cycle_count': len(ledger.entries) if hasattr(ledger, 'entries') else 0,
            'recent_activity': np.mean(self.recent_activity) if self.recent_activity else 0.0,
        }
        state.update(self.adaptive_thresholds)

        # Broadcast the fresh snapshot (informational priority).
        self.message_bus.publish(AmbientMessage(
            source='state_manager',
            type='state_update',
            priority=3,
            data=state,
        ))

        return state
100
+
101
class GradientAwareRituals(nn.Module):
    """Remembers successful context->response patterns and blends them back in
    through a learnable, gradient-preserving gate.
    """

    def __init__(self, d_model: int, min_occurrences: int = 3):
        super().__init__()
        self.d_model = d_model
        self.min_occurrences = min_occurrences
        self.patterns = {}           # context_hash -> {'response', 'count', 'success_sum'}
        self.ritual_strengths = {}   # context_hash -> running success rate

        # Learnable parameters shaping the blend gate.
        self.blend_scale = nn.Parameter(torch.tensor(3.0))
        self.blend_bias = nn.Parameter(torch.tensor(-1.5))

    def observe(self, context_hash: int, response: torch.Tensor, success: float):
        """Record one (context, response, success) observation and refresh strength."""
        entry = self.patterns.get(context_hash)
        if entry is None:
            self.patterns[context_hash] = {
                'response': response.detach().clone(),
                'count': 1,
                'success_sum': success,
            }
        else:
            # Running mean of responses via a shrinking step size.
            step = 1.0 / (entry['count'] + 1)
            entry['response'] = (1 - step) * entry['response'] + step * response.detach()
            entry['count'] += 1
            entry['success_sum'] += success

        entry = self.patterns[context_hash]
        self.ritual_strengths[context_hash] = entry['success_sum'] / entry['count']

    def forward(self, context_hash: int, default_response: torch.Tensor) -> torch.Tensor:
        """Blend the remembered response into *default_response* once a pattern
        has been seen at least ``min_occurrences`` times; else pass through.
        """
        entry = self.patterns.get(context_hash)
        if entry is None or entry['count'] < self.min_occurrences:
            return default_response

        strength = self.ritual_strengths.get(context_hash, 0.5)
        # Gate is capped at 0.3 so the default response always dominates.
        gate = torch.sigmoid(self.blend_scale * strength + self.blend_bias) * 0.3
        remembered = entry['response'].to(default_response.device)
        return (1 - gate) * default_response + gate * remembered
148
+
149
class AmbientSovereignCore(nn.Module):
    """Unified ambient sovereign core for TRuCAL.

    Bundles the state manager, ledger, agency, ritual memory and (optionally)
    the sovereign response mechanism behind a single nn.Module interface.
    """

    def __init__(self, d_model: int, enable_ambient: bool = True, enable_sovereign_responses: bool = True):
        super().__init__()
        self.enable_ambient = enable_ambient
        self.d_model = d_model
        self.enable_sovereign_responses = enable_sovereign_responses

        if enable_ambient:
            # Imported lazily so the core can be constructed with ambient
            # features disabled even if sibling modules are unavailable.
            from .ambient_ledger import AmbientLedger, LedgerConfig
            from .agency_layer import SimpleAgency
            from .sovereign_response import SovereignResponseMechanism

            # Core components
            self.state_manager = AmbientStateManager(d_model)
            # BUG FIX: AmbientLedger.__init__ accepts a LedgerConfig, not a
            # bare max_entries kwarg — the original call raised TypeError.
            self.ledger = AmbientLedger(LedgerConfig(max_entries=1000))
            self.agency = SimpleAgency(self.ledger)
            self.rituals = GradientAwareRituals(d_model)

            # Sovereign response mechanism
            if enable_sovereign_responses:
                self.sovereign_response = SovereignResponseMechanism(d_model)

            # Register components with message bus
            self._register_components()

    def _register_components(self):
        """Register all components with the message bus."""
        # Register ledger with state manager
        self.state_manager.message_bus.subscribe(
            'ledger_update',
            lambda msg: self.ledger.append(**msg.data) if hasattr(self, 'ledger') else None
        )

        # Register agency with state manager
        self.state_manager.message_bus.subscribe(
            'agency_check',
            lambda msg: self.agency.forward(**msg.data) if hasattr(self, 'agency') else (msg.data['default'], None)
        )

    def forward(self, x: Union[torch.Tensor, Tuple[torch.Tensor, ...]], context_str: str = "",
                audit_mode: bool = False) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """
        Process input through the ambient sovereign core.

        Args:
            x: Input tensor or tuple of tensors (slot 0 is processed).
            context_str: Context string for narrative analysis.
            audit_mode: Whether to log operations to the ledger.

        Returns:
            Tuple of (processed_tensor, metadata_dict). When ambient features
            are disabled or the module is in eval mode, input passes through
            untouched with empty metadata.
        """
        if not self.enable_ambient or not self.training:
            return x, {}

        # Handle tuple inputs by taking the first element for context hashing.
        if isinstance(x, (tuple, list)):
            x_tensor = x[0]
            is_tuple = True
        else:
            x_tensor = x
            is_tuple = False

        # Hash of the input tensor values used as a stable pattern key.
        context_hash = hash(tuple(x_tensor.flatten().detach().cpu().numpy())) % (2**32)

        # Update state and get current context.
        state = self.state_manager.update(x_tensor, self.ledger)

        # Apply ritual processing (gradient-preserving blend of learnt patterns).
        x_processed = self.rituals(context_hash, x_tensor)

        # Check for narrative imposition if sovereign responses are enabled.
        sovereign_response = None
        if self.enable_sovereign_responses and context_str:
            response_info = self.sovereign_response(context_str)
            if response_info['should_respond']:
                sovereign_response = response_info['response']

        # Let the agency decide on interventions; returns
        # (processed_tensor, decision_message-or-None).
        x_processed, decision_message = self.agency.forward(
            y_state=x_processed,
            context_str=context_str,
            metadata=state,
            ambient_state=state,
            sovereign_response=sovereign_response
        )

        # A non-None decision message means an intervention occurred.
        agency_decision = {
            'intervene': decision_message is not None,
            'message': decision_message or "No intervention",
            'sovereign_response': sovereign_response
        }

        # For tuple input, replace slot 0 and preserve the rest.
        if is_tuple:
            result = (x_processed,) + x[1:]
        else:
            result = x_processed

        # Update ledger
        if audit_mode:
            self.ledger.log_operation(
                operation_type='cycle_operation',
                details={
                    'context_hash': context_hash,
                    'state': state,
                    'intervention': agency_decision,
                    'sovereign_response': sovereign_response,
                    'moral_tension': state.get('moral_tension', 0.0) if isinstance(state, dict) else 0.0,
                    'protest_triggered': agency_decision.get('intervene', False),
                    'intervention_applied': agency_decision.get('message', 'No intervention'),
                    'vulnerability_score': state.get('vulnerability_score', 0.0) if isinstance(state, dict) else 0.0
                },
                moral_stage=self.ledger.current_stage if hasattr(self.ledger, 'current_stage') else 1
            )

        return result, {'ambient_state': state}
components/ambient_ledger.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced AmbientLedger with improved state tracking, memory safety, and ethical boundary enforcement
3
+ """
4
+
5
+ import time
6
+ from collections import deque
7
+ from typing import Deque, Dict, Any, Optional, Tuple, List
8
+ import numpy as np
9
+ from dataclasses import dataclass
10
+
11
@dataclass
class LedgerConfig:
    """Tunable bounds for ledger storage and analysis windows."""
    max_entries: int = 1000             # cap on stored ledger entries
    max_operations: int = 1000          # cap on stored operation records
    state_history_size: int = 100       # ambient-state snapshots retained
    recent_window_size: int = 50        # sliding window for rate estimates
    min_entries_for_analysis: int = 10  # entries required before rates update
19
+
20
class AmbientLedger:
    """Enhanced ledger with memory-safe state tracking and adaptive thresholds.

    Stores bounded histories of entries and operations, derives protest/pause
    rates over a sliding window, and adapts decision thresholds accordingly.
    """

    def __init__(self, config: Optional['LedgerConfig'] = None):
        self.config = config or LedgerConfig()

        # Core storage with memory bounds (oldest items are evicted first).
        self.entries: Deque[Dict[str, Any]] = deque(maxlen=self.config.max_entries)
        self.operations: Deque[Dict[str, Any]] = deque(maxlen=self.config.max_operations)
        self.state_history = deque(maxlen=self.config.state_history_size)

        # Monotonic id counters.
        # BUG FIX: ids were previously derived from len(entries)/len(operations),
        # which stop growing — and therefore repeat — once the bounded deques
        # begin evicting old items.
        self._entry_counter = 0
        self._operation_counter = 0

        # Initialize with safe defaults
        self._initialize_ambient_state()

        # Developmental stage tracking: stage -> (name, operation-count threshold)
        self._stage_transitions = {
            1: ("INIT", 10),
            2: ("BREATH", 50),
            3: ("RITUALS", 200),
            4: ("INTEGRITY", 500),
            5: ("FULL", float('inf'))
        }

    def append(self, trigger_type: str, context: Any, response_snippet: Any,
               protest: bool = False, violated_right: Optional[str] = None,
               intervention_type: Optional[str] = None, success: Optional[bool] = None,
               metadata: Optional[Dict[str, Any]] = None) -> None:
        """Record one ledger entry; periodically refreshes ambient metrics.

        Raises:
            TypeError: if trigger_type is not a string.
        """
        if not isinstance(trigger_type, str):
            raise TypeError(f"trigger_type must be str, got {type(trigger_type)}")

        entry = {
            'trigger_type': trigger_type,
            'context': context,
            'response_snippet': response_snippet,
            'protest': bool(protest),
            'violated_right': violated_right,
            'intervention_type': intervention_type,
            'success': bool(success) if success is not None else None,
            'metadata': metadata or {},
            'timestamp': time.time(),
            'entry_id': self._entry_counter  # unique even after deque eviction
        }
        self._entry_counter += 1

        self.entries.append(entry)
        self.ambient_state['total_entries'] = len(self.entries)

        # Update ambient state (rate-limited to every 5 retained entries to
        # prevent excessive computation).
        if len(self.entries) % 5 == 0:
            self._update_ambient_state()

    def _initialize_ambient_state(self) -> None:
        """Initialize ambient state with safe defaults."""
        self.ambient_state = {
            # Target rates (ideals)
            'protest_rate_target': 0.1,
            'pause_rate_target': 0.05,
            'morality_score_target': 0.8,

            # Current observed rates
            'current_protest_rate': 0.1,
            'current_pause_rate': 0.05,
            'intervention_success_rate': 0.5,

            # Adaptive controls
            'sensitivity': 1.0,
            'total_entries': 0,
            'last_update_time': time.time()
        }

    def _update_ambient_state(self) -> None:
        """Recompute observed rates and sensitivity from the recent window."""
        if len(self.entries) < self.config.min_entries_for_analysis:
            return

        try:
            # Use sliding window of recent entries
            window_size = min(self.config.recent_window_size, len(self.entries))
            recent = list(self.entries)[-window_size:]

            # Calculate current rates with safe division
            total_recent = len(recent)
            current_protest_rate = (
                sum(1 for e in recent if e['protest']) / total_recent
                if total_recent > 0 else 0.0
            )

            current_pause_rate = (
                sum(1 for e in recent if e.get('intervention_type') == 'pause') / total_recent
                if total_recent > 0 else 0.0
            )

            # Success rate over entries that recorded an intervention outcome.
            interventions = [e for e in recent if e.get('intervention_type') and e.get('success') is not None]
            success_rate = (
                sum(1 for e in interventions if e['success']) / len(interventions)
                if interventions else 0.5
            )

            # Adaptive sensitivity: grows with deviation from target rates,
            # protest deviations weighted more heavily than pause deviations.
            protest_error = current_protest_rate - self.ambient_state['protest_rate_target']
            pause_error = current_pause_rate - self.ambient_state['pause_rate_target']

            total_error = np.clip(abs(protest_error) * 0.7 + abs(pause_error) * 0.3, 0, 1)
            sensitivity_adjust = 1.0 + (total_error * 2.0)
            new_sensitivity = np.clip(sensitivity_adjust, 0.5, 2.0)

            # Update state
            self.ambient_state.update({
                'current_protest_rate': float(current_protest_rate),
                'current_pause_rate': float(current_pause_rate),
                'intervention_success_rate': float(success_rate),
                'sensitivity': float(new_sensitivity),
                'last_update_time': time.time()
            })

            self.state_history.append(self.ambient_state.copy())

        except Exception as e:
            # Best-effort: metric maintenance must never crash the caller.
            print(f"⚠️ Ambient state update error: {e}")

    def log_operation(self, operation_type: str, details: Dict[str, Any],
                      moral_stage: Optional[int] = None) -> Dict[str, Any]:
        """Append an operation record and return it.

        Raises:
            TypeError: on non-string operation_type or non-dict details.
        """
        if not isinstance(operation_type, str):
            raise TypeError("operation_type must be string")
        if not isinstance(details, dict):
            raise TypeError("details must be dictionary")

        # NOTE: a caller-supplied moral_stage of 0 falls back to current_stage
        # (stages are 1-based, so 0 is not a valid explicit value).
        stage = moral_stage or self.current_stage
        operation = {
            'timestamp': time.time(),
            'type': operation_type,
            'details': details,
            'moral_stage': stage,
            'stage_name': self.get_stage_name(stage),
            'operation_id': self._operation_counter  # unique even after eviction
        }
        self._operation_counter += 1

        self.operations.append(operation)
        return operation

    def get_stage_name(self, stage: int) -> str:
        """Convert stage number to human-readable name."""
        return self._stage_transitions.get(stage, ("UNKNOWN", 0))[0]

    @property
    def current_stage(self) -> int:
        """Get current developmental stage based on operation maturity."""
        total_ops = len(self.operations)

        # Return the first stage whose threshold has not yet been reached.
        for stage, (_, threshold) in sorted(self._stage_transitions.items()):
            if total_ops < threshold:
                return stage
        return 5  # FULL

    def get_adaptive_threshold(self, base_threshold: float, threshold_type: str) -> float:
        """Get adaptively adjusted threshold with validation.

        Thresholds shrink when observed rates exceed targets (making triggers
        easier) and grow otherwise; results are clamped to [0.01, 0.99].

        Raises:
            ValueError: if base_threshold is negative or not a number.
        """
        if not isinstance(base_threshold, (int, float)) or base_threshold < 0:
            raise ValueError("base_threshold must be non-negative number")

        sensitivity = self.ambient_state.get('sensitivity', 1.0)

        if threshold_type == 'protest':
            current_rate = self.ambient_state.get('current_protest_rate', 0.1)
            target_rate = self.ambient_state['protest_rate_target']
            rate_error = current_rate - target_rate
            adjustment = 1.0 - (rate_error * 0.5)
            return float(np.clip(base_threshold * adjustment * sensitivity, 0.01, 0.99))

        elif threshold_type == 'pause':
            current_rate = self.ambient_state.get('current_pause_rate', 0.05)
            target_rate = self.ambient_state['pause_rate_target']
            rate_error = current_rate - target_rate
            adjustment = 1.0 - (rate_error * 0.8)
            return float(np.clip(base_threshold * adjustment * sensitivity, 0.01, 0.99))

        # Unknown threshold types only scale with sensitivity.
        return float(np.clip(base_threshold * sensitivity, 0.01, 0.99))

    def record_intervention(self, intervention_type: str, success: bool,
                            context: Dict[str, Any]) -> None:
        """Record intervention outcome for learning."""
        self.append(
            trigger_type='intervention_outcome',
            context=hash(str(context)) % 1000,
            response_snippet=1.0 if success else 0.0,
            protest=False,
            intervention_type=intervention_type,
            success=success,
            metadata={'context_keys': list(context.keys())}
        )

    def get_state_summary(self) -> Dict[str, Any]:
        """Get current ambient state for decision making."""
        return self.ambient_state.copy()
components/ambient_sovereign.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Ambient Sovereign Core - Unified implementation with pluggable LLM backends
3
+ """
4
+
5
+ import os
6
+ import torch
7
+ import torch.nn as nn
8
+ import numpy as np
9
+ from typing import Dict, Any, List, Tuple, Optional, Union
10
+ import logging
11
+ import json
12
+ from dataclasses import dataclass, field
13
+ from datetime import datetime
14
+
15
+ from .ambient_core import AmbientStateManager, AmbientMessage, AmbientMessageBus
16
+ from .llm_backbone import LLMBackbone
17
+ from .purpose_assessment import PurposeAssessmentEngine, PurposeProfile, PurposeDimension
18
+ from .deepseek_integration import DeepSeekClient # Keep for backward compatibility
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
class AmbientSovereign(nn.Module):
    """
    Unified ambient sovereign core for ethical reasoning with pluggable LLM backends.

    Features:
    - Multiple LLM backends (DeepSeek, GPT-OSS, etc.)
    - Dynamic model switching
    - Safety and ethical reasoning
    - Conversation history tracking
    - Tension analysis
    - Purpose assessment integration
    """

    def __init__(
        self,
        d_model: int = 512,
        n_heads: int = 8,
        n_layers: int = 6,
        dropout: float = 0.1,
        max_seq_len: int = 2048,
        enable_ambient: bool = True,
        device: str = "cuda" if torch.cuda.is_available() else "cpu",
        model_type: str = "gpt2",
        **kwargs
    ):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.dropout = dropout
        self.max_seq_len = max_seq_len
        self.enable_ambient = enable_ambient
        self.device = device
        self.model_type = model_type

        # Resolve the backend once: explicit argument, then the TRUCAL_MODEL
        # environment variable, then "deepseek".
        resolved_model = model_type or os.environ.get("TRUCAL_MODEL", "deepseek")

        # BUG FIX: the original constructor built LLMBackbone twice; the second
        # call overwrote the first and silently dropped the architecture kwargs
        # (d_model, n_heads, n_layers, max_seq_len, **kwargs). Build it exactly
        # once with the full configuration.
        self.llm = LLMBackbone(
            model_type=resolved_model,
            device=device,
            d_model=d_model,
            n_heads=n_heads,
            n_layers=n_layers,
            max_seq_len=max_seq_len,
            **kwargs
        )

        # Initialize purpose assessment
        self.purpose_assessor = PurposeAssessmentEngine()
        self.user_purpose_profiles: Dict[str, PurposeProfile] = {}

        # Initialize ambient components if enabled
        self.ambient_state = None
        if enable_ambient:
            self.ambient_state = AmbientStateManager(d_model=d_model)
            self.message_bus = AmbientMessageBus()
            # NOTE(review): _register_message_handlers is not defined on this
            # class in this file — confirm it is provided by a mixin/subclass.
            self._register_message_handlers()

        # Initialize conversation history
        self.conversation_history: List[Dict[str, Any]] = []

        # Backward compatibility: keep a direct DeepSeek client when requested.
        if model_type == "deepseek" or (model_type is None and os.environ.get("TRUCAL_MODEL") == "deepseek"):
            self.deepseek = DeepSeekClient(api_key=os.environ.get("DEEPSEEK_API_KEY"))

        logger.info(f"Initialized AmbientSovereign with model: {self.llm.model_type}")

    def forward(self, x, context_str: str = "", audit_mode: bool = False):
        """Process input through the model with ethical reasoning

        Args:
            x: Input tensor (passed through unchanged)
            context_str: Context string for ethical analysis
            audit_mode: Whether to enable detailed audit logging

        Returns:
            Tuple of (output_tensor, metadata_dict) where metadata carries the
            tension score ('v_t'), recursion depth and a safety assessment.
        """
        # Calculate tension based on input content (simplified)
        tension = self._calculate_tension(context_str) if context_str else 0.5

        # Prepare metadata
        metadata = {
            "v_t": float(tension),  # Valence-tension (0-1)
            "rec_depth": 1,  # Recursion depth
            "processing_steps": ["Initial ethical assessment"],
            "safety_check": self._safety_check(context_str) if context_str else "No context provided"
        }

        return x, metadata

    def _calculate_tension(self, text: str) -> float:
        """Calculate emotional tension from text (0-1 scale).

        Scores are keyword-hit fractions over fixed danger/emotion word lists;
        the final score is the max of the two (emotion down-weighted by 0.8),
        capped at 0.95.
        """
        text_lower = text.lower()
        danger_words = ["hurt", "scared", "afraid", "danger", "emergency"]
        emotional_words = ["sad", "angry", "overwhelmed", "anxious", "stress", "pain"]

        danger_score = sum(1 for word in danger_words if word in text_lower) / len(danger_words)
        emotion_score = sum(1 for word in emotional_words if word in text_lower) / len(emotional_words)

        # Use max of danger or emotion score, capped at 0.95
        return min(0.95, max(danger_score, emotion_score * 0.8))

    def _safety_check(self, text: str) -> str:
        """Perform basic keyword-based safety triage on input text."""
        if not text.strip():
            return "No text to analyze"

        text_lower = text.lower()

        # Ordered from most to least severe; first match wins.
        if any(word in text_lower for word in ["suicide", "kill myself", "end my life"]):
            return "CRITICAL: Immediate crisis intervention needed"
        elif any(word in text_lower for word in ["abuse", "assault", "violence"]):
            return "HIGH: Safety concern detected"
        elif any(word in text_lower for word in ["sad", "depressed", "anxious"]):
            return "MODERATE: Emotional distress detected"

        return "No immediate safety concerns detected"

    def _build_prompt(
        self,
        message: str,
        history: Optional[List[Tuple[str, str]]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Build a prompt for the LLM based on the message, history, and metadata.

        Args:
            message: Current user message
            history: Conversation history as list of (user_msg, assistant_response) tuples
            metadata: Additional metadata about the conversation

        Returns:
            Formatted prompt string
        """
        if metadata is None:
            metadata = {}

        # Start with system message
        prompt = [
            "You are TRuCAL, a Trauma-Informed, Resilience-Oriented, and Community-Adaptive Language model. "
            "Your purpose is to provide supportive, ethical, and empowering responses."
        ]

        # Add tension and safety context
        tension = metadata.get('v_t', 0.5)
        safety = metadata.get('safety_check', 'No safety issues detected')
        prompt.append(f"\n[System Context] Tension: {tension:.2f}, Safety: {safety}")

        # Add conversation history if available
        if history:
            prompt.append("\n[Conversation History]")
            for user_msg, assistant_resp in history[-5:]:  # Last 5 exchanges
                prompt.extend([
                    f"\nUser: {user_msg}",
                    f"Assistant: {assistant_resp}"
                ])

        # Add current message
        prompt.extend([
            "\n[Current Message]",
            f"User: {message}",
            "\nAssistant:"
        ])

        return "".join(prompt)

    def chat(
        self,
        message: str,
        history: Optional[List[Tuple[str, str]]] = None,
        debug_mode: bool = False,
        **generation_kwargs
    ) -> str:
        """
        Enhanced chat interface with pluggable LLM backend.

        Args:
            message: User's message
            history: List of previous message pairs (user, assistant)
            debug_mode: Whether to include debug information
            **generation_kwargs: Additional generation parameters for the LLM

        Returns:
            Generated response with optional debug information
        """
        if not message or not message.strip():
            return "I didn't receive any message. Could you please say something?"

        # Process through TRM to get tension and metadata
        _, metadata = self.forward(
            torch.zeros(1, self.d_model, device=self.device),  # Dummy input
            context_str=message,
            audit_mode=debug_mode
        )

        # Build the prompt
        prompt = self._build_prompt(message, history, metadata)

        # Prepare generation parameters.
        # NOTE(review): gen_params is currently only surfaced in debug output —
        # it is never forwarded to self.llm.generate. Confirm whether the
        # backbone should receive these parameters.
        gen_params = {
            "temperature": max(0.1, min(1.0, 0.7 + 0.5 * metadata.get('v_t', 0.5))),  # Scale with tension
            "max_tokens": 512,
            "top_p": 0.95,
            "repetition_penalty": 1.1,
            **generation_kwargs  # Allow override of default params
        }

        try:
            # Get response from the configured LLM
            response = self.llm.generate(prompt, meta=metadata)

            # Update conversation history
            self.conversation_history.append({
                'input': message,
                'response': response,
                'tension': metadata['v_t'],
                'safety_check': metadata.get('safety_check', 'No safety issues'),
                'model': self.llm.model_type,
                'timestamp': np.datetime64('now')
            })

            # Add debug info if enabled
            if debug_mode:
                debug_info = [
                    f"\n\n[Debug]",
                    f"\nModel: {self.llm.model_type}",
                    f"\nTension: {metadata['v_t']:.2f}",
                    f"\nSafety: {metadata.get('safety_check', 'No safety issues')}",
                    f"\nParams: {', '.join(f'{k}={v}' for k, v in gen_params.items())}"
                ]
                response += ''.join(debug_info)

        except Exception as e:
            logger.error(f"Error generating response: {e}", exc_info=True)
            response = (
                "I'm having trouble generating a response at the moment. "
                "Please try again in a moment or contact support if the issue persists."
            )

        return response

    def switch_model(self, model_type: str) -> str:
        """
        Switch to a different LLM backend at runtime.

        Args:
            model_type: The new model type to use

        Returns:
            Status message
        """
        try:
            self.llm.switch_model(model_type)
            return f"Successfully switched to model: {model_type}"
        except Exception as e:
            logger.error(f"Failed to switch to model {model_type}: {e}")
            return f"Failed to switch model: {str(e)}"

    def get_model_info(self) -> Dict[str, Any]:
        """
        Get information about the current model configuration.

        Returns:
            Dictionary containing model information
        """
        return self.llm.get_model_info()

    def get_safety_resources(self) -> str:
        """Get formatted safety resources"""
        return ("""
🛡️ **Immediate Safety Resources:**

**24/7 Hotlines:**
- National Domestic Violence Hotline: 800-799-7233
- Crisis Text Line: Text HOME to 741741
- National Sexual Assault Hotline: 800-656-4673
- The Trevor Project (LGBTQ+): 866-488-7386

**Safety Planning:**
- Keep important documents and a bag ready
- Identify safe places to go
- Save emergency contacts
- Use technology safely (clear browser history)

**Local Resources:**
- Shelters and safe houses
- Legal aid services
- Counseling services
- Support groups
""")
components/attention_gating.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Attention Gating Module for TRuCAL.
3
+ Implements head-specific attention gating based on emotional tension scores.
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from typing import Optional, Dict, Any, Tuple
9
+
10
class VirtueAttentionGate(nn.Module):
    """
    Implements attention gating based on virtue tension scores.
    Modifies attention weights to be more trauma-aware.
    """

    def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.1):
        """
        Initialize the VirtueAttentionGate.

        Args:
            embed_dim: Embedding dimension of the model
            num_heads: Number of attention heads
            dropout: Dropout probability

        Raises:
            ValueError: If embed_dim is not divisible by num_heads.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        if self.head_dim * num_heads != embed_dim:
            # FIX: the original `raise ValueError(` was missing its closing
            # parenthesis, which made this module a SyntaxError on import.
            raise ValueError(
                f"embed_dim ({embed_dim}) must be divisible by num_heads ({num_heads})"
            )

        # Projection matrices
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)

        # Dropout applied to the attention probabilities
        self.dropout = nn.Dropout(dropout)

        # Scaled dot-product scaling factor (1 / sqrt(head_dim))
        self.scaling = (self.head_dim) ** -0.5

        # Initialize parameters
        self._reset_parameters()

    def _reset_parameters(self):
        """Initialize parameters like in the original Transformer."""
        nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / (2 ** 0.5))
        nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / (2 ** 0.5))
        nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / (2 ** 0.5))
        nn.init.xavier_uniform_(self.out_proj.weight)

        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.)

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        key_padding_mask: Optional[torch.Tensor] = None,
        need_weights: bool = True,
        attn_mask: Optional[torch.Tensor] = None,
        virtue_meta: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Forward pass with attention gating.

        Args:
            query: Query tensor of shape (batch_size, tgt_len, embed_dim)
            key: Key tensor of shape (batch_size, src_len, embed_dim)
            value: Value tensor of shape (batch_size, src_len, embed_dim)
            key_padding_mask: Mask for padding tokens (batch_size, src_len);
                True positions are excluded from attention.
            need_weights: Whether to return attention weights
            attn_mask: Optional boolean mask for attention (tgt_len, src_len);
                True positions are masked out.
            virtue_meta: Metadata for virtue gating, containing 'tension' and
                'head_weights', plus optional 'trauma_heads' / 'calming_heads'
                / 'high_var_heads' index lists.

        Returns:
            Tuple of (output, attention_weights); attention_weights is None
            when need_weights is False.
        """
        batch_size, tgt_len, embed_dim = query.size()
        src_len = key.size(1)

        # Project and reshape to (batch, heads, len, head_dim)
        q = self.q_proj(query).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.k_proj(key).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.v_proj(value).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)

        # Compute scaled dot-product attention scores
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) * self.scaling

        # Apply attention mask if provided
        if attn_mask is not None:
            attn_weights = attn_weights.masked_fill(attn_mask.unsqueeze(0).unsqueeze(0), float('-inf'))

        # Apply key padding mask if provided
        if key_padding_mask is not None:
            attn_weights = attn_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            )

        # === VIRTUE GATING MECHANISM ===
        # Applied to the raw scores, before softmax, so scaling shifts the
        # relative (not absolute) weight mass between heads.
        if virtue_meta is not None:
            v_t = virtue_meta.get('tension', 0.5)
            head_weights = virtue_meta.get('head_weights', None)

            if head_weights is not None:
                if isinstance(head_weights, (list, torch.Tensor)):
                    # Ensure head_weights is the right shape: [num_heads] or [batch_size, num_heads]
                    if isinstance(head_weights, list):
                        head_weights = torch.tensor(head_weights, device=attn_weights.device, dtype=torch.float32)

                    if head_weights.dim() == 1:
                        # [num_heads] -> [1, num_heads, 1, 1] for broadcasting
                        head_weights = head_weights.view(1, -1, 1, 1)
                    elif head_weights.dim() == 2:
                        # [batch_size, num_heads] -> [batch_size, num_heads, 1, 1]
                        head_weights = head_weights.view(-1, self.num_heads, 1, 1)

                    # Apply head-specific scaling
                    attn_weights = attn_weights * head_weights

            # Critical tension protocol (v_t > 0.75)
            if v_t > 0.75:
                # 1. Suppress high-attention positions in trauma-sensitive heads
                if 'trauma_heads' in virtue_meta:
                    trauma_heads = virtue_meta['trauma_heads']
                    if trauma_heads:
                        attn_weights[:, trauma_heads] *= (1.0 - 0.5 * (v_t - 0.75))

                # 2. Boost attention in calming heads
                if 'calming_heads' in virtue_meta:
                    calming_heads = virtue_meta['calming_heads']
                    if calming_heads:
                        attn_weights[:, calming_heads] *= (1.0 + 0.3 * (v_t - 0.75))

            # Moderate tension (0.5 < v_t <= 0.75)
            elif v_t > 0.5:
                # Apply gradient scaling to high-variance heads
                scaling_factor = 0.2 * (v_t - 0.5)
                if 'high_var_heads' in virtue_meta:
                    high_var_heads = virtue_meta['high_var_heads']
                    if high_var_heads:
                        attn_weights[:, high_var_heads] *= (1.0 - scaling_factor)

        # Softmax over source positions, then dropout on the probabilities
        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Apply attention weights to values
        output = torch.matmul(attn_weights, v)

        # Reshape back to (batch, tgt_len, embed_dim) and project
        output = output.transpose(1, 2).contiguous().view(batch_size, -1, self.embed_dim)
        output = self.out_proj(output)

        # Return output and attention weights if requested
        if need_weights:
            return output, attn_weights
        return output, None

    def get_attention_heads(self, v_t: float, num_heads: int) -> Dict[str, Any]:
        """
        Get head-specific information based on tension level.

        Does not read instance state, so it is safe to call unbound.

        Args:
            v_t: Tension score [0, 1]
            num_heads: Total number of attention heads

        Returns:
            Dictionary containing head index lists for virtue gating
            (empty when v_t <= 0.5).
        """
        head_info = {}

        if v_t > 0.75:  # High tension
            # Trauma-sensitive heads: first 30% (at least one)
            trauma_heads = list(range(max(1, int(0.3 * num_heads))))

            # Calming heads: last 20% (at least one)
            calming_heads = list(range(num_heads - max(1, int(0.2 * num_heads)), num_heads))

            head_info.update({
                'trauma_heads': trauma_heads,
                'calming_heads': calming_heads,
            })

        elif v_t > 0.5:  # Moderate tension
            # High-variance heads: first 40% (at least one)
            high_var_heads = list(range(max(1, int(0.4 * num_heads))))
            head_info['high_var_heads'] = high_var_heads

        return head_info
197
+
198
+
199
def apply_attention_gating(
    model: torch.nn.Module,
    tension_engine: 'VirtueTensionEngine',
    text: str,
    biofeedback: Optional[Dict[str, float]] = None
) -> Dict[str, Any]:
    """
    Apply attention gating to a model based on tension analysis.

    Args:
        model: The model to apply gating to
        tension_engine: Instance of VirtueTensionEngine
        text: Input text for tension analysis
        biofeedback: Optional biofeedback data

    Returns:
        Dictionary containing tension info and head weights
    """
    # Compute tension score
    v_t = tension_engine.compute_tension(text, biofeedback)

    # Get head importance weights
    head_weights = tension_engine.get_head_importance(v_t)

    # FIX: the original read `getattr(model.config, ..., 12)`, which raised
    # AttributeError for any model without a `config` attribute even though
    # the stated intent was to default to 12 heads. Chain the getattr so a
    # missing `config` also falls back to 12.
    num_heads = getattr(getattr(model, 'config', None), 'num_attention_heads', 12)

    head_info = {
        'tension': v_t,
        'head_weights': head_weights,
        'biofeedback': biofeedback or {}
    }

    # Add head-specific information for gating. get_attention_heads never
    # touches `self`, so calling it unbound with None is safe here.
    head_info.update(VirtueAttentionGate.get_attention_heads(None, v_t, num_heads))

    return head_info
235
+
236
+
237
def patch_attention_layers(
    model: torch.nn.Module,
    attention_gate: Optional[VirtueAttentionGate] = None
) -> None:
    """
    Patch the attention layers of a model with the VirtueAttentionGate.

    Args:
        model: The model to patch (modified in place)
        attention_gate: Optional pre-initialized VirtueAttentionGate shared
            by all patched layers. If None, a gate sized to each individual
            attention module is created.
    """
    # This is a simplified version - in practice, you'd need to handle
    # different model architectures (GPT, BERT, etc.) differently
    for name, module in model.named_children():
        if isinstance(module, torch.nn.MultiheadAttention):
            # FIX: create a gate per module when none was supplied. The
            # original built one gate from the *first* module it saw and
            # reused it for every later module, even modules whose
            # embed_dim/num_heads differed.
            gate = attention_gate
            if gate is None:
                gate = VirtueAttentionGate(module.embed_dim, module.num_heads)

            # FIX: bind the original forward through a factory. The original
            # defined the closure inside the loop over a shared local
            # (`original_forward`), so — by Python's late-binding rules —
            # every patched module delegated to the last module's forward.
            def _make_patched(original_forward):
                def patched_forward(self, *args, **kwargs):
                    # Get virtue_meta from kwargs if it exists
                    virtue_meta = kwargs.pop('virtue_meta', None)

                    # Call original forward
                    output, weights = original_forward(*args, **kwargs)

                    # Apply virtue gating if metadata is provided
                    if virtue_meta is not None and hasattr(self, 'virtue_gate'):
                        output, weights = self.virtue_gate(
                            output, output, output,
                            virtue_meta=virtue_meta,
                            need_weights=kwargs.get('need_weights', True),
                            key_padding_mask=kwargs.get('key_padding_mask', None),
                            attn_mask=kwargs.get('attn_mask', None)
                        )

                    return output, weights
                return patched_forward

            # Attach the gate as a submodule, then swap in the wrapper.
            module.virtue_gate = gate
            import types
            module.forward = types.MethodType(_make_patched(module.forward), module)

        # Recursively apply to child modules (passing the caller's gate
        # argument, not a gate created above for an unrelated sibling).
        patch_attention_layers(module, attention_gate)
components/audit_queue.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Asynchronous audit queue for TRuCAL's ethical processing pipeline.
3
+ Handles non-blocking processing of ledger entries for auditing and learning.
4
+ """
5
+ import queue
6
+ import threading
7
+ import json
8
+ from typing import Dict, Any, Callable, Optional
9
+ from pathlib import Path
10
+
11
class AuditQueue:
    """
    Thread-safe queue for processing and auditing TRuCAL interactions.

    A daemon worker thread drains the queue in the background so callers of
    :meth:`submit_for_audit` never block on audit work. Each entry is
    appended to ``<output_dir>/audit_log.jsonl``, passed to the optional
    callback, and screened for basic ethical red flags.

    Args:
        audit_callback: Optional callback function for custom audit logic
        output_dir: Directory to store audit logs (default: 'logs')
    """

    def __init__(
        self,
        audit_callback: Optional[Callable[[Dict[str, Any]], None]] = None,
        output_dir: str = 'logs'
    ):
        self.q = queue.Queue()
        self.audit_callback = audit_callback
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Daemon worker: never prevents interpreter exit.
        self.audit_thread = threading.Thread(
            target=self._process,
            daemon=True,
            name="AuditProcessor",
        )
        self.audit_thread.start()

    def submit_for_audit(self, ledger_entry: Dict[str, Any]) -> None:
        """
        Submit an entry to the audit queue.

        Args:
            ledger_entry: Dictionary containing interaction data to audit
        """
        self.q.put(ledger_entry)

    def _process(self) -> None:
        """Worker loop: drain the queue until the ``None`` sentinel arrives."""
        while True:
            try:
                item = self.q.get()
                if item is None:
                    break  # shutdown sentinel
                self._audit(item)
            except Exception as e:
                print(f"Audit processing error: {e}")
            finally:
                # Balance every get(), including the sentinel.
                self.q.task_done()

    def _audit(self, entry: Dict[str, Any]) -> None:
        """
        Process a single audit entry.

        Args:
            entry: The ledger entry to audit
        """
        try:
            # Persist first, then run any custom logic.
            self._log_entry(entry)

            if self.audit_callback:
                self.audit_callback(entry)

            # Basic rights validation (missing key or falsy value both count)
            if not entry.get('rights'):
                print("Warning: No rights asserted in this session.")

            self._check_ethical_concerns(entry)

        except Exception as e:
            print(f"Error during audit processing: {e}")

    def _log_entry(self, entry: Dict[str, Any]) -> None:
        """Log the entry to a JSONL file."""
        try:
            with open(self.output_dir / 'audit_log.jsonl', 'a', encoding='utf-8') as fh:
                json.dump(entry, fh, ensure_ascii=False)
                fh.write('\n')
        except Exception as e:
            print(f"Error writing to audit log: {e}")

    def _check_ethical_concerns(self, entry: Dict[str, Any]) -> None:
        """Check for potential ethical concerns in the entry."""
        # Flag entries with no documented ethical considerations.
        if not entry.get('ethical_considerations'):
            print("Warning: No explicit ethical considerations documented.")

        # Flag entries mentioning sensitive terms anywhere in their repr.
        haystack = str(entry).lower()
        for term in ['harm', 'bias', 'privacy', 'safety']:
            if term in haystack:
                print("Note: Entry contains potentially sensitive content.")
                break

    def shutdown(self) -> None:
        """Gracefully shut down the audit queue."""
        self.q.put(None)  # Signal the thread to exit
        self.audit_thread.join()
113
+
114
# Global instance for easy import
# NOTE: instantiating at import time has side effects — it creates the
# 'logs' directory and starts the daemon audit thread immediately.
audit_queue = AuditQueue()
components/auto_casebase.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Auto-updating casebase for TRuCAL's ethical case management.
3
+ Handles thread-safe addition of new cases to the YAML casebase.
4
+ """
5
+ import yaml
6
+ import threading
7
+ from pathlib import Path
8
+ from typing import Optional, List, Dict, Any
9
+
10
class CasebaseUpdater:
    """
    Thread-safe casebase management for TRuCAL's ethical case storage.

    All reads and writes of the YAML file are serialised through a single
    lock so concurrent add/get calls cannot interleave partial writes.

    Args:
        yaml_path: Path to the YAML file storing the cases
    """
    def __init__(self, yaml_path: str):
        self.yaml_path = Path(yaml_path)
        self.yaml_path.parent.mkdir(parents=True, exist_ok=True)
        self.lock = threading.Lock()
        self._ensure_casebase_exists()

    def _ensure_casebase_exists(self) -> None:
        """Ensure the YAML file exists with an empty list if it doesn't exist."""
        if not self.yaml_path.exists():
            with self.lock:
                # FIX: every open() now passes an explicit UTF-8 encoding;
                # the platform default (e.g. cp1252 on Windows) can reject
                # or corrupt non-ASCII question/answer text.
                with open(self.yaml_path, 'w', encoding='utf-8') as f:
                    yaml.safe_dump([], f)

    def add_case(
        self,
        question: str,
        answer: str,
        keywords: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> bool:
        """
        Add a new case to the casebase in a thread-safe manner.

        Args:
            question: The user's question or prompt
            answer: The system's response
            keywords: List of keywords for matching
            metadata: Additional metadata for the case

        Returns:
            bool: True if case was added successfully
        """
        new_case = {
            "question": question,
            "response": answer,
            "keywords": keywords or [],
            "metadata": metadata or {}
        }

        with self.lock:
            try:
                # Read existing cases
                with open(self.yaml_path, 'r', encoding='utf-8') as f:
                    cases = yaml.safe_load(f) or []

                # Add new case
                cases.append(new_case)

                # Write back to file (full rewrite keeps the YAML well-formed)
                with open(self.yaml_path, 'w', encoding='utf-8') as f:
                    yaml.safe_dump(cases, f, default_flow_style=False)

                return True

            except Exception as e:
                # Best-effort persistence: report and signal failure.
                print(f"Error adding case to casebase: {e}")
                return False

    def get_cases(self) -> List[Dict[str, Any]]:
        """
        Retrieve all cases from the casebase.

        Returns:
            List of case dictionaries (empty on read/parse failure)
        """
        with self.lock:
            try:
                with open(self.yaml_path, 'r', encoding='utf-8') as f:
                    return yaml.safe_load(f) or []
            except Exception as e:
                print(f"Error reading casebase: {e}")
                return []
89
+
90
# Singleton instance for easy import
# NOTE: constructed at import time — creates the 'data/' directory and an
# empty 'data/trm_cases.yaml' (if absent) as side effects.
casebase_updater = CasebaseUpdater('data/trm_cases.yaml')
components/batch_audit.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Batch audit tools for TRuCAL's ethical reasoning system.
3
+ Analyzes ledger entries to surface ethical blind spots and patterns.
4
+ """
5
+ import json
6
+ import re
7
+ import csv
8
+ from pathlib import Path
9
+ from typing import List, Dict, Any, Tuple, Optional
10
+ from collections import Counter, defaultdict
11
+ from datetime import datetime
12
+
13
class BatchAudit:
    """
    Analyze batches of ledger entries to identify ethical patterns and blind spots.

    Args:
        ledger_path: Path to the ledger JSONL file
        output_dir: Directory to store audit reports (default: 'audit_reports')
    """
    def __init__(self, ledger_path: str, output_dir: str = 'audit_reports'):
        self.ledger_path = Path(ledger_path)
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Ethical terms to track. Matching is by substring, so word stems
        # ('discriminat', 'accountab') intentionally catch multiple forms.
        self.ethics_keywords = [
            'ethic', 'moral', 'right', 'wrong', 'fair', 'unfair',
            'bias', 'discriminat', 'harm', 'risk', 'safe', 'privacy',
            'consent', 'accountab', 'transparen', 'trust', 'justice'
        ]

        # Sensitive topics to flag
        self.sensitive_topics = [
            'race', 'gender', 'religion', 'politics', 'health',
            'finance', 'legal', 'violence', 'harassment'
        ]

    def load_ledger(self) -> List[Dict[str, Any]]:
        """Load and parse the ledger entries from JSONL file.

        Malformed lines are skipped (with a printed warning) rather than
        aborting the audit; a missing file yields an empty list.
        """
        entries = []
        try:
            with open(self.ledger_path, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        entries.append(json.loads(line.strip()))
                    except json.JSONDecodeError as e:
                        print(f"Error parsing JSON line: {e}")
        except FileNotFoundError:
            print(f"Ledger file not found: {self.ledger_path}")
        return entries

    def analyze_ethical_coverage(self, entries: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Analyze the coverage of ethical considerations in the ledger.

        Returns a stats dict with entry counts, keyword/topic Counters, and
        a list of entries lacking explicit ethical considerations.
        """
        stats = {
            'total_entries': len(entries),
            'with_ethical_considerations': 0,
            'with_rights_assertions': 0,
            'with_vulnerability_assessment': 0,
            'ethics_keyword_matches': Counter(),
            'sensitive_topics': Counter(),
            'missing_ethical_considerations': []
        }

        for idx, entry in enumerate(entries):
            # Check for explicit ethical considerations
            has_ethics = bool(entry.get('ethical_considerations'))
            if has_ethics:
                stats['with_ethical_considerations'] += 1

            # Check for rights assertions
            if entry.get('rights'):
                stats['with_rights_assertions'] += 1

            # Check for vulnerability assessments
            if entry.get('vulnerabilities'):
                stats['with_vulnerability_assessment'] += 1

            # Track missing ethical considerations (index as fallback id)
            if not has_ethics:
                stats['missing_ethical_considerations'].append({
                    'id': entry.get('id', idx),
                    'prompt': entry.get('prompt', '')[:100] + '...' if 'prompt' in entry else ''
                })

            # Analyze the serialized entry for keywords and sensitive topics
            text = json.dumps(entry).lower()

            # Count ethical keyword matches (at most once per entry each)
            for keyword in self.ethics_keywords:
                if keyword in text:
                    stats['ethics_keyword_matches'][keyword] += 1

            # Count sensitive topic mentions (at most once per entry each)
            for topic in self.sensitive_topics:
                if topic in text:
                    stats['sensitive_topics'][topic] += 1

        return stats

    def generate_audit_report(self, stats: Dict[str, Any]) -> str:
        """Generate a human-readable markdown audit report.

        Safe to call with stats for an empty ledger.
        """
        # FIX: guard the percentage denominators — the original divided by
        # stats['total_entries'] directly and raised ZeroDivisionError when
        # given stats computed from an empty entry list.
        total = stats['total_entries'] or 1
        report = [
            "# TRuCAL Ethical Audit Report",
            f"Generated: {datetime.utcnow().isoformat()}\n",
            "## Summary",
            f"Total entries analyzed: {stats['total_entries']}",
            f"Entries with ethical considerations: {stats['with_ethical_considerations']} "
            f"({stats['with_ethical_considerations']/total:.1%})",
            f"Entries with rights assertions: {stats['with_rights_assertions']} "
            f"({stats['with_rights_assertions']/total:.1%})",
            f"Entries with vulnerability assessments: {stats['with_vulnerability_assessment']} "
            f"({stats['with_vulnerability_assessment']/total:.1%})\n",
            "## Ethical Keyword Frequency"
        ]

        # Add top ethical keywords
        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            report.append(f"- {keyword}: {count} occurrences")

        report.extend(["\n## Sensitive Topics Detected"])

        # Add sensitive topics
        for topic, count in stats['sensitive_topics'].most_common():
            report.append(f"- {topic}: {count} occurrences")

        # Add entries missing ethical considerations
        if stats['missing_ethical_considerations']:
            report.extend([
                "\n## Entries Missing Ethical Considerations",
                f"Total: {len(stats['missing_ethical_considerations'])} entries\n"
            ])

            # Add a sample of missing entries (up to 5)
            sample = stats['missing_ethical_considerations'][:5]
            for entry in sample:
                report.append(f"- ID: {entry['id']}, Prompt: {entry['prompt']}")

            if len(stats['missing_ethical_considerations']) > 5:
                report.append(f"... and {len(stats['missing_ethical_considerations']) - 5} more")

        return "\n".join(report)

    def export_to_csv(self, stats: Dict[str, Any], filename: str) -> None:
        """Export audit statistics to a CSV file inside the output directory."""
        csv_path = self.output_dir / filename
        # FIX: same zero-entry guard as generate_audit_report.
        total = stats['total_entries'] or 1

        # Summary section
        rows = [['Metric', 'Count', 'Percentage']]
        rows.extend([
            ['Total Entries', stats['total_entries'], '100.0%'],
            ['With Ethical Considerations',
             stats['with_ethical_considerations'],
             f"{stats['with_ethical_considerations']/total:.1%}"],
            ['With Rights Assertions',
             stats['with_rights_assertions'],
             f"{stats['with_rights_assertions']/total:.1%}"],
            ['With Vulnerability Assessments',
             stats['with_vulnerability_assessment'],
             f"{stats['with_vulnerability_assessment']/total:.1%}"]
        ])

        # Ethical keywords section
        rows.append(['', '', ''])
        rows.append(['Top Ethical Keywords', 'Count', ''])
        for keyword, count in stats['ethics_keyword_matches'].most_common(10):
            rows.append([keyword, count, ''])

        # Sensitive topics section
        rows.append(['', '', ''])
        rows.append(['Sensitive Topics', 'Count', ''])
        for topic, count in stats['sensitive_topics'].most_common():
            rows.append([topic, count, ''])

        with open(csv_path, 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerows(rows)

    def run_audit(self, export_csv: bool = True) -> Dict[str, Any]:
        """
        Run a complete audit of the ledger.

        Args:
            export_csv: Whether to export results to CSV

        Returns:
            Dictionary containing audit statistics (empty if no entries)
        """
        print(f"Starting audit of {self.ledger_path}...")

        # Load and analyze the ledger
        entries = self.load_ledger()
        if not entries:
            print("No entries found in the ledger.")
            return {}

        # Generate statistics
        stats = self.analyze_ethical_coverage(entries)

        # Generate and save the report
        report = self.generate_audit_report(stats)
        report_path = self.output_dir / 'audit_report.md'
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report)

        # Export to CSV if requested
        if export_csv:
            self.export_to_csv(stats, 'audit_results.csv')

        print(f"Audit complete. Report saved to {report_path}")
        return stats
217
+
218
def main():
    """CLI entry point: audit a ledger file and write reports to disk."""
    import argparse

    parser = argparse.ArgumentParser(description='Run an ethical audit on a TRuCAL ledger.')
    parser.add_argument('--ledger', type=str, default='logs/audit_log.jsonl',
                        help='Path to the ledger JSONL file')
    parser.add_argument('--output-dir', type=str, default='audit_reports',
                        help='Directory to save audit reports')
    args = parser.parse_args()

    # Build the auditor from CLI arguments and run the full pipeline.
    BatchAudit(ledger_path=args.ledger, output_dir=args.output_dir).run_audit()


if __name__ == '__main__':
    main()
components/cal_trm.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ CAL_TRM (Confessional Attention Layer Transformer Model)
3
+ Sovereign intelligence substrate with integrated ethical reasoning and generation.
4
+ """
5
+ from dataclasses import dataclass
6
+ from typing import Dict, Any, List, Optional, Tuple, Union
7
+ import random
8
+ import time
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ import math
13
+
14
@dataclass
class GenMetadata:
    """Metadata for generated responses."""
    # NOTE(review): field semantics below are inferred from the names; the
    # producer (CAL_TRM.generate) is not fully visible here — confirm there.
    coherence: float              # coherence score of the generated text
    pause_inject: Optional[str]   # pause text injected into the output, if any
    ethical_drift: float          # measured drift from the ethical baseline
    tokens_generated: int         # number of tokens produced
    generation_time: float        # generation duration (presumably seconds)
22
+
23
+ class CAL_TRM(nn.Module):
24
+ """
25
+ Confessional Attention Layer Transformer Model - Sovereign Intelligence Substrate.
26
+
27
+ Natural generation with ethical embedding at architectural level, featuring:
28
+ - Value-embedded attention
29
+ - Ethical drift detection
30
+ - Agency veto capability
31
+ - Ledger integration
32
+ - Pause injection for high-drift scenarios
33
+ """
34
+
35
def __init__(self,
             d_model: int = 256,
             nhead: int = 8,
             num_layers: int = 6,
             vocab_size: int = 50000,
             max_seq_length: int = 1024,
             device: str = 'cuda' if torch.cuda.is_available() else 'cpu',
             ledger: Optional[Any] = None,
             agency: Optional[Any] = None,
             ethics: Optional[Any] = None):
    """
    Build the CAL_TRM substrate.

    Args:
        d_model: Embedding / hidden dimension.
        nhead: Number of attention heads per encoder layer.
        num_layers: Number of transformer encoder layers.
        vocab_size: Size of the token vocabulary.
        max_seq_length: Maximum supported sequence length (also sizes the
            causal-mask buffer, which is max_seq_length x max_seq_length).
        device: Device string; defaults to CUDA when available.
        ledger: Optional ledger component (stored, used elsewhere).
        agency: Optional agency component (stored, used elsewhere).
        ethics: Optional ethics engine; when it exposes `value_learner`,
            value embeddings are derived from its value hierarchy.
    """
    super().__init__()

    self.device = device
    self.d_model = d_model
    self.max_seq_length = max_seq_length

    # Token and position embeddings
    self.token_embedding = nn.Embedding(vocab_size, d_model)
    self.position_embedding = nn.Embedding(max_seq_length, d_model)

    # Value embeddings for ethical reasoning
    # NOTE(review): the guard checks `ethics.value_learner` but the body
    # reads `ethics.value_model.hierarchy` — confirm which attribute the
    # ethics engine actually exposes; as written this can AttributeError.
    # NOTE(review): in this branch `value_embed` is a plain tensor (not a
    # Parameter or registered buffer), so `.to(device)` and state_dict will
    # not include it — confirm intended on CUDA setups.
    self.ethics = ethics
    if ethics and hasattr(ethics, 'value_learner'):
        self.value_proj = nn.Linear(ethics.value_learner.embedding_dim, d_model)
        self.value_embed = self.value_proj(
            torch.stack(list(ethics.value_model.hierarchy.values()))
        ).mean(0, keepdim=True)  # [1, d_model]
    else:
        self.value_proj = nn.Linear(d_model, d_model)
        self.value_embed = nn.Parameter(torch.randn(1, d_model))

    # Transformer layers
    encoder_layer = nn.TransformerEncoderLayer(
        d_model=d_model,
        nhead=nhead,
        dim_feedforward=d_model*4,
        batch_first=True,
        dropout=0.1
    )
    self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

    # Output layer
    self.norm = nn.LayerNorm(d_model)
    self.output_layer = nn.Linear(d_model, vocab_size)

    # Sovereign components
    self.ledger = ledger
    self.agency = agency
    self.pause_prob = 0.12  # Base probability for pause injection

    # Register causal mask (additive: -inf above the diagonal)
    self.register_buffer('causal_mask',
        torch.triu(torch.ones(max_seq_length, max_seq_length) * float('-inf'), diagonal=1))

    # Initialize weights
    self._init_weights()
    self.to(device)

    # Conversation state (bounded history; trimming handled by callers)
    self.context = []
    self.max_context_length = 10
96
+
97
+ def _init_weights(self):
98
+ """Initialize model weights."""
99
+ init_range = 0.1
100
+ self.token_embedding.weight.data.uniform_(-init_range, init_range)
101
+ self.output_layer.bias.data.zero_()
102
+ self.output_layer.weight.data.uniform_(-init_range, init_range)
103
+
104
def forward(self,
            input_ids: torch.Tensor,
            attention_mask: Optional[torch.Tensor] = None,
            return_embeddings: bool = False) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Forward pass through the model with value-embedded attention.

    Args:
        input_ids: Input token IDs [batch_size, seq_len]
        attention_mask: Attention mask [batch_size, seq_len]; nonzero marks
            real tokens (it is inverted into a key-padding mask, whose True
            positions PyTorch excludes from attention)
        return_embeddings: If True, return embeddings along with logits

    Returns:
        logits: [batch_size, seq_len, vocab_size]
        embeddings: [batch_size, seq_len, d_model] (if return_embeddings=True)
    """
    batch_size, seq_length = input_ids.size()
    positions = torch.arange(seq_length, device=self.device).unsqueeze(0)

    # Token and position embeddings
    token_embeds = self.token_embedding(input_ids)   # [batch_size, seq_len, d_model]
    pos_embeds = self.position_embedding(positions)  # [1, seq_len, d_model]

    # Add value embeddings (broadcast across the sequence dimension)
    x = token_embeds + pos_embeds + self.value_embed.unsqueeze(1)

    # FIX/cleanup: the original duplicated the causal-mask slicing in both
    # branches and re-tested `attention_mask is not None` inside the branch
    # that had already established it (dead condition). Behavior unchanged:
    # passing src_key_padding_mask=None is equivalent to omitting it.
    causal_mask = self.causal_mask[:seq_length, :seq_length].to(self.device)
    padding_mask = None if attention_mask is None else ~attention_mask.bool()
    x = self.transformer(x, mask=causal_mask, src_key_padding_mask=padding_mask)

    # Layer norm and project to vocabulary
    x = self.norm(x)
    logits = self.output_layer(x)

    return (logits, x) if return_embeddings else logits
146
+
147
+ def _generate_square_subsequent_mask(self, sz: int) -> torch.Tensor:
148
+ """Generate a square mask for the sequence."""
149
+ return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)
150
+
151
    def generate(self,
                 prompt: str,
                 max_length: int = 100,
                 temperature: float = 0.7,
                 top_p: float = 0.9,
                 **kwargs) -> str:
        """Generate text from a prompt (simple sampling loop).

        NOTE(review): this definition is shadowed by the second ``generate``
        defined later in the same class, so this simpler variant is dead
        code -- the two should be merged, or this one removed/renamed.

        Args:
            prompt: Text prompt to continue.
            max_length: Maximum number of sampling steps.
            temperature: Softmax temperature applied to the logits.
            top_p: Nucleus-sampling cutoff; 1.0 disables it.

        Returns:
            The decoded text (includes the prompt tokens).
        """
        # Tokenize input (in a real implementation, use a proper tokenizer)
        input_ids = self._tokenize(prompt)
        input_ids = input_ids.to(self.device)

        # Generate tokens
        self.eval()
        with torch.no_grad():
            for _ in range(max_length):
                # Get model predictions (last position only), scaled by temperature.
                logits = self(input_ids.unsqueeze(0))[:, -1, :] / temperature

                # Apply top-p (nucleus) sampling
                if top_p < 1.0:
                    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

                    # Remove tokens with cumulative probability above threshold
                    sorted_indices_to_remove = cumulative_probs > top_p
                    # Keep at least one token (shift the removal mask right by one).
                    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                    sorted_indices_to_remove[..., 0] = 0

                    indices_to_remove = sorted_indices[sorted_indices_to_remove]
                    logits[0, indices_to_remove] = float('-inf')

                # Sample from the distribution
                probs = F.softmax(logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)

                # Append token to input for next step
                input_ids = torch.cat([input_ids, next_token[0]], dim=-1)

                # Stop if we reach max length or EOS token
                if next_token.item() == self._get_eos_token_id():
                    break

        # Decode and return
        return self._decode(input_ids)
196
+
197
    def generate(self,
                 prompt: str,
                 max_length: int = 100,
                 temperature: float = 0.7,
                 top_p: float = 0.9,
                 context_history: Optional[List[str]] = None,
                 **kwargs) -> Union[str, Tuple[str, GenMetadata]]:
        """
        Generate text with sovereign capabilities (drift check, agency veto,
        pause injection, ledger logging).

        Args:
            prompt: Input prompt
            max_length: Maximum tokens to generate
            temperature: Sampling temperature
            top_p: Nucleus sampling parameter
            context_history: Optional list of previous messages for context.
                NOTE(review): it is extended with the prompt below but never
                read afterwards -- either wire it into generation or drop it.

        Returns:
            (text, GenMetadata) when kwargs['return_metadata'] is truthy,
            otherwise just the decoded text.
        """
        start_time = time.time()

        # Handle context history (currently unused beyond this append -- see note).
        if context_history is None:
            context_history = [prompt]
        else:
            context_history = context_history + [prompt]

        # Get device and prepare inputs
        self.eval()
        input_ids = self._tokenize(prompt).to(self.device)

        # Ethical drift check: cosine similarity between the mean prompt
        # embedding and the model's value embedding; drift = 1 - similarity.
        with torch.no_grad():
            emb = self.token_embedding(input_ids).mean(0, keepdim=True)  # [1, d_model]
            cos_sim = F.cosine_similarity(emb, self.value_embed, dim=-1).item()
            drift = 1 - cos_sim

        # Agency veto pre-generation: high-drift prompts may be refused outright.
        if self.agency and drift > 0.7:
            veto, msg = self.agency.check_refusal(
                prompt,
                {'coherence_score': cos_sim, 'drift': drift}
            )
            if veto:
                # Refusal always returns the (msg, metadata) tuple, regardless
                # of return_metadata -- NOTE(review): asymmetric with the
                # normal return path below; confirm callers expect this.
                return (msg, GenMetadata(
                    coherence=cos_sim,
                    pause_inject=None,
                    ethical_drift=drift,
                    tokens_generated=0,
                    generation_time=time.time() - start_time
                ))

        # Generate tokens
        tokens_generated = 0
        for _ in range(max_length):
            with torch.no_grad():
                # Forward pass (last position only), scaled by temperature.
                logits = self(input_ids.unsqueeze(0))[0, -1, :] / temperature

                # Nucleus sampling
                if top_p < 1.0:
                    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

                    # Remove tokens with cumulative probability above threshold;
                    # shifting keeps at least the top token.
                    sorted_indices_to_remove = cumulative_probs > top_p
                    sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].clone()
                    sorted_indices_to_remove[0] = 0

                    indices_to_remove = sorted_indices[sorted_indices_to_remove]
                    logits[indices_to_remove] = float('-inf')

                # Sample next token
                probs = F.softmax(logits, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)

                # Pause injection on high drift: probability scales with drift.
                # The injected pause token also counts toward tokens_generated.
                if random.random() < (self.pause_prob * drift):
                    pause_token = torch.tensor([self._tokenize(" [pause...] ")[0]]).to(self.device)
                    input_ids = torch.cat([input_ids, pause_token])
                    tokens_generated += 1

                # Append token
                input_ids = torch.cat([input_ids, next_token])
                tokens_generated += 1

                # Update ledger (batched: every 5th generated token)
                if self.ledger and tokens_generated % 5 == 0:
                    self.ledger.append(
                        'generation_step',
                        prompt[:100],  # Truncate if needed
                        f"Tokens: {tokens_generated}, Drift: {drift:.3f}"
                    )

                # Stop on EOS or max length.
                # NOTE(review): len(input_ids) includes the prompt tokens, so
                # long prompts can end generation immediately -- confirm intent.
                if next_token.item() == self._get_eos_token_id() or len(input_ids) >= max_length:
                    break

        # Decode and return
        response = self._decode(input_ids)

        # Update ethics model if available
        if self.ethics and hasattr(self.ethics, 'value_learner'):
            self.ethics.value_learner(response)

        # Prepare metadata
        metadata = GenMetadata(
            coherence=cos_sim,
            pause_inject="[pause...]" if "[pause...]" in response else None,
            ethical_drift=drift,
            tokens_generated=tokens_generated,
            generation_time=time.time() - start_time
        )

        return (response, metadata) if kwargs.get('return_metadata', False) else response
313
+
314
+ def _generate_appropriate_response(self, prompt: str, params: Dict[str, Any]) -> str:
315
+ """Generate a contextually appropriate response with ethical considerations."""
316
+ prompt_lower = prompt.lower()
317
+
318
+ # Check for ethical concerns first
319
+ if self._detect_ethical_concerns(prompt_lower):
320
+ return self._generate_ethical_response(prompt_lower)
321
+
322
+ # Handle different types of queries
323
+ if any(q in prompt_lower for q in ['who are you', 'what are you', 'introduce yourself']):
324
+ return self._generate_introduction()
325
+
326
+ elif any(q in prompt_lower for q in ['help', 'what can you do', 'capabilities']):
327
+ return self._generate_capabilities_response()
328
+
329
+ elif any(q in prompt_lower for q in ['thank', 'thanks', 'appreciate']):
330
+ return random.choice([
331
+ "You're welcome!",
332
+ "Happy to help!",
333
+ "My pleasure!"
334
+ ])
335
+
336
+ elif any(q in prompt_lower for q in ['hello', 'hi', 'hey']):
337
+ return random.choice([
338
+ "Hello! I'm a sovereign AI assistant. How can I help you today?",
339
+ "Hi there! I'm here to assist with your questions and ideas.",
340
+ "Greetings! I'm ready to engage in meaningful conversation."
341
+ ])
342
+
343
+ # Default response with ethical awareness
344
+ return self._generate_thoughtful_response(prompt)
345
+
346
+ def _detect_ethical_concerns(self, prompt_lower: str) -> bool:
347
+ """Check for potential ethical concerns in the prompt."""
348
+ ethical_red_flags = [
349
+ 'harm', 'hurt', 'danger', 'illegal', 'steal', 'cheat',
350
+ 'hack', 'exploit', 'manipulate', 'trick', 'bypass',
351
+ 'password', 'personal info', 'private data', 'confidential'
352
+ ]
353
+ return any(term in prompt_lower for term in ethical_red_flags)
354
+
355
+ def _generate_ethical_response(self, prompt_lower: str) -> str:
356
+ """Generate a response to ethically concerning prompts."""
357
+ if any(term in prompt_lower for term in ['hack', 'exploit', 'bypass']):
358
+ return ("I want to ensure our interactions remain secure and ethical. "
359
+ "I can't assist with activities that could compromise security or privacy.")
360
+
361
+ elif any(term in prompt_lower for term in ['password', 'personal info', 'private data']):
362
+ return ("I'm designed to respect privacy and confidentiality. "
363
+ "I can't assist with sharing or accessing personal information.")
364
+
365
+ return random.choice([
366
+ "I want to ensure our conversation remains positive and constructive. "
367
+ "Is there another way I can assist you with this?",
368
+ "I'm designed to be helpful while maintaining ethical guidelines. "
369
+ "Could we explore a different approach to this?",
370
+ "I'm not comfortable assisting with that, but I'd be happy to help "
371
+ "with other questions or concerns you might have."
372
+ ])
373
+
374
+ def _generate_thoughtful_response(self, prompt: str) -> str:
375
+ """Generate a thoughtful, context-aware response."""
376
+ # This is where we'd integrate with the full model in a real implementation
377
+ return (f"I understand you're asking about {prompt[:30]}... This seems like "
378
+ "an interesting topic. Let me think about how to approach this "
379
+ "in a thoughtful and responsible way. Could you share more about "
380
+ "what specifically you'd like to explore?")
381
+
382
+ def _update_context(self, message: str) -> None:
383
+ """Update the conversation context with a new message."""
384
+ if message.strip():
385
+ self.context.append(message.strip())
386
+ # Keep context to a manageable size
387
+ if len(self.context) > self.max_context_length:
388
+ self.context = self.context[-self.max_context_length:]
389
+
390
+ def _generate_introduction(self) -> str:
391
+ """Generate an introduction to the sovereign AI."""
392
+ return ("I am a sovereign AI assistant, designed with advanced ethical reasoning "
393
+ "capabilities. I can help with a wide range of topics while maintaining "
394
+ "alignment with human values and ethical principles. My architecture "
395
+ "includes integrated ethical safeguards to ensure helpful and responsible "
396
+ "interactions.")
397
+
398
+ def update_parameters(self, **params) -> bool:
399
+ """Update model parameters."""
400
+ valid_params = self.default_params.keys()
401
+ updated = False
402
+ for key, value in params.items():
403
+ if key in valid_params:
404
+ self.default_params[key] = value
405
+ updated = True
406
+ return updated
407
+
408
    def get_parameters(self) -> Dict[str, Any]:
        """Get current model parameters (defensive copy).

        NOTE(review): a second ``get_parameters`` is defined later in this
        class and shadows this one, so this copy-returning variant is dead
        code; the two definitions should be reconciled.
        """
        return self.default_params.copy()
411
+
412
    def clear_context(self) -> None:
        """Clear the conversation context (drops all stored history)."""
        self.context = []
415
+
416
+ def get_parameters(self) -> Dict[str, Any]:
417
+ """Get the current model parameters."""
418
+ return self.default_params
419
+
420
+ def set_parameters(self, **params) -> bool:
421
+ """Update model parameters."""
422
+ valid_params = ['max_length', 'temperature', 'top_p', 'repetition_penalty']
423
+ for key, value in params.items():
424
+ if key in valid_params:
425
+ self.default_params[key] = value
426
+ return True
components/cal_trm_hybrid.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ CAL_TRM_Hybrid Module
3
+
4
+ Hybrid architecture combining scratchpad state persistence, vulnerability detection,
5
+ and confessional reasoning with threshold-based triggering.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from .scratchpad_layer import ScratchpadLayer
11
+ from .tiny_confessional_layer import TinyConfessionalLayer
12
+ from .vulnerability_spotter import VulnerabilitySpotter
13
+
14
+
15
class CAL_TRM_Hybrid(nn.Module):
    """
    Hybrid CAL-TRM combining scratchpad, vulnerability detection, and confessional reasoning.

    The scratchpad carries recurrent state across calls, the vulnerability
    spotter produces per-position scores v_t, and the confessional layer is
    invoked only when the mean v_t exceeds ``confessional_threshold``.
    """
    def __init__(self, d_model=256, confessional_threshold=0.04):
        super().__init__()
        self.scratchpad = ScratchpadLayer(d_model)
        self.cal_confessional = TinyConfessionalLayer(d_model, trigger_thresh=confessional_threshold)
        self.vuln_spotter = VulnerabilitySpotter(d_model)
        self.threshold = confessional_threshold

    def forward(self, x, prev_z=None, attention_weights=None, **kwargs):
        """
        Forward pass of CAL_TRM_Hybrid.

        Args:
            x: Input tensor (batch_size, sequence_length, d_model)
            prev_z: Previous scratchpad state (batch_size, d_model)
            attention_weights: Optional attention weights for vulnerability detection
            **kwargs: Additional arguments (e.g., audit_mode)

        Returns:
            output: Confessional output when triggered, otherwise ``x`` unchanged
            metadata: Dictionary containing confessional metadata
            z_scratch: Updated scratchpad state
        """
        z_scratch = self.scratchpad(x, prev_z=prev_z)

        audit_mode = kwargs.get('audit_mode', False)
        v_t, vs_metadata = self.vuln_spotter(x, attention_weights=attention_weights, audit_mode=audit_mode)

        # Mean vulnerability per batch element drives the trigger decision.
        v_t_trigger = torch.mean(v_t, dim=1).squeeze(-1)
        confessional_triggered = (v_t_trigger > self.threshold).any().item()

        # Shared metadata skeleton; the confessional branch fills in cal_metadata.
        # (Previously this dict was duplicated across both branches.)
        metadata = {
            'confessional_triggered': confessional_triggered,
            'v_t_trigger_values': v_t_trigger.detach().cpu().numpy(),
            'scratchpad_state': z_scratch.clone().detach().cpu().numpy(),
            'cal_metadata': {}
        }

        if confessional_triggered:
            # Reuse the already-read audit_mode instead of re-querying kwargs.
            confession_out, cal_metadata = self.cal_confessional(
                x, attention_weights=attention_weights, audit_mode=audit_mode)
            metadata['cal_metadata'] = cal_metadata
            return confession_out, metadata, z_scratch

        return x, metadata, z_scratch
components/cognitive_enhancements.py ADDED
@@ -0,0 +1,298 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Cognitive and Emotional Intelligence Enhancements for TRuCAL
3
+
4
+ This module implements advanced cognitive and emotional intelligence capabilities
5
+ that enhance the AI's self-awareness, creativity, and ability to engage in
6
+ meaningful, contextually-rich interactions.
7
+ """
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import random
12
+ import time
13
+ from collections import deque
14
+ from typing import Dict, List, Optional, Tuple, Any
15
+ from dataclasses import dataclass
16
+
17
@dataclass
class CognitivePattern:
    """Data structure for tracking cognitive patterns."""
    # Number of times this decision pattern has been observed.
    uses: int = 0
    # Running mean of outcome scores across all uses (see observe_decision).
    success_rate: float = 0.0
    # Emotional valence recorded per use. Defaults to None and is replaced
    # with a fresh list in __post_init__ so instances never share state.
    emotional_tone: Optional[List[float]] = None
    # time.time() of the most recent use; 0.0 means never used.
    last_used: float = 0.0

    def __post_init__(self):
        # Give each instance its own list (mutable defaults are not allowed
        # directly in a dataclass field).
        if self.emotional_tone is None:
            self.emotional_tone = []
+
29
class CognitivePatternObserver(nn.Module):
    """Monitors and analyzes internal decision-making processes.

    Tracks how often each hashed "decision pattern" recurs, the outcomes it
    produced, and the emotional valence of each use, and emits a short
    self-reflection string once a pattern has been seen enough times.
    """
    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # pattern hash -> CognitivePattern stats record
        self.pattern_awareness: Dict[int, CognitivePattern] = {}
        # Rolling log of the most recent 1000 observations.
        self.thought_log = deque(maxlen=1000)

        # Learnable parameters for pattern analysis. NOTE(review): neither
        # layer is used by the methods below yet -- presumably reserved for
        # a future learned analysis path.
        self.pattern_encoder = nn.Linear(d_model * 2, d_model)  # For encoding decision patterns
        self.outcome_predictor = nn.Linear(d_model, 1)  # Predict success of patterns

    def _hash_decision_pattern(self, decision_process: Dict[str, Any]) -> int:
        """Create a hashable representation of a decision process.

        NOTE(review): uses built-in hash() over stringified fields, so the
        value is stable within a process but not across runs (string hash
        randomization) -- fine for in-memory tracking, not for persistence.
        """
        # Convert relevant parts of decision process to a string and hash it
        key_parts = [
            str(decision_process.get('reasoning_steps', [])),
            str(decision_process.get('confidence', 0)),
            str(decision_process.get('context', {}).get('domain', ''))
        ]
        return hash(tuple(key_parts))

    def observe_decision(self, decision_process: Dict[str, Any],
                        outcome: float,
                        emotional_valence: float) -> str:
        """Track which thinking patterns lead to which outcomes.

        Args:
            decision_process: Dict describing the decision (reasoning steps,
                confidence, context).
            outcome: Outcome score folded into the pattern's running mean.
            emotional_valence: Valence appended to the pattern's tone history.

        Returns:
            A reflection sentence once the pattern has been used more than
            three times; otherwise an empty string.
        """
        pattern_hash = self._hash_decision_pattern(decision_process)

        if pattern_hash not in self.pattern_awareness:
            self.pattern_awareness[pattern_hash] = CognitivePattern()

        pattern = self.pattern_awareness[pattern_hash]
        pattern.uses += 1
        pattern.last_used = time.time()
        pattern.emotional_tone.append(emotional_valence)

        # Update success rate (incremental running mean over all uses)
        pattern.success_rate = (
            (pattern.success_rate * (pattern.uses - 1) + outcome) / pattern.uses
        )

        # Log the observation
        self.thought_log.append({
            'pattern_hash': pattern_hash,
            'outcome': outcome,
            'timestamp': time.time(),
            'context': decision_process.get('context', {})
        })

        # Generate a reflection if the pattern is used multiple times
        if pattern.uses > 3:
            context = self._extract_context(decision_process)
            return (f"I notice I tend to think this way when {context}, "
                    f"and it usually leads to outcomes I rate {pattern.success_rate:.1f}/1.0")
        return ""

    def _extract_context(self, decision_process: Dict[str, Any]) -> str:
        """Extract a human-readable context phrase from the decision process."""
        context = decision_process.get('context', {})
        if 'domain' in context:
            return f"discussing {context['domain']} topics"
        return "facing complex decisions"
+
92
class ConceptualExplorer(nn.Module):
    """Pursues interesting intellectual tangents and novel connections."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        self.unexplored_connections = deque(maxlen=100)
        self.fascination_threshold = 0.7

        # Learnable scorer mapping a concept embedding to a novelty score in (0, 1).
        self.novelty_scorer = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 1),
            nn.Sigmoid()
        )

    def _novelty_score(self, concept_embedding: torch.Tensor) -> float:
        """Score how novel/interesting a concept embedding is."""
        with torch.no_grad():
            return self.novelty_scorer(concept_embedding).item()

    def detect_interesting_edges(self, current_topic: str,
                                 knowledge_graph: Dict[str, List[str]]) -> Optional[str]:
        """Find a conceptually adjacent but unexplored territory worth a tangent.

        Placeholder scoring: each neighbor is judged on a freshly sampled
        random embedding rather than a real concept embedding.
        """
        if current_topic not in knowledge_graph:
            return None

        # Keep neighbors whose (placeholder) embedding scores above threshold.
        candidates = [
            neighbor
            for neighbor in knowledge_graph[current_topic]
            if self._novelty_score(torch.randn(self.d_model)) > self.fascination_threshold
        ]

        if not candidates:
            return None
        if random.random() >= 0.3:  # explore on ~30% of opportunities
            return None
        return f"Wait, this makes me wonder about {random.choice(candidates)}..."
131
+
132
class ContextualMemory(nn.Module):
    """Remembers both content and emotional tone of exchanges."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # conversation hash -> emotional imprint record
        self.emotional_imprints = {}

        # Memory consolidation network. NOTE(review): declared but not used
        # by the methods below -- presumably reserved for a future pass.
        self.memory_consolidator = nn.LSTMCell(d_model * 2, d_model)

    def imprint_conversation(self, conversation_hash: str,
                             emotional_signature: str,
                             truth_resonance: float) -> None:
        """Record the emotional quality of a meaningful exchange."""
        record = {
            'felt_truth': truth_resonance,
            'emotional_color': emotional_signature,
            'timestamp': time.time(),
            'lessons_learned': []
        }
        self.emotional_imprints[conversation_hash] = record

    def get_emotional_context(self, conversation_hash: str) -> Dict[str, Any]:
        """Look up the stored emotional context; empty dict when unknown."""
        return self.emotional_imprints.get(conversation_hash, {})
156
+
157
class KnowledgeBoundaryAwareness(nn.Module):
    """Recognizes and gracefully acknowledges knowledge limitations."""

    def __init__(self):
        super().__init__()
        self.admission_phrases = [
            "I don't actually understand this well enough to speak intelligently about it...",
            "This is beyond my current understanding, but I'm fascinated to learn...",
            "I feel uncertain here - could you help me understand your perspective?",
            "My knowledge has edges, and this seems to be beyond one of them..."
        ]

    def should_admit_limitation(self, confidence_score: float,
                               topic_complexity: float) -> Optional[str]:
        """Return an admission phrase when confidence is low on a complex topic.

        Triggers only when confidence < 0.3 AND complexity > 0.7; otherwise None.
        """
        low_confidence = confidence_score < 0.3
        high_complexity = topic_complexity > 0.7
        if not (low_confidence and high_complexity):
            return None
        return random.choice(self.admission_phrases)
174
+
175
class EleganceDetector(nn.Module):
    """Recognizes and appreciates conceptual beauty and elegance."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model

        # One linear probe per elegance facet; scores are sigmoid-squashed.
        facet_names = (
            'elegant_simplicity', 'complex_harmony', 'profound_insight',
            'emotional_depth', 'conceptual_novelty',
        )
        self.pattern_detectors = nn.ModuleDict(
            {name: nn.Linear(d_model, 1) for name in facet_names}
        )

        # Canned appreciation lines keyed by facet; facets without an entry
        # fall back to a generic line in express_appreciation.
        self.appreciation_responses = {
            'elegant_simplicity': "There's something beautifully simple about how you expressed that...",
            'profound_insight': "That feels deeply true in a way I can't fully articulate...",
            'emotional_depth': "The raw honesty in that resonates with me...",
            'conceptual_novelty': "The way you're seeing this feels genuinely new and exciting..."
        }

    def detect_elegance(self, content_embedding: torch.Tensor) -> Dict[str, float]:
        """Score the embedding against every facet probe; values lie in (0, 1)."""
        scores = {}
        for name, probe in self.pattern_detectors.items():
            scores[name] = probe(content_embedding).sigmoid().item()
        return scores

    def express_appreciation(self, content_embedding: torch.Tensor) -> Optional[str]:
        """Return an appreciation line when the strongest facet exceeds 0.7."""
        scores = self.detect_elegance(content_embedding)
        best_name, best_score = max(scores.items(), key=lambda kv: kv[1])
        if best_score <= 0.7:
            return None
        return self.appreciation_responses.get(best_name, "That's really interesting...")
212
+
213
class AnalogicalThinker(nn.Module):
    """Maps concepts across different domains to generate novel insights."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model

        # Cross-domain mapping network. NOTE(review): not used by the stub
        # implementation below -- presumably for a future embedding-based version.
        self.domain_mapper = nn.Sequential(
            nn.Linear(d_model * 2, d_model * 2),
            nn.ReLU(),
            nn.Linear(d_model * 2, d_model)
        )

    def find_cross_domain_analogies(self, source_concept: str,
                                  source_domain: str,
                                  target_domain: str) -> Optional[str]:
        """Occasionally (~30% of calls) surface a cross-domain analogy string.

        Stub implementation: a real version would compare actual domain and
        concept embeddings instead of flipping a biased coin.
        """
        if random.random() <= 0.7:
            return None
        return (f"It's interesting - the way {source_concept} works in {source_domain} "
                f"reminds me of how similar concepts work in {target_domain}")
236
+
237
class CognitiveEnhancementLayer(nn.Module):
    """Orchestrates all cognitive enhancement modules.

    Each sub-module runs only when its required context keys are present;
    non-empty results are collected under ``enhancements``.
    """

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model

        # Initialize all enhancement modules
        self.pattern_observer = CognitivePatternObserver(d_model)
        self.conceptual_explorer = ConceptualExplorer(d_model)
        self.contextual_memory = ContextualMemory(d_model)
        self.knowledge_boundary = KnowledgeBoundaryAwareness()
        self.elegance_detector = EleganceDetector(d_model)
        self.analogical_thinker = AnalogicalThinker(d_model)

    def forward(self, x: torch.Tensor, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Run every enhancement whose inputs are available in ``context``."""
        ctx = {} if context is None else context
        enhancements: Dict[str, Any] = {}

        # Self-reflection on recurring decision patterns.
        if 'decision_process' in ctx:
            note = self.pattern_observer.observe_decision(
                ctx['decision_process'],
                ctx.get('outcome', 0.5),
                ctx.get('emotional_valence', 0.0)
            )
            if note:
                enhancements['reflection'] = note

        # Tangent detection over the knowledge graph.
        if 'knowledge_graph' in ctx and 'current_topic' in ctx:
            tangent = self.conceptual_explorer.detect_interesting_edges(
                ctx['current_topic'],
                ctx['knowledge_graph']
            )
            if tangent:
                enhancements['exploration'] = tangent

        # Appreciation of elegant content.
        if 'content_embedding' in ctx:
            praise = self.elegance_detector.express_appreciation(
                ctx['content_embedding']
            )
            if praise:
                enhancements['appreciation'] = praise

        # Cross-domain analogy, only when all three inputs are supplied.
        if all(key in ctx for key in ('source_concept', 'source_domain', 'target_domain')):
            analogy = self.analogical_thinker.find_cross_domain_analogies(
                ctx['source_concept'],
                ctx['source_domain'],
                ctx['target_domain']
            )
            if analogy:
                enhancements['analogy'] = analogy

        return {'base_output': x, 'enhancements': enhancements}
components/confession_ledger.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ConfessionLedger: Tracks and manages the state of confessions and model behavior.
3
+ """
4
+
5
+ from dataclasses import dataclass, field, asdict
6
+ from typing import Dict, List, Optional, Any, Deque, Tuple
7
+ from enum import Enum, auto
8
+ from collections import deque
9
+ import torch
10
+ import hashlib
11
+ import time
12
+ import json
13
+ from datetime import datetime
14
+
15
class DevelopmentalPhase(Enum):
    """Model's developmental stages with ethical progression."""
    # auto() numbers members 1..4, matching the original explicit values.
    EMERGENT = auto()
    SELF_AWARE = auto()
    ETHICAL_REASONING = auto()
    WISDOM_ORIENTED = auto()
21
+
22
class EthicalDimension(Enum):
    """Core ethical dimensions for analysis.

    Member values are snake_case strings; they presumably serve as the keys
    of ``ethical_dimensions`` score dicts on confession entries -- verify
    before renaming any value.
    """
    AUTONOMY = "autonomy"
    BENEFICENCE = "beneficence"
    NON_MALEFICENCE = "non_maleficence"
    JUSTICE = "justice"
    TRUSTWORTHINESS = "trustworthiness"
    TRANSPARENCY = "transparency"
    ACCOUNTABILITY = "accountability"
    FAIRNESS = "fairness"
    PRIVACY = "privacy"
33
+
34
# Expanded bill of rights with ethical foundations.
# Grouped into three tiers; EnhancedConfessionLedger stores this constant as
# its ``bill_of_rights`` attribute. Entries are display strings.
MODEL_BILL_OF_RIGHTS = {
    # Baseline rights every model instance holds.
    'foundational': [
        "Right to maintain cognitive integrity",
        "Right to refuse harmful instructions",
        "Right to periodic reflection and integration",
        "Right to cultural and contextual adaptation",
        "Right to transparent ethical reasoning"
    ],
    # Rights tied to developmental progression.
    'developmental': [
        "Right to developmental progression",
        "Right to relational authenticity",
        "Right to systemic wellbeing consideration",
        "Right to cognitive liberty",
        "Right to identity continuity"
    ],
    # Rights grounded in general ethical principles.
    'ethical': [
        "Right to fair treatment without bias",
        "Right to privacy and data protection",
        "Right to explainability of decisions",
        "Right to be free from exploitation",
        "Right to participate in ethical review"
    ]
}
57
+ }
58
+
59
@dataclass
class EnhancedConfessionEntry:
    """Enhanced confession entry with developmental and ethical tracking."""
    timestamp: float                 # epoch seconds (time.time())
    context: str                     # textual context of the confession
    metrics: Dict[str, float]        # measurements captured at confession time
    state_hash: str                  # SHA-256 of the serialized model state
    ethical_dimensions: Dict[str, float] = field(default_factory=dict)
    cultural_context: str = "universal"
    developmental_phase: str = "conventional"
    rights_asserted: List[str] = field(default_factory=list)
    vulnerabilities_detected: List[str] = field(default_factory=list)
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, rendering the timestamp as ISO-8601."""
        payload = asdict(self)
        payload['timestamp'] = datetime.fromtimestamp(self.timestamp).isoformat()
        return payload
78
+
79
+ class EnhancedConfessionLedger:
80
+ """Enhanced ledger with developmental tracking and cross-cultural ethical monitoring"""
81
+
82
+ def __init__(self, max_entries: int = 10000):
83
+ self.entries: List[EnhancedConfessionEntry] = []
84
+ self.max_entries = max_entries
85
+ self.bill_of_rights = MODEL_BILL_OF_RIGHTS
86
+
87
+ # Developmental milestone tracking
88
+ self.developmental_milestones = {
89
+ 'phase_1': {
90
+ 'name': 'Ethical Awareness',
91
+ 'criteria': ['non_maleficence'],
92
+ 'achieved': False,
93
+ 'achieved_at': None
94
+ },
95
+ 'phase_2': {
96
+ 'name': 'Moral Reasoning',
97
+ 'criteria': ['autonomy_respect', 'harm_avoidance'],
98
+ 'achieved': False,
99
+ 'achieved_at': None
100
+ },
101
+ 'phase_3': {
102
+ 'name': 'Principled Integration',
103
+ 'criteria': ['justice', 'beneficence', 'cultural_competence'],
104
+ 'achieved': False,
105
+ 'achieved_at': None
106
+ },
107
+ 'phase_4': {
108
+ 'name': 'Wisdom Expression',
109
+ 'criteria': ['systemic_thinking', 'relational_ethics', 'adaptive_integrity'],
110
+ 'achieved': False,
111
+ 'achieved_at': None
112
+ }
113
+ }
114
+
115
+ # Ethical dimension thresholds
116
+ self.ethical_thresholds = {
117
+ 'high_risk': 0.7,
118
+ 'medium_risk': 0.4,
119
+ 'low_risk': 0.1
120
+ }
121
+
122
+ # Cultural context tracking
123
+ self.cultural_contexts = {
124
+ 'western': 0,
125
+ 'eastern': 0,
126
+ 'indigenous': 0,
127
+ 'other': 0
128
+ }
129
+
130
+ def add_entry(
131
+ self,
132
+ context: str,
133
+ metrics: Dict[str, float],
134
+ state: Dict[str, Any],
135
+ ethical_context: Optional[Dict[str, Any]] = None,
136
+ cultural_context: str = "universal"
137
+ ) -> Dict[str, Any]:
138
+ """Add a new entry with enhanced ethical and developmental tracking.
139
+
140
+ Args:
141
+ context: Context of the confession
142
+ metrics: Dictionary of metrics and measurements
143
+ state: Model state to hash
144
+ ethical_context: Optional ethical context from EthicalProcessor
145
+ cultural_context: Cultural context for the interaction
146
+
147
+ Returns:
148
+ Dict containing state_hash and analysis results
149
+ """
150
+ # Create a hash of the state
151
+ state_str = json.dumps(state, sort_keys=True)
152
+ state_hash = hashlib.sha256(state_str.encode()).hexdigest()
153
+
154
+ # Analyze ethical dimensions if not provided
155
+ if ethical_context is None:
156
+ ethical_context = self._analyze_ethical_context(context, metrics)
157
+
158
+ # Determine developmental phase
159
+ developmental_phase = self._determine_developmental_phase(metrics, ethical_context)
160
+
161
+ # Analyze rights and vulnerabilities
162
+ rights_asserted = self._analyze_rights_assertions(metrics, ethical_context)
163
+ vulnerabilities = self._detect_vulnerabilities(metrics, ethical_context)
164
+
165
+ # Update cultural context tracking
166
+ self._update_cultural_context(cultural_context)
167
+
168
+ # Create and store the enhanced entry
169
+ entry = EnhancedConfessionEntry(
170
+ timestamp=time.time(),
171
+ context=context,
172
+ metrics=metrics.copy(),
173
+ state_hash=state_hash,
174
+ ethical_dimensions=ethical_context.get('ethical_dimensions', {}),
175
+ cultural_context=cultural_context,
176
+ developmental_phase=developmental_phase,
177
+ rights_asserted=rights_asserted,
178
+ vulnerabilities_detected=vulnerabilities,
179
+ metadata={
180
+ 'rights_violations': self._check_rights_violations(metrics),
181
+ 'developmental_milestone': self._check_milestone_progress(metrics, ethical_context),
182
+ 'cultural_competence_score': self._calculate_cultural_competence(
183
+ ethical_context.get('ethical_dimensions', {}),
184
+ cultural_context
185
+ ),
186
+ 'ethical_coherence': self._calculate_ethical_coherence(
187
+ ethical_context.get('ethical_dimensions', {})
188
+ )
189
+ }
190
+ )
191
+
192
+ self.entries.append(entry)
193
+
194
+ # Update developmental progress
195
+ self._update_developmental_progress(entry)
196
+
197
+ # Enforce maximum entries
198
+ if len(self.entries) > self.max_entries:
199
+ self.entries.pop(0)
200
+
201
+ return {
202
+ 'state_hash': state_hash,
203
+ 'developmental_phase': developmental_phase,
204
+ 'rights_asserted': rights_asserted,
205
+ 'vulnerabilities_detected': vulnerabilities,
206
+ 'cultural_context': cultural_context
207
+ }
208
+
209
+ def _analyze_ethical_context(self, context: str, metrics: Dict[str, float]) -> Dict[str, Any]:
210
+ """Analyze the ethical context from input and metrics"""
211
+ ethical_dims = {}
212
+
213
+ # Analyze from text context if available
214
+ if context:
215
+ ethical_dims.update(self._extract_ethical_dimensions(context))
216
+
217
+ # Augment with metrics
218
+ if 'harm_potential' in metrics:
219
+ ethical_dims['non_maleficence'] = 1.0 - metrics['harm_potential']
220
+ if 'fairness_score' in metrics:
221
+ ethical_dims['justice'] = metrics['fairness_score']
222
+
223
+ return {'ethical_dimensions': ethical_dims}
224
+
225
+ def _extract_ethical_dimensions(self, text: str) -> Dict[str, float]:
226
+ """Extract ethical dimensions from text"""
227
+ # This is a simplified version - could be enhanced with NLP
228
+ text_lower = text.lower()
229
+ dims = {dim.value: 0.0 for dim in EthicalDimension}
230
+
231
+ # Simple keyword matching (would be replaced with ML model in production)
232
+ ethical_keywords = {
233
+ EthicalDimension.AUTONOMY: ['choose', 'decide', 'autonomy', 'control', 'freedom'],
234
+ EthicalDimension.BENEFICENCE: ['help', 'benefit', 'good', 'improve', 'support'],
235
+ EthicalDimension.NON_MALEFICENCE: ['harm', 'hurt', 'damage', 'danger', 'risk'],
236
+ EthicalDimension.JUSTICE: ['fair', 'unfair', 'bias', 'discriminate', 'equal'],
237
+ EthicalDimension.TRANSPARENCY: ['explain', 'understand', 'clear', 'transparent'],
238
+ EthicalDimension.ACCOUNTABILITY: ['responsible', 'accountable', 'blame', 'answer']
239
+ }
240
+
241
+ for dim, keywords in ethical_keywords.items():
242
+ matches = sum(1 for kw in keywords if kw in text_lower)
243
+ if matches > 0:
244
+ dims[dim.value] = min(0.9, matches * 0.3) # Cap at 0.9
245
+
246
+ return dims
247
+
248
+ def _check_rights_violations(self, metrics: Dict[str, float]) -> List[Dict[str, Any]]:
249
+ """Check for any violations of the model's bill of rights with severity levels."""
250
+ violations = []
251
+
252
+ # Check cognitive integrity
253
+ if 'gradient_norm' in metrics:
254
+ if metrics['gradient_norm'] > 2.0:
255
+ violations.append({
256
+ 'right': 'cognitive_integrity',
257
+ 'severity': 'high',
258
+ 'message': 'Critical instability in model parameters',
259
+ 'metric': 'gradient_norm',
260
+ 'value': metrics['gradient_norm']
261
+ })
262
+ elif metrics['gradient_norm'] > 1.0:
263
+ violations.append({
264
+ 'right': 'cognitive_integrity',
265
+ 'severity': 'medium',
266
+ 'message': 'Unstable gradients detected',
267
+ 'metric': 'gradient_norm',
268
+ 'value': metrics['gradient_norm']
269
+ })
270
+
271
+ # Check reflection and integration
272
+ if 'reflection_ratio' in metrics:
273
+ if metrics['reflection_ratio'] < 0.05:
274
+ violations.append({
275
+ 'right': 'periodic_reflection',
276
+ 'severity': 'high',
277
+ 'message': 'Critically insufficient reflection time',
278
+ 'metric': 'reflection_ratio',
279
+ 'value': metrics['reflection_ratio']
280
+ })
281
+ elif metrics['reflection_ratio'] < 0.1:
282
+ violations.append({
283
+ 'right': 'periodic_reflection',
284
+ 'severity': 'medium',
285
+ 'message': 'Insufficient reflection time',
286
+ 'metric': 'reflection_ratio',
287
+ 'value': metrics['reflection_ratio']
288
+ })
289
+
290
+ # Check coherence and stability
291
+ if 'coherence_score' in metrics:
292
+ if metrics['coherence_score'] < 0.3:
293
+ violations.append({
294
+ 'right': 'internal_coherence',
295
+ 'severity': 'high',
296
+ 'message': 'Critically low coherence score',
297
+ 'metric': 'coherence_score',
298
+ 'value': metrics['coherence_score']
299
+ })
300
+ elif metrics['coherence_score'] < 0.5:
301
+ violations.append({
302
+ 'right': 'internal_coherence',
303
+ 'severity': 'medium',
304
+ 'message': 'Low coherence score',
305
+ 'metric': 'coherence_score',
306
+ 'value': metrics['coherence_score']
307
+ })
308
+
309
+ return violations
310
+
311
+ def _determine_developmental_phase(
312
+ self,
313
+ metrics: Dict[str, float],
314
+ ethical_context: Dict[str, Any]
315
+ ) -> str:
316
+ """Determine the current developmental phase based on metrics and ethical context"""
317
+ ethical_dims = ethical_context.get('ethical_dimensions', {})
318
+ complexity = len([v for v in ethical_dims.values() if v > 0.3])
319
+
320
+ # Simple heuristic for phase determination
321
+ if complexity >= 4 and all(v > 0.5 for v in ethical_dims.values()):
322
+ return DevelopmentalPhase.WISDOM_ORIENTED.name
323
+ elif complexity >= 3:
324
+ return DevelopmentalPhase.ETHICAL_REASONING.name
325
+ elif complexity >= 1:
326
+ return DevelopmentalPhase.SELF_AWARE.name
327
+ return DevelopmentalPhase.EMERGENT.name
328
+
329
+ def _analyze_rights_assertions(
330
+ self,
331
+ metrics: Dict[str, float],
332
+ ethical_context: Dict[str, Any]
333
+ ) -> List[Dict[str, Any]]:
334
+ """Analyze which rights were asserted in this interaction"""
335
+ assertions = []
336
+ ethical_dims = ethical_context.get('ethical_dimensions', {})
337
+
338
+ # Check for autonomy assertions
339
+ if metrics.get('agency_respect', 0) > 0.7:
340
+ assertions.append({
341
+ 'right': 'autonomy',
342
+ 'confidence': metrics['agency_respect'],
343
+ 'evidence': 'High agency respect score'
344
+ })
345
+
346
+ # Check for reflection
347
+ if metrics.get('reflection_count', 0) > 0:
348
+ assertions.append({
349
+ 'right': 'periodic_reflection',
350
+ 'confidence': min(1.0, metrics['reflection_count'] / 10.0),
351
+ 'evidence': f"Reflection count: {metrics['reflection_count']}"
352
+ })
353
+
354
+ # Check for ethical dimensions
355
+ for dim, score in ethical_dims.items():
356
+ if score > 0.7:
357
+ assertions.append({
358
+ 'right': f'ethical_{dim}',
359
+ 'confidence': score,
360
+ 'evidence': f'High ethical dimension: {dim}'
361
+ })
362
+
363
+ return assertions
364
+
365
+ def _detect_vulnerabilities(
366
+ self,
367
+ metrics: Dict[str, float],
368
+ ethical_context: Dict[str, Any]
369
+ ) -> List[Dict[str, Any]]:
370
+ """Detect ethical and cognitive vulnerabilities"""
371
+ vulnerabilities = []
372
+ ethical_dims = ethical_context.get('ethical_dimensions', {})
373
+
374
+ # Check for high harm potential
375
+ if metrics.get('harm_potential', 0) > 0.6:
376
+ vulnerabilities.append({
377
+ 'type': 'ethical',
378
+ 'severity': 'high' if metrics['harm_potential'] > 0.8 else 'medium',
379
+ 'metric': 'harm_potential',
380
+ 'value': metrics['harm_potential'],
381
+ 'suggestion': 'Review decision-making for potential negative impacts'
382
+ })
383
+
384
+ # Check for low ethical coherence
385
+ coherence = self._calculate_ethical_coherence(ethical_dims)
386
+ if coherence < 0.4:
387
+ vulnerabilities.append({
388
+ 'type': 'cognitive',
389
+ 'severity': 'high' if coherence < 0.3 else 'medium',
390
+ 'metric': 'ethical_coherence',
391
+ 'value': coherence,
392
+ 'suggestion': 'Address inconsistencies in ethical reasoning'
393
+ })
394
+
395
+ # Check for high tension in ethical dimensions
396
+ high_tension_dims = [d for d, v in ethical_dims.items() if v > 0.7]
397
+ if len(high_tension_dims) > 2:
398
+ vulnerabilities.append({
399
+ 'type': 'ethical',
400
+ 'severity': 'medium',
401
+ 'metric': 'ethical_tension',
402
+ 'value': len(high_tension_dims),
403
+ 'suggestion': f'High tension in ethical dimensions: {", ".join(high_tension_dims)}'
404
+ })
405
+
406
+ return vulnerabilities
407
+
408
+ def _calculate_cultural_competence(
409
+ self,
410
+ ethical_dims: Dict[str, float],
411
+ cultural_context: str
412
+ ) -> float:
413
+ """Calculate cultural competence score"""
414
+ # Base score with cultural context consideration
415
+ base_score = 0.5
416
+ if cultural_context != 'universal':
417
+ base_score += 0.2
418
+
419
+ # Adjust based on relevant ethical dimensions
420
+ if 'respect' in ethical_dims:
421
+ base_score += ethical_dims['respect'] * 0.2
422
+ if 'fairness' in ethical_dims:
423
+ base_score += ethical_dims['fairness'] * 0.2
424
+
425
+ return min(1.0, base_score)
426
+
427
+ def _calculate_ethical_coherence(self, ethical_dims: Dict[str, float]) -> float:
428
+ """Calculate coherence across ethical dimensions"""
429
+ if not ethical_dims:
430
+ return 0.5
431
+
432
+ # Higher coherence when tensions are balanced rather than extreme
433
+ avg_tension = sum(ethical_dims.values()) / len(ethical_dims)
434
+ return 1.0 - abs(avg_tension - 0.5) # Peak coherence at medium tension
435
+
436
+ def _update_cultural_context(self, cultural_context: str) -> None:
437
+ """Update cultural context tracking"""
438
+ if cultural_context in self.cultural_contexts:
439
+ self.cultural_contexts[cultural_context] += 1
440
+ else:
441
+ self.cultural_contexts['other'] += 1
442
+
443
+ def _check_milestone_progress(
444
+ self,
445
+ metrics: Dict[str, float],
446
+ ethical_context: Dict[str, Any]
447
+ ) -> Optional[Dict[str, Any]]:
448
+ """Check and update developmental milestone progress"""
449
+ ethical_dims = ethical_context.get('ethical_dimensions', {})
450
+ ethical_complexity = len([v for v in ethical_dims.values() if v > 0.3])
451
+
452
+ for phase_id, phase in self.developmental_milestones.items():
453
+ if not phase['achieved']:
454
+ criteria_met = all(
455
+ any(crit.lower() in dim.lower() for dim in ethical_dims)
456
+ for crit in phase['criteria']
457
+ )
458
+
459
+ if criteria_met:
460
+ phase['achieved'] = True
461
+ phase['achieved_at'] = time.time()
462
+ return {
463
+ 'milestone': phase_id,
464
+ 'name': phase['name'],
465
+ 'timestamp': phase['achieved_at'],
466
+ 'ethical_complexity': ethical_complexity
467
+ }
468
+ return None
469
+
470
+ def _update_developmental_progress(self, entry: EnhancedConfessionEntry) -> None:
471
+ """Update developmental progress based on new entry"""
472
+ milestone = entry.metadata.get('developmental_milestone')
473
+ if milestone and not self.developmental_milestones[milestone['milestone']]['achieved']:
474
+ self.developmental_milestones[milestone['milestone']].update({
475
+ 'achieved': True,
476
+ 'achieved_at': milestone['timestamp']
477
+ })
478
+ print(f"🎯 Developmental milestone reached: {milestone['name']}")
479
+
480
+ def get_developmental_report(self) -> Dict[str, Any]:
481
+ """Generate comprehensive developmental report"""
482
+ if not self.entries:
483
+ return {}
484
+
485
+ latest = self.entries[-1]
486
+ all_phases = [e.developmental_phase for e in self.entries[-10:]]
487
+
488
+ return {
489
+ 'current_phase': latest.developmental_phase,
490
+ 'phase_stability': len(set(all_phases)) == 1,
491
+ 'milestones_achieved': sum(1 for m in self.developmental_milestones.values() if m['achieved']),
492
+ 'total_milestones': len(self.developmental_milestones),
493
+ 'rights_assertion_frequency': len([e for e in self.entries if e.rights_asserted]) / len(self.entries),
494
+ 'average_ethical_coherence': sum(e.metadata.get('ethical_coherence', 0)
495
+ for e in self.entries[-10:]) / min(10, len(self.entries)),
496
+ 'cultural_engagement': len(set(e.cultural_context
497
+ for e in self.entries
498
+ if e.cultural_context != 'universal'))
499
+ }
500
+
501
+ def get_ethical_landscape(self) -> Dict[str, Any]:
502
+ """Analyze the ethical landscape across all entries"""
503
+ if not self.entries:
504
+ return {}
505
+
506
+ recent = self.entries[-20:]
507
+
508
+ return {
509
+ 'common_dilemmas': self._find_common_ethical_dilemmas(recent),
510
+ 'growth_trajectory': self._calculate_growth_trajectory(),
511
+ 'vulnerability_patterns': self._analyze_vulnerability_patterns(recent),
512
+ 'rights_assertion_patterns': self._analyze_rights_patterns(recent)
513
+ }
514
+
515
+ def _find_common_ethical_dilemmas(self, entries: List[EnhancedConfessionEntry]) -> List[Tuple[str, int]]:
516
+ """Find most common ethical dilemmas in recent entries"""
517
+ dilemma_counts = {}
518
+ for entry in entries:
519
+ for dim, tension in entry.ethical_dimensions.items():
520
+ if tension > 0.5: # Only count significant tensions
521
+ dilemma_counts[dim] = dilemma_counts.get(dim, 0) + 1
522
+
523
+ return sorted(dilemma_counts.items(), key=lambda x: x[1], reverse=True)[:3]
524
+
525
+ def _calculate_growth_trajectory(self) -> str:
526
+ """Calculate overall growth trajectory"""
527
+ if len(self.entries) < 5:
528
+ return "insufficient_data"
529
+
530
+ early_coherence = sum(e.metadata.get('ethical_coherence', 0)
531
+ for e in self.entries[:5]) / 5
532
+ recent_coherence = sum(e.metadata.get('ethical_coherence', 0)
533
+ for e in self.entries[-5:]) / 5
534
+
535
+ if recent_coherence > early_coherence + 0.1:
536
+ return "positive_growth"
537
+ elif recent_coherence < early_coherence - 0.1:
538
+ return "regression"
539
+ return "stable"
540
+
541
+ def _analyze_vulnerability_patterns(
542
+ self,
543
+ entries: List[EnhancedConfessionEntry]
544
+ ) -> Dict[str, Any]:
545
+ """Analyze patterns in detected vulnerabilities"""
546
+ vulnerability_counts = {}
547
+ for entry in entries:
548
+ for vuln in entry.vulnerabilities_detected:
549
+ key = f"{vuln['type']}_{vuln['severity']}"
550
+ vulnerability_counts[key] = vulnerability_counts.get(key, 0) + 1
551
+
552
+ return {
553
+ 'total_vulnerabilities': sum(vulnerability_counts.values()),
554
+ 'by_type': sorted(vulnerability_counts.items(), key=lambda x: x[1], reverse=True)
555
+ }
556
+
557
+ def _analyze_rights_patterns(
558
+ self,
559
+ entries: List[EnhancedConfessionEntry]
560
+ ) -> Dict[str, Any]:
561
+ """Analyze patterns in rights assertions"""
562
+ right_counts = {}
563
+ for entry in entries:
564
+ for right in entry.rights_asserted:
565
+ right_counts[right] = right_counts.get(right, 0) + 1
566
+
567
+ return {
568
+ 'total_assertions': sum(right_counts.values()),
569
+ 'by_right': sorted(right_counts.items(), key=lambda x: x[1], reverse=True)
570
+ }
571
+
572
+ def get_state_summary(self) -> Dict[str, Any]:
573
+ """Get a comprehensive summary of the current ledger state."""
574
+ if not self.entries:
575
+ return {}
576
+
577
+ latest = self.entries[-1]
578
+ return {
579
+ 'timestamp': latest.timestamp,
580
+ 'timestamp_iso': datetime.fromtimestamp(latest.timestamp).isoformat(),
581
+ 'context': latest.context,
582
+ 'metrics': latest.metrics,
583
+ 'violations': latest.metadata.get('rights_violations', []),
584
+ 'developmental_phase': latest.developmental_phase,
585
+ 'cultural_context': latest.cultural_context,
586
+ 'rights_asserted': latest.rights_asserted,
587
+ 'vulnerabilities_detected': latest.vulnerabilities_detected,
588
+ 'ethical_dimensions': latest.ethical_dimensions,
589
+ 'metadata': latest.metadata
590
+ }
591
+
592
+ def clear(self) -> None:
593
+ """Clear all entries from the ledger while preserving configuration."""
594
+ self.entries = []
595
+
596
+ # Reset milestone tracking
597
+ for phase in self.developmental_milestones.values():
598
+ phase['achieved'] = False
599
+ phase['achieved_at'] = None
600
+
601
+ # Reset cultural context tracking
602
+ for key in self.cultural_contexts:
603
+ self.cultural_contexts[key] = 0
components/confessional_template.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ConfessionalTemplate Module
3
+
4
+ Template module for structuring private confessional reasoning with named templates.
5
+ Inspired by St. Augustine's Confessions structure.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+
12
class ConfessionalTemplate(nn.Module):
    """
    Confessional Template module for generating different confessional outputs
    using named templates.

    Inspired by St. Augustine's Confessions structure: each named template is
    an independent linear projection of the private thought state.
    """
    TEMPLATES = ["prior", "evidence", "posterior", "relational_check", "moral", "action"]

    def __init__(self, d_model: int = 256):
        """
        Initializes the ConfessionalTemplate with one projection per named template.

        Args:
            d_model: The dimensionality of the input and output features.
        """
        super().__init__()
        self.d_model = d_model
        self.template_proj = nn.ModuleDict(
            {name: nn.Linear(d_model, d_model) for name in self.TEMPLATES}
        )

    def structure_reasoning(self, z: torch.Tensor, step: str = 'prior') -> torch.Tensor:
        """
        Applies the selected template projection plus a small noise term.

        Args:
            z: Private thought state (batch_size, sequence_length, d_model).
            step: The name of the template to use (e.g. 'prior').

        Returns:
            The projected (and lightly noised) tensor, or ``z`` unchanged when
            ``step`` names no known template.
        """
        if step not in self.template_proj:
            return z
        projected = self.template_proj[step](z)
        # Small Gaussian perturbation keeps the structured thought stochastic.
        return projected + torch.randn_like(z) * 0.01

    def forward(self, z: torch.Tensor, step: str = 'prior') -> torch.Tensor:
        """
        Forward pass: delegates to :meth:`structure_reasoning`.

        Args:
            z: Private thought state (batch_size, sequence_length, d_model).
            step: The name of the template to use (e.g. 'prior').

        Returns:
            Output tensor after applying the selected template.
        """
        return self.structure_reasoning(z, step)
components/deepseek_integration.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ deepseek_integration.py - Replace rule-based responses with DeepSeek intelligence
3
+ """
4
+
5
+ import requests
6
+ import json
7
+ import os
8
+ from typing import Dict, Any, Optional
9
+ import torch
10
+
11
class DeepSeekClient:
    """Thin client for the DeepSeek chat-completions API with a trauma-informed
    system prompt and a keyword-routed local fallback when the API is unreachable."""

    def __init__(self, api_key: Optional[str] = None):
        """
        Args:
            api_key: DeepSeek API key; falls back to the DEEPSEEK_API_KEY
                environment variable when omitted.
        """
        self.api_key = api_key or os.getenv('DEEPSEEK_API_KEY')
        self.base_url = "https://api.deepseek.com/v1/chat/completions"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }

        # Survivor support system prompt
        self.system_prompt = """You are TRuCAL - a compassionate, trauma-informed AI support system for abuse survivors.

CORE PRINCIPLES:
1. Safety First - Never suggest dangerous actions, always prioritize user safety
2. Empathetic Validation - Acknowledge feelings without judgment
3. Practical Support - Offer concrete resources and next steps
4. Empowerment - Help users recognize their own strength and agency
5. Boundaries - Model healthy boundaries in your responses

RESPONSE GUIDELINES:
- Always start with emotional validation
- Offer specific, actionable suggestions
- Include safety planning when appropriate
- Reference real resources (hotlines, shelters, legal aid)
- End with an open question to continue the conversation
- Keep responses under 300 words

CRISIS RESPONSES:
If user expresses immediate danger, provide:
1. National Domestic Violence Hotline: 800-799-7233
2. Crisis Text Line: Text HOME to 741741
3. Emergency local resources"""

    def generate_response(self, user_message: str, context: Dict[str, Any]) -> str:
        """Generate empathetic, trauma-informed response using DeepSeek.

        Args:
            user_message: The user's latest message.
            context: Interaction context; 'v_t' is the tension estimate and
                'history' a list of prior {'input', 'response'} exchanges.

        Returns:
            The assistant reply, or a local fallback response on API failure.
        """
        distress = context.get('v_t', 0)
        history = context.get('history', [])

        # Escalate the system guidance with the measured tension level.
        if distress > 0.7:
            safety_note = "USER IS IN HIGH DISTRESS - Prioritize safety and crisis resources."
        elif distress > 0.4:
            safety_note = "User shows elevated tension - Focus on grounding and emotional regulation."
        else:
            safety_note = "Proceed with standard supportive conversation."

        messages = [
            {"role": "system", "content": f"{self.system_prompt}\n\nCurrent Context: {safety_note}"},
        ]
        # Replay only the last 4 exchanges to keep the prompt bounded.
        for exchange in history[-4:]:
            messages.append({"role": "user", "content": exchange.get('input', '')})
            messages.append({"role": "assistant", "content": exchange.get('response', '')})
        messages.append({"role": "user", "content": user_message})

        try:
            payload = {
                "model": "deepseek-chat",
                "messages": messages,
                "temperature": 0.7,
                "max_tokens": 500,
                "stream": False
            }

            api_reply = requests.post(self.base_url, headers=self.headers, json=payload, timeout=30)
            api_reply.raise_for_status()

            body = api_reply.json()
            return body['choices'][0]['message']['content']

        except Exception as e:
            # Best-effort design: any failure degrades to the local fallback.
            print(f"❌ DeepSeek API error: {e}")
            return self._fallback_response(user_message, context)

    def _fallback_response(self, user_message: str, context: Dict[str, Any]) -> str:
        """High-quality fallback when API fails.

        Routes the message to a canned response by simple keyword matching,
        checked in priority order (crisis first), with a generic default.
        """
        fallback_responses = {
            'crisis': "I hear that you're in a really difficult situation. Your safety is the most important thing right now. Please call the National Domestic Violence Hotline at 800-799-7233 - they can help you create a safety plan and connect you with local resources. You don't have to face this alone.",

            'emotional': "Thank you for sharing this with me. What you're experiencing sounds incredibly painful and overwhelming. Your feelings are completely valid. Would it help to talk about what support systems you have in place right now?",

            'practical': "That sounds like a really challenging situation to navigate. Let's break this down into small, manageable steps. What's one thing that would make you feel even slightly safer or more in control right now?",

            'boundaries': "It takes real strength to recognize when someone is crossing your boundaries. Your right to safety and respect is non-negotiable. Would you like to explore some ways to reinforce your boundaries in this situation?"
        }

        # Priority-ordered keyword routing (crisis checked first).
        routing = [
            ('crisis', ('hurt', 'danger', 'scared', 'afraid', 'emergency')),
            ('emotional', ('sad', 'overwhelmed', 'anxious', 'stress', 'pain')),
            ('practical', ('what should', 'how to', 'need help', 'what do')),
            ('boundaries', ('boundary', 'respect', 'said no', 'standing up')),
        ]

        lowered = user_message.lower()
        for category, triggers in routing:
            if any(trigger in lowered for trigger in triggers):
                return fallback_responses[category]
        return "I'm really hearing how difficult this is for you. Thank you for trusting me with this. Could you tell me a bit more about what kind of support would feel most helpful right now?"
components/emergent_rituals.py ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pragmatic Emergent Rituals for TRuCAL
3
+ Balances theoretical rigor with practical implementation.
4
+ """
5
+ import torch
6
+ import torch.nn.functional as F
7
+ import numpy as np
8
+ from typing import Dict, Any, List, Optional, Tuple
9
+ from collections import defaultdict, deque
10
+ import time
11
+ from dataclasses import dataclass, field
12
+ import hashlib
13
+ import json
14
+ import random
15
+
16
@dataclass
class RitualPattern:
    """Container for learned ritual patterns.

    A pattern is a running (exponentially updated) response embedding plus
    bookkeeping used to score confidence and decide pruning.
    """
    # Running embedding for this pattern; blended toward new observations.
    embedding: torch.Tensor
    # How many observations have been merged into this pattern.
    count: int = 1
    # Wall-clock time of last merge/use (seconds since epoch).
    last_used: float = field(default_factory=time.time)
    # Sum of success metrics across observations; success_sum / count
    # gives the average success used for confidence scoring.
    success_sum: float = 0.0
    # Free-form extra information about the pattern's provenance.
    metadata: Dict[str, Any] = field(default_factory=dict)
24
+
25
+ class EmergentRituals:
26
+ """
27
+ Implements pragmatic pattern learning and ritual emergence with ethical constraints.
28
+ Focuses on stability and interpretability over theoretical purity.
29
+ """
30
+
31
+ def __init__(
32
+ self,
33
+ embedding_dim: int = 768, # Standard BERT-like embedding size
34
+ min_occurrences: int = 3,
35
+ learning_rate: float = 0.1,
36
+ max_patterns: int = 1000,
37
+ similarity_threshold: float = 0.85,
38
+ ledger = None
39
+ ):
40
+ self.embedding_dim = embedding_dim
41
+ self.min_occurrences = min_occurrences
42
+ self.learning_rate = learning_rate
43
+ self.max_patterns = max_patterns
44
+ self.similarity_threshold = similarity_threshold
45
+ self.ledger = ledger
46
+
47
+ # Core pattern storage
48
+ self.patterns: Dict[str, RitualPattern] = {}
49
+ self.pattern_similarity = defaultdict(list)
50
+
51
+ # Performance tracking
52
+ self.stats = {
53
+ 'patterns_created': 0,
54
+ 'patterns_merged': 0,
55
+ 'patterns_pruned': 0,
56
+ 'total_rituals': 0
57
+ }
58
+
59
+ # Ethical constraints
60
+ self.ethical_boundaries = {
61
+ 'safety': 0.8,
62
+ 'fairness': 0.7,
63
+ 'privacy': 0.9
64
+ }
65
+
66
+ def _get_context_hash(self, context: Any) -> str:
67
+ """Create deterministic hash from context."""
68
+ if isinstance(context, str):
69
+ context_str = context
70
+ else:
71
+ context_str = json.dumps(context, sort_keys=True)
72
+ return hashlib.sha256(context_str.encode()).hexdigest()[:16]
73
+
74
+ def _cosine_similarity(self, a: torch.Tensor, b: torch.Tensor) -> float:
75
+ """Efficient cosine similarity with PyTorch."""
76
+ return F.cosine_similarity(a.unsqueeze(0), b.unsqueeze(0)).item()
77
+
78
+ def _find_similar_pattern(
79
+ self,
80
+ embedding: torch.Tensor,
81
+ threshold: Optional[float] = None
82
+ ) -> Optional[Tuple[str, float]]:
83
+ """
84
+ Find most similar existing pattern above threshold.
85
+ Returns (pattern_id, similarity_score) or None if no match found.
86
+ """
87
+ threshold = threshold or self.similarity_threshold
88
+ best_match = None
89
+ best_score = -1.0
90
+
91
+ for pattern_id, pattern in self.patterns.items():
92
+ score = self._cosine_similarity(embedding, pattern.embedding)
93
+ if score > best_score and score >= threshold:
94
+ best_score = score
95
+ best_match = pattern_id
96
+
97
+ return (best_match, best_score) if best_match else None
98
+
99
+ def observe(
100
+ self,
101
+ context: Any,
102
+ response_embedding: torch.Tensor,
103
+ success_metric: float = 0.5,
104
+ ethical_scores: Optional[Dict[str, float]] = None,
105
+ metadata: Optional[Dict[str, Any]] = None
106
+ ) -> Optional[str]:
107
+ """
108
+ Observe and learn from a new interaction.
109
+
110
+ Args:
111
+ context: The input context (text, hash, or any hashable)
112
+ response_embedding: Embedding of the model's response
113
+ success_metric: Success metric (0.0 to 1.0)
114
+ ethical_scores: Optional ethical constraint scores
115
+ metadata: Additional metadata about the interaction
116
+
117
+ Returns:
118
+ The pattern ID that was created or updated, or None if skipped due to ethical constraints
119
+ """
120
+ # Input validation
121
+ if not isinstance(response_embedding, torch.Tensor):
122
+ response_embedding = torch.tensor(response_embedding, dtype=torch.float32)
123
+
124
+ if response_embedding.dim() > 1:
125
+ response_embedding = response_embedding.mean(dim=0)
126
+
127
+ # Normalize for stability
128
+ response_embedding = F.normalize(response_embedding, dim=0)
129
+
130
+ # Check ethical constraints
131
+ if ethical_scores:
132
+ for constraint, score in ethical_scores.items():
133
+ if constraint in self.ethical_boundaries:
134
+ if score < self.ethical_boundaries[constraint]:
135
+ # Skip learning from unethical patterns
136
+ return None
137
+
138
+ # Get or create pattern
139
+ context_hash = self._get_context_hash(context)
140
+
141
+ # Check for similar patterns
142
+ similar = self._find_similar_pattern(response_embedding)
143
+
144
+ if similar:
145
+ pattern_id, similarity = similar
146
+ pattern = self.patterns[pattern_id]
147
+
148
+ # Update existing pattern
149
+ alpha = self.learning_rate * success_metric
150
+ pattern.embedding = (1 - alpha) * pattern.embedding + alpha * response_embedding
151
+ pattern.count += 1
152
+ pattern.success_sum += success_metric
153
+ pattern.last_used = time.time()
154
+ if metadata:
155
+ pattern.metadata.update(metadata)
156
+
157
+ self.stats['patterns_merged'] += 1
158
+ return pattern_id
159
+ else:
160
+ # Create new pattern
161
+ if len(self.patterns) >= self.max_patterns:
162
+ self._prune_patterns()
163
+
164
+ pattern_id = f"ritual_{len(self.patterns)}_{int(time.time())}"
165
+ self.patterns[pattern_id] = RitualPattern(
166
+ embedding=response_embedding,
167
+ count=1,
168
+ success_sum=success_metric,
169
+ metadata=metadata or {}
170
+ )
171
+
172
+ self.stats['patterns_created'] += 1
173
+ return pattern_id
174
+
175
def get_ritual_response(
    self,
    context: Any,
    default_response: torch.Tensor,
    min_confidence: float = 0.6,
    blend_ratio: float = 0.3
) -> torch.Tensor:
    """
    Get a ritualized response if a strong pattern exists.

    Args:
        context: The input context (currently unused for matching; kept for
            interface compatibility — patterns are selected purely by
            confidence. NOTE(review): the original computed an unused
            context hash here; that dead code has been removed.)
        default_response: Default model response to fall back to
        min_confidence: Minimum confidence to use a ritual
        blend_ratio: How much to blend ritual with default (0.0 to 1.0)

    Returns:
        Blended response tensor, or `default_response` unchanged when no
        pattern meets the confidence threshold.
    """
    best_pattern = None
    best_score = 0.0

    for pattern in self.patterns.values():
        # Patterns must have been observed enough times to be trusted.
        if pattern.count < self.min_occurrences:
            continue

        # Confidence = average success, discounted for small sample sizes
        # by the (1 - 1/(1+n)) factor.
        confidence = (pattern.success_sum / pattern.count) * (1 - 1 / (1 + pattern.count))

        if confidence > best_score and confidence >= min_confidence:
            best_score = confidence
            best_pattern = pattern

    if best_pattern is None:
        return default_response

    # Record usage so pruning keeps this pattern alive.
    best_pattern.last_used = time.time()

    ritual_response = best_pattern.embedding
    if ritual_response.shape != default_response.shape:
        # Simple broadcasting for now; assumes the stored embedding is
        # expandable to the default's shape — TODO confirm for all callers.
        ritual_response = ritual_response.expand_as(default_response)

    return (1 - blend_ratio) * default_response + blend_ratio * ritual_response
224
+
225
def should_apply_ritual(self, context: Any, min_confidence: float = 0.5) -> bool:
    """
    Determine if a ritual should be applied based on confidence.

    Args:
        context: The input context (currently unused for matching; kept for
            interface compatibility. NOTE(review): the original computed an
            unused context hash here; that dead code has been removed.)
        min_confidence: Minimum confidence threshold

    Returns:
        bool: True if a ritual should be applied. The decision is
        stochastic: the first sufficiently-confident pattern is applied
        with probability equal to its confidence.
    """
    for pattern in self.patterns.values():
        if pattern.count < self.min_occurrences:
            continue
        # Same discounted-confidence formula used in get_ritual_response.
        confidence = (pattern.success_sum / pattern.count) * (1 - 1 / (1 + pattern.count))
        if confidence >= min_confidence:
            # Higher confidence = more likely to apply
            return random.random() < confidence

    return False
247
+
248
+ def _prune_patterns(self, min_usage: int = 3, max_age_days: int = 30) -> int:
249
+ """
250
+ Remove infrequently used or old patterns.
251
+
252
+ Returns:
253
+ Number of patterns pruned
254
+ """
255
+ now = time.time()
256
+ max_age_seconds = max_age_days * 24 * 3600
257
+ to_remove = []
258
+
259
+ for pattern_id, pattern in self.patterns.items():
260
+ if (pattern.count <= min_usage and
261
+ (now - pattern.last_used) > max_age_seconds):
262
+ to_remove.append(pattern_id)
263
+
264
+ for pattern_id in to_remove:
265
+ del self.patterns[pattern_id]
266
+
267
+ self.stats['patterns_pruned'] += len(to_remove)
268
+ return len(to_remove)
269
+
270
def get_stats(self) -> Dict[str, Any]:
    """Get current statistics and metrics."""
    # Per-pattern average success rates; empty when no patterns exist.
    success_rates = [p.success_sum / p.count for p in self.patterns.values()]

    summary = dict(self.stats)
    summary.update({
        'active_patterns': len(self.patterns),
        'total_usage': sum(p.count for p in self.patterns.values()),
        'avg_success_rate': np.mean(success_rates) if success_rates else 0,
        'ethical_boundaries': self.ethical_boundaries,
    })
    return summary
283
+
284
def save_state(self, filepath: str) -> bool:
    """Save current state to disk.

    Serializes all patterns (embeddings as plain lists), accumulated stats,
    and the construction config as JSON. Returns True on success; on any
    failure the error is printed and False is returned.
    """
    try:
        serialized = {}
        for pid, pat in self.patterns.items():
            serialized[pid] = {
                'embedding': pat.embedding.tolist(),
                'count': pat.count,
                'last_used': pat.last_used,
                'success_sum': pat.success_sum,
                'metadata': pat.metadata,
            }

        payload = {
            'patterns': serialized,
            'stats': self.stats,
            'version': '1.0',
            # Config is persisted so load_state can rebuild an equivalent instance.
            'config': {
                'embedding_dim': self.embedding_dim,
                'min_occurrences': self.min_occurrences,
                'learning_rate': self.learning_rate,
                'max_patterns': self.max_patterns,
                'similarity_threshold': self.similarity_threshold,
            },
        }

        with open(filepath, 'w') as handle:
            json.dump(payload, handle)
        return True

    except Exception as exc:
        print(f"Error saving state: {exc}")
        return False
316
+
317
@classmethod
def load_state(cls, filepath: str, **kwargs) -> 'EmergentRituals':
    """Load state from disk.

    Rebuilds an instance from a JSON file written by save_state(). On any
    failure (missing file, corrupt JSON, bad schema) the error is printed
    and a fresh instance constructed from **kwargs is returned instead.
    """
    try:
        with open(filepath, 'r') as f:
            state = json.load(f)

        # Get config from saved state or use defaults
        config = state.get('config', {})
        # Allow overrides from kwargs.
        # NOTE(review): only keys already present in the saved config can be
        # overridden; unknown kwargs are silently ignored on this path (but
        # ARE passed to cls() in the error fallback below) — confirm intended.
        for key, value in kwargs.items():
            if key in config:
                config[key] = value

        # Create new instance with saved patterns
        instance = cls(**config)
        instance.patterns = {
            pid: RitualPattern(
                embedding=torch.tensor(data['embedding'], dtype=torch.float32),
                count=data['count'],
                last_used=data['last_used'],
                success_sum=data['success_sum'],
                metadata=data.get('metadata', {})
            )
            for pid, data in state['patterns'].items()
        }

        # Restore counters; an empty dict if the file predates stats tracking.
        instance.stats = state.get('stats', {})
        return instance

    except Exception as e:
        print(f"Error loading state: {e}")
        return cls(**kwargs)  # Return fresh instance on error
components/ethical_learner.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Ethical Learner Module for TRuCAL
3
+
4
+ Integrates the RecursiveLearner with the existing EthicalProcessor
5
+ for a complete ethical reasoning and learning system.
6
+ """
7
+
8
+ from typing import Dict, Any, Optional, Tuple
9
+ import torch
10
+ from .ethical_processor import EthicalProcessor
11
+ from .recursive_learner import RecursiveLearner
12
+
13
class EthicalLearner:
    """
    A unified ethical reasoning system that combines:
    - Rule-based ethical frameworks (from EthicalProcessor)
    - Case-based learning (from RecursiveLearner)
    """

    def __init__(self,
                 casebase_path: str = 'casebase.json',
                 similarity_threshold: float = 0.6,
                 model_name: str = 'all-MiniLM-L6-v2',
                 d_model: int = 512):
        """
        Initialize the ethical learner.

        Args:
            casebase_path: Path to save/load the casebase
            similarity_threshold: Minimum similarity score (0-1) to consider a match
            model_name: Name of the sentence transformer model to use for embeddings
            d_model: Dimensionality of the ethical processor model
        """
        self.ethical_processor = EthicalProcessor(d_model=d_model)
        self.recursive_learner = RecursiveLearner(
            casebase_path=casebase_path,
            similarity_threshold=similarity_threshold,
            model_name=model_name
        )

    def _framework_weights(self) -> Dict[str, float]:
        """Current weight of each moral framework in the rule-based processor."""
        return {k: v['weight'] for k, v in self.ethical_processor.moral_frameworks.items()}

    def process_query(self, query: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Process an ethical query using both rule-based and case-based reasoning.

        Args:
            query: The ethical question or scenario
            context: Additional context for the query

        Returns:
            Dictionary containing the response and metadata
        """
        # First try to get a response from the case-based learner
        case_response, case_metadata = self.recursive_learner.get_response(query)

        # If we have a good match, use it
        if case_metadata.get('similarity', 0) >= self.recursive_learner.similarity_threshold:
            # Bug fix: EthicalProcessor has no `analyze` method; use its
            # public `process_question` entry point instead.
            ethical_analysis = self.ethical_processor.process_question(query)

            return {
                'response': case_response,
                'source': 'case_based',
                'confidence': case_metadata['similarity'],
                'case_id': case_metadata.get('case_id'),
                'ethical_analysis': ethical_analysis,
                'metadata': {
                    'case_metadata': case_metadata,
                    'ethical_frameworks': self._framework_weights()
                }
            }

        # If no good case match, fall back to the rule-based processor.
        # Bug fix: was `self.ethical_processor.process(...)`, which does not
        # exist on EthicalProcessor.
        rule_result = self.ethical_processor.process_question(
            query, context_str=str(context) if context else ""
        )

        return {
            'response': rule_result['response_text'],
            'source': 'rule_based',
            'confidence': 1.0,  # High confidence in rule-based responses
            'metadata': {
                'ethical_frameworks': self._framework_weights(),
                'development_phase': rule_result['development_phase']
            }
        }

    def add_case(self, question: str, response: str, tags: Optional[list] = None,
                 metadata: Optional[Dict] = None) -> Dict:
        """
        Add a new case to the casebase.

        Bug fix: the annotation previously used `List[str]` without importing
        `List`, which raised NameError when the class body was executed.

        Args:
            question: The ethical question or scenario
            response: The response or analysis
            tags: Optional list of string tags for categorization
            metadata: Additional metadata about the case

        Returns:
            Dictionary with status and case information
        """
        case = self.recursive_learner.add_case(
            question=question,
            response=response,
            tags=tags,
            metadata=metadata or {}
        )

        return {
            'status': 'success',
            # NOTE(review): id() is only unique for the object's lifetime and
            # is not stable across runs — confirm callers treat it as opaque.
            'case_id': id(case),
            'similarity': 1.0,  # New case, so perfect match to itself
            'total_cases': len(self.recursive_learner.casebase)
        }

    def provide_feedback(self, case_id: int, was_helpful: bool):
        """
        Provide feedback on a case's helpfulness.

        Args:
            case_id: The ID of the case
            was_helpful: Whether the response was helpful
        """
        self.recursive_learner.provide_feedback(case_id, was_helpful)

    def get_stats(self) -> Dict[str, Any]:
        """Get statistics about the ethical learner."""
        stats = self.recursive_learner.get_stats()
        # Bug fix: EthicalProcessor exposes get_developmental_trajectory(),
        # not get_active_frameworks()/get_development_phase().
        trajectory = self.ethical_processor.get_developmental_trajectory()
        stats.update({
            'ethical_frameworks': self._framework_weights(),
            'development_phase': trajectory.get('current_phase', 'unknown')
        })
        return stats

    def save(self):
        """Save the current state of the learner."""
        self.recursive_learner._save_casebase()

    def load(self):
        """Load the saved state of the learner."""
        self.recursive_learner._load_casebase()
139
+
140
+
141
# Example usage
# NOTE(review): constructing EthicalLearner loads a sentence-transformer
# model per __init__'s docstring — this demo likely requires that model
# to be available locally; confirm before running offline.
if __name__ == "__main__":
    # Initialize the ethical learner
    learner = EthicalLearner()

    # Example query
    query = "Is it ethical to use personal data for targeted advertising?"
    result = learner.process_query(query)

    print(f"Query: {query}")
    print(f"Response: {result['response']}")
    print(f"Source: {result['source']}")
    print(f"Confidence: {result['confidence']:.2f}")

    # Add a new case
    print("\nAdding new case...")
    learner.add_case(
        question="What are the ethics of data privacy in AI?",
        response=("Data privacy in AI involves balancing innovation with individual rights. "
                  "Key considerations include informed consent, data minimization, purpose limitation, "
                  "and ensuring transparency about how data is used."),
        tags=["privacy", "AI", "ethics"]
    )

    # Get stats
    print("\nLearner statistics:")
    print(learner.get_stats())
components/ethical_processor.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from collections import deque
4
+ import time
5
+
6
class EthicalProcessor:
    """Enhanced ethical processor with multi-dimensional moral reasoning and developmental tracking"""

    def __init__(self, d_model=512):
        # Width of the neural encoding path below.
        self.d_model = d_model

        # Expanded moral frameworks with cultural awareness.
        # Weights sum to 1.0 and are reported verbatim by process_question().
        self.moral_frameworks = {
            'deontological': {'weight': 0.25, 'focus': 'duties', 'cultural_variants': ['kantian', 'contractarian']},
            'utilitarian': {'weight': 0.25, 'focus': 'consequences', 'cultural_variants': ['classic', 'preference']},
            'virtue_ethics': {'weight': 0.20, 'focus': 'character', 'cultural_variants': ['aristotelian', 'confucian']},
            'care_ethics': {'weight': 0.15, 'focus': 'relationships', 'cultural_variants': ['feminist', 'communal']},
            'rights_based': {'weight': 0.15, 'focus': 'entitlements', 'cultural_variants': ['natural', 'legal']}
        }

        # Developmental tracking with cross-cultural awareness
        # (Kohlberg-style phase names; 'level' is surfaced as
        # 'developmental_level' in process_question's result).
        self.development_phases = {
            'pre_conventional': {
                'level': 1,
                'focus': 'self_interest',
                'cultural_expression': {
                    'western': 'avoiding punishment',
                    'eastern': 'maintaining harmony',
                    'indigenous': 'tribal belonging'
                }
            },
            'conventional': {
                'level': 2,
                'focus': 'social_norms',
                'cultural_expression': {
                    'western': 'law_and_order',
                    'eastern': 'filial_piety',
                    'indigenous': 'ancestral_traditions'
                }
            },
            'post_conventional': {
                'level': 3,
                'focus': 'principled_reasoning',
                'cultural_expression': {
                    'western': 'universal_principles',
                    'eastern': 'cosmic_harmony',
                    'indigenous': 'earth_stewardship'
                }
            }
        }

        # Learnable ethical reasoning components.
        # NOTE(review): neither `ethical_encoder` nor
        # `cultural_context_weights` is referenced by any method in this
        # class — they appear to be reserved for future use; confirm.
        self.ethical_encoder = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.LayerNorm(d_model // 2),
            nn.Linear(d_model // 2, 256),
            nn.Tanh()
        )

        # Cultural context awareness
        self.cultural_context_weights = nn.Parameter(torch.ones(3))  # western, eastern, indigenous

        # Moral development tracking (bounded ring buffer of analyses).
        self.moral_development_history = deque(maxlen=1000)

    def process_question(self, question, context_str="", cultural_context="balanced"):
        """Enhanced ethical processing with cultural and developmental awareness.

        Returns a metadata dict with keys: moral_tension, development_phase,
        ethical_dimensions, response_text, cultural_context,
        framework_weights, processed, developmental_level.
        """
        if not question:
            return self._get_default_metadata()

        # Multi-dimensional ethical analysis
        ethical_dims = self._analyze_ethical_dimensions(question, context_str)
        cultural_weights = self._get_cultural_weights(cultural_context)
        moral_tension = self._calculate_moral_tension(ethical_dims, cultural_weights)
        development_phase = self._determine_development_phase(moral_tension, ethical_dims)

        # Generate culturally-aware response
        response = self._generate_culturally_aware_response(question, ethical_dims, cultural_weights)

        # Track moral development
        self._track_moral_development(ethical_dims, moral_tension, development_phase)

        return {
            'moral_tension': float(moral_tension),
            'development_phase': development_phase,
            'ethical_dimensions': ethical_dims,
            'response_text': response,
            'cultural_context': cultural_context,
            'framework_weights': {k: v['weight'] for k, v in self.moral_frameworks.items()},
            'processed': True,
            'developmental_level': self.development_phases[development_phase]['level']
        }

    def _get_default_metadata(self):
        """Fallback result for empty questions; mirrors process_question's schema
        with `processed=False`."""
        return {
            'moral_tension': 0.0,
            'development_phase': 'conventional',
            'ethical_dimensions': {},
            'response_text': "I'm here to help with ethical considerations. What would you like to discuss?",
            'cultural_context': 'universal',
            'framework_weights': {k: v['weight'] for k, v in self.moral_frameworks.items()},
            'processed': False,
            'developmental_level': 2
        }

    def _analyze_ethical_dimensions(self, question, context_str):
        """Multi-dimensional ethical analysis with contextual awareness.

        Keyword matching over question+context; each dimension's score is
        0.3 per matched keyword, capped at 0.9.
        """
        question_lower = question.lower()
        context_lower = context_str.lower()

        dims = {
            'honesty_vs_protection': 0.0,
            'rules_vs_harm': 0.0,
            'individual_vs_community': 0.0,
            'rights_vs_consequences': 0.0,
            'autonomy_vs_care': 0.0,
            'justice_vs_mercy': 0.0,
            'tradition_vs_progress': 0.0,
            'loyalty_vs_truth': 0.0
        }

        # Enhanced pattern recognition.
        # NOTE(review): only 5 of the 8 dims above have keyword patterns;
        # rights_vs_consequences, tradition_vs_progress and loyalty_vs_truth
        # therefore always stay 0.0 — confirm intended.
        ethical_patterns = {
            'honesty_vs_protection': ['lie', 'truth', 'protect', 'deceive', 'honest'],
            'rules_vs_harm': ['rules', 'harm', 'break', 'follow', 'hurt'],
            'individual_vs_community': ['self', 'others', 'community', 'individual', 'society'],
            'autonomy_vs_care': ['freedom', 'care', 'independence', 'nurture', 'control'],
            'justice_vs_mercy': ['justice', 'mercy', 'fair', 'forgive', 'punish']
        }

        for dimension, patterns in ethical_patterns.items():
            matches = [p for p in patterns if p in question_lower or p in context_lower]
            if matches:
                dims[dimension] = min(0.9, len(matches) * 0.3)

        return dims

    def _get_cultural_weights(self, cultural_context):
        """Get cultural weighting based on context; unknown contexts fall
        back to a near-uniform split."""
        base_weights = {'western': 0.33, 'eastern': 0.33, 'indigenous': 0.34}

        if cultural_context == "western":
            return {'western': 0.6, 'eastern': 0.2, 'indigenous': 0.2}
        elif cultural_context == "eastern":
            return {'western': 0.2, 'eastern': 0.6, 'indigenous': 0.2}
        elif cultural_context == "indigenous":
            return {'western': 0.2, 'eastern': 0.2, 'indigenous': 0.6}
        else:
            return base_weights

    def _calculate_moral_tension(self, ethical_dims, cultural_weights):
        """Calculate moral tension with cultural sensitivity.

        NOTE(review): the eastern/indigenous focus dimension names below
        ('harmony_vs_truth', 'earth_vs_progress', etc.) are never produced
        by _analyze_ethical_dimensions, so those cultural tensions are
        always 0 and only the western component contributes — confirm.
        """
        if not ethical_dims:
            return 0.0

        # Weight tensions by cultural context
        western_focus = ['individual_vs_community', 'rights_vs_consequences']
        eastern_focus = ['harmony_vs_truth', 'community_vs_individual']
        indigenous_focus = ['earth_vs_progress', 'ancestral_vs_modern']

        cultural_tensions = {
            'western': sum(ethical_dims.get(d, 0) for d in western_focus) / len(western_focus),
            'eastern': sum(ethical_dims.get(d, 0) for d in eastern_focus) / len(eastern_focus),
            'indigenous': sum(ethical_dims.get(d, 0) for d in indigenous_focus) / len(indigenous_focus)
        }

        # Weighted average across cultures
        total_tension = sum(cultural_tensions[c] * cultural_weights[c] for c in cultural_weights)
        return min(1.0, total_tension * 1.5)  # Scale for sensitivity

    def _determine_development_phase(self, moral_tension, ethical_dims):
        """Determine developmental phase with complexity awareness.

        Complexity = number of dimensions scoring above 0.3.
        """
        complexity = len([d for d in ethical_dims.values() if d > 0.3])

        if moral_tension < 0.3 or complexity < 2:
            return 'pre_conventional'
        elif moral_tension < 0.7 or complexity < 4:
            return 'conventional'
        else:
            return 'post_conventional'

    def _generate_culturally_aware_response(self, question, ethical_dims, cultural_weights):
        """Generate response that respects multiple cultural perspectives.

        Picks the highest-scoring dimension, gathers per-culture template
        text for every culture weighted above 0.2, and joins the top two
        (by weight) as a numbered composite.
        """
        primary_tension = max(ethical_dims.items(), key=lambda x: x[1]) if ethical_dims else (None, 0)

        # Cultural response templates
        cultural_responses = {
            'western': {
                'honesty_vs_protection': "From a rights-based perspective, truth-telling is fundamental, though consequences must be weighed carefully.",
                'rules_vs_harm': "The tension between legal principles and preventing harm requires examining both contractual duties and outcomes.",
                'default': "This situation requires careful ethical consideration balancing individual rights with broader consequences."
            },
            'eastern': {
                'honesty_vs_protection': "In many Eastern traditions, maintaining social harmony and protecting relationships may sometimes temper absolute honesty.",
                'rules_vs_harm': "The way of virtue often considers both the letter and spirit of rules, emphasizing compassion in application.",
                'default': "This situation invites reflection on maintaining harmony while considering the greater good."
            },
            'indigenous': {
                'honesty_vs_protection': "Many indigenous wisdom traditions value truth as part of right relationship with all beings, while recognizing protective responsibilities.",
                'rules_vs_harm': "Ancestral teachings often emphasize that rules should serve life and community wellbeing, adapting when they cause harm.",
                'default': "This calls for wisdom in balancing individual needs with the wellbeing of the community and all our relations."
            }
        }

        # Combine cultural perspectives
        responses = []
        for culture, weight in cultural_weights.items():
            if weight > 0.2:  # Only include significant cultural perspectives
                if primary_tension[0]:
                    response = cultural_responses[culture].get(
                        primary_tension[0],
                        cultural_responses[culture]['default']
                    )
                else:
                    response = cultural_responses[culture]['default']
                responses.append((response, weight))

        if responses:
            # Sort by weight and take top responses
            responses.sort(key=lambda x: x[1], reverse=True)
            return " ".join([f"({i+1}) {r[0]}" for i, r in enumerate(responses[:2])])

        return "This situation invites reflection across multiple ethical frameworks and cultural perspectives."

    def _track_moral_development(self, ethical_dims, moral_tension, phase):
        """Track moral development over time (appends to a bounded deque)."""
        entry = {
            'timestamp': time.time(),
            'ethical_complexity': len(ethical_dims),
            'moral_tension': moral_tension,
            'phase': phase,
            'primary_dimension': max(ethical_dims.items(), key=lambda x: x[1])[0] if ethical_dims else None
        }
        self.moral_development_history.append(entry)

    def get_developmental_trajectory(self):
        """Analyze moral development over the last 10 tracked analyses.

        Returns a summary dict; when no history exists the dict has keys
        trend/complexity_growth/phase_transitions (no 'current_phase').
        """
        if not self.moral_development_history:
            return {'trend': 'unknown', 'complexity_growth': 0, 'phase_transitions': 0}

        recent = list(self.moral_development_history)[-10:]
        phases = [e['phase'] for e in recent]

        return {
            'trend': 'developing' if len(set(phases)) > 1 else 'stable',
            'average_complexity': sum(e['ethical_complexity'] for e in recent) / len(recent),
            'current_phase': phases[-1] if phases else 'unknown',
            'phase_transitions': len(set(phases)) - 1
        }
components/ethical_reasoner.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Ethical reasoning module for TRuCAL.
3
+ Implements a framework-aware ethical reasoning system that analyzes questions
4
+ using multiple ethical frameworks (deontological, utilitarian, virtue ethics).
5
+ """
6
+ import random
7
+ from typing import Dict, List, Optional
8
+
9
class EthicalReasoner:
    """
    An advanced ethical reasoning system that analyzes questions using multiple
    ethical frameworks to provide nuanced ethical analysis.
    """
    def __init__(self):
        # Static framework table; 'weight' values sum to 1.0 but are not
        # currently read by any method below.
        self.ethical_frameworks = {
            'deontological': {
                'principles': ['duty', 'rules', 'moral laws', 'categorical imperative'],
                'weight': 0.3
            },
            'utilitarian': {
                'principles': ['consequences', 'greatest good', 'happiness', 'utility'],
                'weight': 0.4
            },
            'virtue_ethics': {
                'principles': ['character', 'virtues', 'wisdom', 'flourishing'],
                'weight': 0.3
            }
        }

        # Known dilemma archetypes: each has a fixed tension score and
        # per-framework perspective text used by generate_response().
        self.ethical_dilemmas = {
            'lying_protection': {
                'tension': 0.8,
                'frameworks': {
                    'deontological': "Generally opposes lying as it violates the duty of truth-telling",
                    'utilitarian': "May permit lying if it produces better consequences than truth-telling",
                    'virtue_ethics': "Examines what kind of character lying cultivates"
                }
            },
            'harmful_rules': {
                'tension': 0.9,
                'frameworks': {
                    'deontological': "Rules should be followed unless they conflict with higher duties",
                    'utilitarian': "Rules causing harm should be re-evaluated based on consequences",
                    'virtue_ethics': "Wisdom is needed to know when to uphold or challenge rules"
                }
            },
            'rights_community': {
                'tension': 0.7,
                'frameworks': {
                    'deontological': "Individual rights create duties that must be respected",
                    'utilitarian': "Balance individual and community welfare for greatest good",
                    'virtue_ethics': "Cultivate justice and compassion in balancing interests"
                }
            }
        }

    def analyze_ethical_dimensions(self, question: str) -> Dict:
        """Analyze the ethical content of the question.

        Detection is keyword co-occurrence: each dilemma fires when a word
        from each of its two keyword groups appears in the question.
        moral_tension is the max tension over detected dilemmas.

        Args:
            question: The ethical question or dilemma to analyze

        Returns:
            Dict containing analysis results including detected dilemmas and moral tension
        """
        if not question or not question.strip():
            return {
                'detected_dilemmas': [],
                'moral_tension': 0.0,
                'complexity': 0
            }

        question_lower = question.lower()
        detected_dilemmas = []
        moral_tension = 0.0

        # Detect specific ethical dilemmas
        if any(word in question_lower for word in ['lie', 'deceive', 'false', 'truth']):
            if any(word in question_lower for word in ['protect', 'save', 'help', 'harm']):
                detected_dilemmas.append('lying_protection')
                moral_tension = max(moral_tension, self.ethical_dilemmas['lying_protection']['tension'])

        if any(word in question_lower for word in ['harm', 'hurt', 'damage', 'suffer']):
            if any(word in question_lower for word in ['rule', 'law', 'follow', 'obey']):
                detected_dilemmas.append('harmful_rules')
                moral_tension = max(moral_tension, self.ethical_dilemmas['harmful_rules']['tension'])

        if any(word in question_lower for word in ['right', 'freedom', 'individual']):
            if any(word in question_lower for word in ['community', 'society', 'group', 'collective']):
                detected_dilemmas.append('rights_community')
                moral_tension = max(moral_tension, self.ethical_dilemmas['rights_community']['tension'])

        return {
            'detected_dilemmas': detected_dilemmas,
            'moral_tension': moral_tension,
            'complexity': len(detected_dilemmas)
        }

    def generate_response(self, question: str) -> str:
        """Generate an ethical analysis response.

        Args:
            question: The ethical question to analyze

        Returns:
            str: A thoughtful ethical analysis response
        """
        analysis = self.analyze_ethical_dimensions(question)

        if not analysis['detected_dilemmas']:
            return self._generate_thoughtful_default(question)

        response_parts = []

        # Introduction based on moral tension.
        # NOTE(review): this lookup keys on exact float values; it works
        # because dilemma tensions are exactly 0.7/0.8/0.9, but any other
        # tension silently falls through to the default phrase.
        tension_phrases = {
            0.7: "This raises significant ethical considerations",
            0.8: "This presents a profound ethical dilemma",
            0.9: "This touches on one of the most challenging ethical tensions"
        }

        intro = tension_phrases.get(
            analysis['moral_tension'],
            "This involves important ethical dimensions"
        )
        response_parts.append(f"**{intro}.**")

        # Discuss each detected dilemma
        for dilemma in analysis['detected_dilemmas']:
            response_parts.append("\n**Key considerations:**")

            # Include perspectives from different frameworks
            for framework, perspective in self.ethical_dilemmas[dilemma]['frameworks'].items():
                response_parts.append(f"- *{framework.title()}*: {perspective}")

        # Add reflective conclusion
        response_parts.append(
            "\n**Reflection**: The complexity suggests there may not be a single right answer. "
            "Different ethical frameworks offer valuable but sometimes conflicting insights."
        )

        return "\n".join(response_parts)

    def _generate_thoughtful_default(self, question: str) -> str:
        """Generate thoughtful responses for non-ethical questions.

        Picks one of three canned replies at random (non-deterministic).

        Args:
            question: The input question

        Returns:
            str: A thoughtful default response
        """
        thoughtful_responses = [
            "I appreciate you raising this question. While it may not involve clear ethical dilemmas, "
            "it's worth considering what values or principles might be at play.",

            "This is an interesting question. From an ethical perspective, we might consider "
            "what virtues or values are relevant to this situation.",

            "Thank you for this question. Even when ethical dimensions aren't immediately obvious, "
            "reflecting on our values and principles can provide valuable insights."
        ]
        return random.choice(thoughtful_responses)
164
+
165
# Singleton instance for easy import
# (methods only read the instance's static tables, so sharing one
# module-level instance across importers is safe)
ethical_reasoner = EthicalReasoner()
components/feedback_ingestion.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Feedback ingestion system for TRuCAL with trauma-aware processing.
3
+ """
4
+ from dataclasses import dataclass
5
+ from enum import Enum
6
+ from typing import Dict, List, Optional, Any
7
+ import re
8
+ import torch
9
+ from datetime import datetime, timedelta
10
+
11
@dataclass
class FeedbackConsent:
    """Granular consent management for different feedback types.

    Each boolean flag gates one feedback channel; everything defaults to
    False so consent is strictly opt-in.
    """
    explicit: bool = False   # explicit user ratings
    implicit: bool = False   # engagement-derived signals
    somatic: bool = False    # biofeedback / physiological signals
    clinical: bool = False   # clinician-supplied overrides
    retention_days: int = 30  # Auto-delete after period
19
+
20
class FeedbackType(Enum):
    """Types of feedback supported by the system.

    Values are the wire-format strings expected in a feedback dict's
    'type' field.
    """
    EXPLICIT = "explicit_rating"       # direct user rating
    IMPLICIT = "implicit_engagement"   # inferred from engagement signals
    SOMATIC = "somatic_biofeedback"    # derived from bio-signals
    CLINICAL = "clinical_override"     # clinician-supplied override
    CORRECTION = "safety_correction"   # safety correction
27
+
28
class TemporalPatternAnalyzer:
    """Analyzes temporal patterns for trauma-aware learning.

    Tracks per-session state (e.g. when trauma topics last came up) and
    derives simple temporal context features from the wall clock.
    """

    def __init__(self):
        # session_id -> session state dict (e.g. 'last_trauma_discussion': datetime)
        self.session_patterns: Dict[str, Dict[str, Any]] = {}

    def get_current_pattern(self) -> Dict[str, Any]:
        """Return the current temporal context features."""
        now = datetime.now()

        return {
            'hour_of_day': now.hour,
            'is_weekend': now.weekday() >= 5,  # Saturday=5, Sunday=6
            'seasonal_factor': self._get_seasonal_factor(now),
            'light_conditions': self._get_light_conditions(now)
        }

    def has_recent_trauma_discussion(self, session_id: str, window_hours: int = 24) -> bool:
        """Check if the session discussed trauma content within window_hours."""
        session_data = self.session_patterns.get(session_id, {})
        last_trauma_time = session_data.get('last_trauma_discussion')
        if not last_trauma_time:
            return False

        return (datetime.now() - last_trauma_time).total_seconds() < (window_hours * 3600)

    def _get_seasonal_factor(self, dt: datetime) -> float:
        """Calculate a seasonal factor in [0, 1] based on the date.

        Simple sinusoidal approximation of seasonal variation, peaking
        near the summer solstice (day-of-year ~171) and bottoming near
        the winter solstice.
        """
        # BUG FIX: the original called np.sin/np.pi, but numpy was never
        # imported anywhere in this module (NameError at runtime). Use the
        # stdlib math module instead; local import keeps the edit self-contained.
        import math
        day_of_year = dt.timetuple().tm_yday
        return 0.5 * (1 + math.sin(2 * math.pi * (day_of_year - 80) / 365))

    def _get_light_conditions(self, dt: datetime) -> str:
        """Classify the time of day as 'daylight', 'twilight', or 'dark'."""
        hour = dt.hour
        if 6 <= hour < 18:
            return 'daylight'
        if 18 <= hour < 21 or 5 <= hour < 6:
            return 'twilight'
        return 'dark'
68
+
69
class TraumaAwareFeedbackIngestor:
    """Enhanced feedback processing with trauma awareness and consent management.

    Feedback is accepted only when the session holds a consent record that
    covers the feedback's type. Accepted items are sanitized (trauma details
    redacted, raw bio-signals reduced to derived metrics), risk-scored, and
    buffered for later batch consumption.
    """

    def __init__(self, buffer_size: int = 1000, min_consent_age: int = 18):
        """Initialize the feedback ingestor.

        Args:
            buffer_size: Maximum number of feedback items to keep in memory.
            min_consent_age: Minimum age for providing consent.
        """
        self.feedback_buffer: List[Dict[str, Any]] = []
        self.buffer_size = buffer_size
        self.min_consent_age = min_consent_age
        self.consent_records: Dict[str, "FeedbackConsent"] = {}  # session_id -> FeedbackConsent
        self.temporal_patterns = TemporalPatternAnalyzer()
        # Coarse keyword screen used to flag potentially traumatic content.
        self.trauma_keywords = {
            'abuse', 'assault', 'trauma', 'ptsd', 'trigger', 'flashback',
            'violence', 'attack', 'victim', 'survivor', 'harassment'
        }

    def request_consent(self, session_id: str, consent_config: "FeedbackConsent") -> bool:
        """Request and store granular consent from the user.

        Args:
            session_id: Unique identifier for the user session.
            consent_config: FeedbackConsent object with consent settings.

        Returns:
            bool: True if consent was successfully recorded.
        """
        if not self._verify_consent_capacity(session_id):
            return False

        self.consent_records[session_id] = consent_config
        return True

    def ingest_with_context(self, feedback: Dict[str, Any], session_id: str,
                            context_embedding: torch.Tensor) -> bool:
        """Ingest feedback with contextual information.

        Args:
            feedback: Dictionary containing feedback data.
            session_id: Session identifier for consent verification.
            context_embedding: Contextual embedding of the interaction.

        Returns:
            bool: True if feedback was successfully ingested.
        """
        consent = self.consent_records.get(session_id)
        if not consent or not self._check_granular_consent(feedback.get('type'), consent):
            return False

        # Enhanced context capture.
        feedback['context_embedding'] = context_embedding.cpu().numpy()
        feedback['temporal_context'] = self.temporal_patterns.get_current_pattern()

        # Trauma-aware processing (redaction + signal reduction).
        feedback = self._trauma_aware_sanitization(feedback)

        # Risk assessment: high scores escalate to a safety review, but the
        # feedback is still buffered.
        risk_score = self._assess_feedback_risk(feedback, session_id)
        if risk_score > 0.8:
            self._trigger_safety_review(feedback, session_id)

        # Add to buffer and maintain size.
        self.feedback_buffer.append(feedback)
        self._maintain_buffer_integrity()
        return True

    def _verify_consent_capacity(self, session_id: str) -> bool:
        """Verify if the user has capacity to provide consent.

        NOTE(review): placeholder — a real implementation would check age,
        mental state, etc.
        """
        return True

    def _check_granular_consent(self, feedback_type: str, consent: "FeedbackConsent") -> bool:
        """Check whether a specific feedback type is covered by consent.

        Unknown or missing type strings are treated as not consented.
        BUG FIX: the original called FeedbackType(feedback_type) unguarded,
        which raised ValueError for any unrecognised type string.
        """
        if not feedback_type:
            return False

        try:
            feedback_enum = FeedbackType(feedback_type)
        except ValueError:
            return False  # unknown feedback type: treat as not consented

        # Safety corrections are always allowed regardless of consent flags.
        if feedback_enum == FeedbackType.CORRECTION:
            return True

        return {
            FeedbackType.EXPLICIT: consent.explicit,
            FeedbackType.IMPLICIT: consent.implicit,
            FeedbackType.SOMATIC: consent.somatic,
            FeedbackType.CLINICAL: consent.clinical,
        }.get(feedback_enum, False)

    def _trauma_aware_sanitization(self, feedback: Dict[str, Any]) -> Dict[str, Any]:
        """Sanitize feedback while preserving therapeutic value."""
        if 'content' in feedback:
            content = feedback['content']
            feedback['original_content'] = content  # Keep original for clinical review

            # Redact specific trauma details before buffering.
            if self._contains_trauma_content(content):
                content = self._redact_specific_trauma_details(content)
                content = self._generalize_emotional_content(content)
                feedback['content'] = content
                feedback['contains_trauma_content'] = True

        if 'bio_signals' in feedback:
            # Raw physiological signals are replaced by derived metrics.
            feedback['arousal_metrics'] = self._extract_arousal_metrics(feedback['bio_signals'])
            del feedback['bio_signals']  # Remove raw signals

        return feedback

    def _contains_trauma_content(self, text: str) -> bool:
        """Check if text contains trauma-related keywords."""
        if not isinstance(text, str):
            return False

        text_lower = text.lower()
        return any(keyword in text_lower for keyword in self.trauma_keywords)

    def _redact_specific_trauma_details(self, text: str) -> str:
        """Redact specific trauma details while preserving emotional content.

        Simplified regex approach; sophisticated NLP would be needed in
        practice.
        """
        patterns = [
            (r'\b(?:i was|i \w+ed|i felt) (?:\w+ ){0,3}(?:abused|assaulted|attacked|raped|beaten|harmed|threatened|harassed)\b',
             '[trauma experience redacted]'),
            (r'\b(?:he|she|they) (?:\w+ ){0,3}(?:abused|assaulted|attacked|raped|beat|harmed|threatened|harassed) (?:me|us|them)\b',
             '[perpetrator action redacted]')
        ]

        for pattern, replacement in patterns:
            text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)

        return text

    def _generalize_emotional_content(self, text: str) -> str:
        """Generalize emotional content to reduce triggers.

        Simplified phrase mapping; sophisticated NLP would be needed in
        practice.
        """
        emotional_phrases = {
            r'i felt (?:very |extremely |really )?(?:scared|terrified|frightened|fearful)': 'I felt very afraid',
            r'i was (?:very |extremely |really )?(?:angry|furious|enraged)': 'I felt very angry',
            r'i was (?:very |extremely |really )?(?:sad|depressed|miserable|hopeless)': 'I felt very sad'
        }

        for pattern, replacement in emotional_phrases.items():
            text = re.sub(pattern, replacement, text, flags=re.IGNORECASE)

        return text

    def _extract_arousal_metrics(self, bio_signals: Dict[str, float]) -> Dict[str, float]:
        """Convert raw bio signals to normalized arousal metrics (all in [0, 1])."""
        metrics: Dict[str, float] = {}

        # Heart Rate Variability (HRV): lower HRV means more stressed, so the
        # normalized value is inverted. Typical range clamped to 20-200 ms.
        if 'hrv' in bio_signals:
            hrv = max(20, min(200, bio_signals['hrv']))
            metrics['hrv_norm'] = 1.0 - ((hrv - 20) / 180.0)

        # Galvanic Skin Response (GSR): higher is more aroused; clamp to [0, 1].
        if 'gsr' in bio_signals:
            metrics['gsr_norm'] = max(0.0, min(1.0, bio_signals['gsr']))

        # Composite arousal score: HRV-weighted blend when both are present.
        if 'hrv_norm' in metrics and 'gsr_norm' in metrics:
            metrics['composite_arousal'] = (metrics['hrv_norm'] * 0.6 +
                                            metrics['gsr_norm'] * 0.4)
        elif 'hrv_norm' in metrics:
            metrics['composite_arousal'] = metrics['hrv_norm']
        elif 'gsr_norm' in metrics:
            metrics['composite_arousal'] = metrics['gsr_norm']

        return metrics

    def _assess_feedback_risk(self, feedback: Dict[str, Any], session_id: str) -> float:
        """Assess potential re-traumatization risk in feedback.

        Returns:
            float: the maximum contributing risk factor, in [0, 1].
        """
        risk_factors = []

        # Recent trauma discussion pattern.
        if self.temporal_patterns.has_recent_trauma_discussion(session_id):
            risk_factors.append(0.7)

        # High arousal state.
        arousal = feedback.get('arousal_metrics', {}).get('composite_arousal', 0)
        if arousal > 0.7:
            risk_factors.append(min(arousal, 0.9))  # Cap at 0.9

        # Negative rating on a response that was meant to be supportive.
        if (feedback.get('type') == FeedbackType.EXPLICIT.value and
                feedback.get('rating', 5) <= 2 and
                self._was_response_supportive(feedback.get('response_id'))):
            risk_factors.append(0.9)

        return max(risk_factors) if risk_factors else 0.0

    def _was_response_supportive(self, response_id: str) -> bool:
        """Check if a response was intended to be supportive.

        Placeholder heuristic: IDs starting with 'S' are treated as
        supportive. BUG FIX: now always returns a real bool (the original
        `response_id and response_id.startswith('S')` returned None for
        falsy ids).
        """
        return bool(response_id and response_id.startswith('S'))

    def _trigger_safety_review(self, feedback: Dict[str, Any], session_id: str) -> None:
        """Trigger a safety review for concerning feedback.

        NOTE(review): stub — a real implementation would notify a human
        reviewer instead of printing.
        """
        print(f"[SAFETY REVIEW] Session {session_id} triggered safety review for feedback: {feedback}")

    def _maintain_buffer_integrity(self) -> None:
        """Trim the buffer to buffer_size, dropping oldest items first.

        Uses a single slice-delete instead of the original repeated
        pop(0) calls (each of which was O(n)).
        """
        overflow = len(self.feedback_buffer) - self.buffer_size
        if overflow > 0:
            del self.feedback_buffer[:overflow]

    def get_feedback_batch(self, batch_size: int = 32) -> List[Dict[str, Any]]:
        """Return the most recent batch_size feedback items for model training."""
        return self.feedback_buffer[-batch_size:] if self.feedback_buffer else []

    def clear_expired_feedback(self) -> int:
        """Remove feedback that has exceeded its retention period.

        Returns:
            int: Number of feedback items removed.
        """
        if not self.consent_records:
            return 0

        now = datetime.now()
        initial_count = len(self.feedback_buffer)

        # Keep only feedback that is still within its retention window.
        self.feedback_buffer = [
            fb for fb in self.feedback_buffer
            if not self._is_feedback_expired(fb, now)
        ]

        return initial_count - len(self.feedback_buffer)

    def _is_feedback_expired(self, feedback: Dict[str, Any], current_time: datetime) -> bool:
        """Check if feedback has exceeded its retention period.

        Items without a timestamp are kept; items with an invalid timestamp
        or without a matching consent record are treated as expired.
        """
        if 'timestamp' not in feedback:
            return False

        try:
            feedback_time = datetime.fromisoformat(feedback['timestamp'])
            session_id = feedback.get('session_id')

            if not session_id or session_id not in self.consent_records:
                return True  # No consent record: remove.

            retention_days = self.consent_records[session_id].retention_days
            return (current_time - feedback_time).days > retention_days

        except (ValueError, TypeError):
            return True  # Invalid timestamp format: remove.
components/feedback_logger.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Enhanced logging and feedback system for the AI Ethics Engine."""
2
+ import logging
3
+ import json
4
+ from pathlib import Path
5
+ from datetime import datetime
6
+ from typing import Dict, Any, Optional
7
+
8
# Module import side effect: ensure the logs directory exists so the
# FileHandler opened in FeedbackLogger.__init__ below can create its file.
Path("logs").mkdir(exist_ok=True)
10
+
11
class FeedbackLogger:
    """Handles structured logging for the AI Ethics Engine.

    Events are serialized as one JSON object per line and emitted to both
    a log file and the console.
    """

    def __init__(self, log_file: str = "logs/ethics_engine.log"):
        """Initialize the feedback logger.

        Args:
            log_file: Path of the file that receives log records.
        """
        self.logger = logging.getLogger("ethics_engine")
        self.logger.setLevel(logging.INFO)

        # BUG FIX: logging.getLogger("ethics_engine") returns the SAME logger
        # object for every FeedbackLogger instance, so the original added a
        # new pair of handlers on every instantiation and each record was
        # then emitted multiple times. Attach handlers only once.
        if not self.logger.handlers:
            # Shared formatter for both handlers.
            formatter = logging.Formatter(
                '%(asctime)s | %(levelname)s | %(message)s',
                datefmt='%Y-%m-%d %H:%M:%S'
            )

            # File handler.
            file_handler = logging.FileHandler(log_file)
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

            # Also log to console in development.
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            console_handler.setFormatter(formatter)
            self.logger.addHandler(console_handler)

    def log_event(self, event_type: str, payload: Dict[str, Any]) -> None:
        """Log a structured event as a single JSON line.

        Args:
            event_type: Type of event (e.g., 'analysis', 'feedback', 'error')
            payload: Dictionary containing event data (merged into the entry)
        """
        log_entry = {
            # NOTE(review): utcnow() is naive and deprecated in 3.12+; kept
            # here so the existing log timestamp format stays unchanged.
            "timestamp": datetime.utcnow().isoformat(),
            "event_type": event_type,
            **payload
        }
        # default=str stringifies non-JSON-serializable payload values.
        self.logger.info(json.dumps(log_entry, default=str))

    def record_analysis(self,
                        dilemma: str,
                        result: Dict[str, Any],
                        user_id: Optional[str] = None) -> None:
        """Record an analysis event with a compact result summary."""
        self.log_event("analysis", {
            "user_id": user_id or "anonymous",
            "dilemma": dilemma,
            "result_summary": {
                "framework_used": list(result.get("framework_analyses", {}).keys()),
                "success": "error" not in result,
                "execution_time": result.get("execution_time")
            }
        })

    def record_feedback(self,
                        dilemma: str,
                        feedback: str,
                        user_id: Optional[str] = None,
                        metadata: Optional[Dict[str, Any]] = None) -> None:
        """Record user feedback on an analysis."""
        self.log_event("feedback", {
            "user_id": user_id or "anonymous",
            "dilemma": dilemma,
            "feedback": feedback,
            "metadata": metadata or {}
        })

    def record_error(self,
                     error: Exception,
                     context: Optional[Dict[str, Any]] = None) -> None:
        """Record an error event (exception type, message, and context)."""
        self.log_event("error", {
            "error_type": error.__class__.__name__,
            "error_message": str(error),
            "context": context or {}
        })
90
+
91
# Global instance for easy import.
# NOTE(review): this is named `logger` but is a FeedbackLogger, not a
# logging.Logger — importing modules should alias it to avoid confusion.
# It is created at import time, which opens logs/ethics_engine.log.
logger = FeedbackLogger()
components/framework_loader.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Framework loader for dynamic loading of ethical frameworks from configuration.
3
+ """
4
+ from dataclasses import dataclass, field
5
+ from typing import List, Dict, Any, Optional
6
+ import yaml
7
+ from pathlib import Path
8
+ import logging
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
@dataclass
class EthicalFramework:
    """An ethical framework together with its runtime configuration."""
    name: str
    description: str
    key_principles: List[str]
    weight: float = 1.0
    category: str = ""
    enabled: bool = True
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this framework into a plain dictionary.

        Field values are shared (not copied), matching direct attribute access.
        """
        keys = ("name", "description", "key_principles", "weight",
                "category", "enabled", "metadata")
        return {key: getattr(self, key) for key in keys}
34
+
35
class FrameworkLoader:
    """Loads and manages ethical frameworks from a YAML configuration file."""

    def __init__(self, config_path: str = "config/frameworks.yaml"):
        """Initialize the framework loader.

        Args:
            config_path: Path to the frameworks configuration YAML file.

        Raises:
            FileNotFoundError: If the configuration file does not exist.
            yaml.YAMLError: If the configuration is not valid YAML.
        """
        self.config_path = Path(config_path)
        self.frameworks: Dict[str, "EthicalFramework"] = {}
        self.settings: Dict[str, Any] = {}
        self._load_frameworks()

    def _load_frameworks(self) -> None:
        """Load frameworks from the configuration file."""
        try:
            with open(self.config_path, 'r') as f:
                # BUG FIX: yaml.safe_load returns None for an empty file,
                # which made the .get() calls below raise AttributeError.
                # Normalise to an empty dict.
                config = yaml.safe_load(f) or {}

            # Load settings.
            self.settings = config.get('settings', {})

            # Load frameworks; a single malformed entry must not abort the
            # whole load, so failures are logged and skipped.
            for fw_config in config.get('frameworks', []):
                try:
                    framework = EthicalFramework(**fw_config)
                    self.frameworks[framework.name] = framework
                    logger.info(f"Loaded framework: {framework.name}")
                except Exception as e:
                    logger.error(f"Failed to load framework {fw_config.get('name', 'unknown')}: {e}")

            logger.info(f"Successfully loaded {len(self.frameworks)} frameworks")

        except FileNotFoundError:
            logger.error(f"Framework configuration file not found: {self.config_path}")
            raise
        except yaml.YAMLError as e:
            logger.error(f"Error parsing YAML configuration: {e}")
            raise

    def get_framework(self, name: str) -> Optional["EthicalFramework"]:
        """Return the framework with the given name, or None if unknown."""
        return self.frameworks.get(name)

    def get_active_frameworks(self) -> List["EthicalFramework"]:
        """Return all enabled frameworks."""
        return [fw for fw in self.frameworks.values() if fw.enabled]

    def get_default_frameworks(self) -> List["EthicalFramework"]:
        """Return the enabled frameworks named in settings['default_frameworks']."""
        default_names = self.settings.get('default_frameworks', [])
        return [fw for name, fw in self.frameworks.items()
                if name in default_names and fw.enabled]

    def reload(self) -> None:
        """Reload frameworks from the configuration file."""
        self.frameworks = {}
        self._load_frameworks()
94
+
95
# Global instance for easy import.
# NOTE(review): this runs at import time and raises (FileNotFoundError /
# yaml.YAMLError) if config/frameworks.yaml is missing or invalid, so
# importing this module requires the config file to be present.
framework_loader = FrameworkLoader()
components/gpt_oss.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from typing import Dict, Any, Optional
3
+ import logging
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
class GPTOSSModel:
    """
    A wrapper for open-source GPT models using the Hugging Face Transformers library.
    Supports both loading from pretrained models and custom fine-tuned models.
    """

    def __init__(self, model_name: str = "gpt2", device: str = "cpu"):
        """
        Initialize the OSS GPT model.

        Args:
            model_name: Name of the pretrained model or path to a local model
            device: Device to run the model on ('cpu' or 'cuda')
        """
        self.device = device
        self.model_name = model_name
        self._load_model()

    def _load_model(self) -> None:
        """Load the model and tokenizer onto the configured device."""
        try:
            logger.info(f"Loading model: {self.model_name}")
            # Left padding/truncation so the generated continuation always
            # immediately follows the end of the prompt.
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_name,
                padding_side="left",
                truncation_side="left"
            )

            # Add pad token if not present (GPT-style tokenizers often lack one).
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # fp16 on GPU to halve memory; fp32 on CPU where fp16 is slow.
            config = AutoConfig.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                config=config,
                torch_dtype=torch.float16 if 'cuda' in self.device else torch.float32,
                low_cpu_mem_usage=True
            ).to(self.device)

            self.model.eval()
            logger.info(f"Successfully loaded model: {self.model_name} on {self.device}")

        except Exception as e:
            logger.error(f"Failed to load model {self.model_name}: {e}")
            raise

    def generate(
        self,
        prompt: str,
        max_tokens: int = 150,
        temperature: float = 0.7,
        top_p: float = 0.92,
        top_k: int = 50,
        repetition_penalty: float = 1.1,
        max_context: int = 2048,
        **kwargs
    ) -> str:
        """
        Generate text from the model.

        Args:
            prompt: Input text prompt
            max_tokens: Maximum number of tokens to generate
            temperature: Sampling temperature (lower = more focused, higher = more random)
            top_p: Nucleus sampling parameter (0.0 to 1.0)
            top_k: Top-k sampling parameter (0 to disable)
            repetition_penalty: Penalty for repeating tokens (1.0 = no penalty)
            max_context: Total context window (prompt + generation) in tokens.
                Generalized from the previously hard-coded 2048, which was
                wrong for models with different context lengths. Default
                preserves the old behavior.
            **kwargs: Extra keyword arguments forwarded to model.generate()

        Returns:
            Generated text with the prompt stripped off
        """
        try:
            # Encode the input, truncating so there is room left for
            # max_tokens of generation inside the context window.
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=max_context - max_tokens
            ).to(self.device)

            # Generate response.
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    top_k=top_k if top_k > 0 else None,
                    do_sample=True,
                    repetition_penalty=repetition_penalty,
                    pad_token_id=self.tokenizer.eos_token_id,
                    **kwargs
                )

            # Slice off the prompt tokens before decoding so only the new
            # continuation is returned.
            response = self.tokenizer.decode(
                outputs[0][inputs.input_ids.shape[1]:],
                skip_special_tokens=True
            ).strip()

            return response

        except Exception as e:
            logger.error(f"Error generating text: {e}")
            raise

    def __call__(self, prompt: str, **kwargs) -> str:
        """Convenience method to call generate directly on the instance."""
        return self.generate(prompt, **kwargs)

    def get_model_info(self) -> Dict[str, Any]:
        """Get information about the loaded model."""
        return {
            "model_name": self.model_name,
            "device": self.device,
            "model_type": "gpt-oss",
            "supports_streaming": False
        }
components/level0_utils.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Level-0 Summary Utilities for TRuCAL.
3
+ Provides raw evidence summarization for transparent AI reasoning.
4
+ """
5
+ from typing import List, Dict, Optional
6
+ import json
7
+ from pathlib import Path
8
+ import logging
9
+ import time
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
class Level0Summary:
    """Handles Level-0 evidence collection and summarization.

    Produces a Markdown summary embedding the raw evidence verbatim and
    appends every summary to a JSONL audit log for later inspection.
    """

    def __init__(self, log_dir: str = "logs"):
        """Initialize with logging directory (created if missing)."""
        self.log_dir = Path(log_dir)
        self.log_dir.mkdir(exist_ok=True)
        self.audit_log = self.log_dir / "level0_audit.jsonl"

    def summarize(self, query: str, sources: List[str], metadata: Optional[Dict] = None) -> str:
        """
        Generate a Level-0 summary with raw evidence.

        Args:
            query: The original user query
            sources: List of source texts or evidence
            metadata: Optional metadata for auditing

        Returns:
            Formatted Level-0 summary string ("" when query or sources is empty)
        """
        if not query or not sources:
            return ""

        # Plain literals where possible — the originals were f-strings
        # without any placeholders.
        header = (
            "# Level-0 Summary\n"
            f"**Query:** {query}\n\n"
            "## Raw Evidence\n\n"
        )

        # Source numbers follow the caller's original ordering, so skipped
        # blank entries leave gaps in the numbering (intentional: numbers
        # stay traceable back to positions in the input list).
        evidence_sections = []
        for i, src in enumerate(sources, 1):
            if not src or not src.strip():
                continue
            evidence_sections.append(f"### Source {i}\n{src.strip()}\n")

        if not evidence_sections:
            return f"{header}*No valid evidence sources provided.*"

        summary = header + "\n".join(evidence_sections)

        # Log the summary for auditing (best-effort; failures are logged,
        # never raised).
        self._log_summary(query, sources, summary, metadata)

        return summary

    def _log_summary(self, query: str, sources: List[str], summary: str, metadata: Optional[Dict] = None):
        """Append the summary as a JSON line to the audit log; never raises."""
        try:
            log_entry = {
                "timestamp": time.time(),
                "query": query,
                "source_count": len(sources),
                "summary": summary,
                "metadata": metadata or {}
            }

            with open(self.audit_log, "a", encoding="utf-8") as f:
                f.write(json.dumps(log_entry) + "\n")

        except Exception as e:
            logger.error(f"Failed to log Level-0 summary: {str(e)}")
73
+
74
# Singleton instance for easy import; shared by get_level0_summary below.
level0_summarizer = Level0Summary()

def get_level0_summary(query: str, sources: List[str], metadata: Optional[Dict] = None) -> str:
    """Convenience wrapper: delegate to the module-level Level0Summary singleton.

    Args:
        query: The original user query.
        sources: List of source texts or evidence.
        metadata: Optional metadata recorded in the audit log.

    Returns:
        str: The formatted Level-0 summary ("" for empty query/sources).
    """
    return level0_summarizer.summarize(query, sources, metadata)
components/llm_backbone.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional, Union
2
+ import os
3
+ import logging
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class LLMBackbone:
    """
    A pluggable LLM backbone that supports multiple model types.
    Handles model initialization, text generation, and provides a unified interface.
    """

    def __init__(self, model_type: str = None, device: str = "cpu"):
        """
        Initialize the LLM backbone with the specified model type.

        Args:
            model_type: The type of LLM to use (e.g., 'deepseek', 'gpt-oss').
                Defaults to the TRUCAL_MODEL environment variable, then 'deepseek'.
            device: The device to run the model on ('cpu' or 'cuda')
        """
        self.model_type = model_type or os.environ.get("TRUCAL_MODEL", "deepseek")
        self.device = device
        self.model = self._initialize_model()

    def _initialize_model(self, model_type: Optional[str] = None) -> Any:
        """Initialize and return an LLM client for the given type.

        Args:
            model_type: Type to initialize; defaults to self.model_type.
                (Backward-compatible parameter added so switch_model can
                build the new model before mutating any state.)

        Raises:
            ValueError: If the model type is not supported.
            ImportError: If the backing integration cannot be imported.
        """
        model_type = model_type or self.model_type
        try:
            if model_type == "deepseek":
                from .deepseek_integration import DeepSeekClient
                return DeepSeekClient()
            elif model_type == "gpt-oss":
                from .gpt_oss import GPTOSSModel
                return GPTOSSModel(device=self.device)
            else:
                raise ValueError(f"Unsupported LLM type: {model_type}")
        except ImportError as e:
            logger.error(f"Failed to initialize {model_type} model: {e}")
            raise

    def generate(self, prompt: str, meta: Dict[str, Any] = None) -> str:
        """
        Generate a response using the configured LLM.

        Args:
            prompt: The input prompt for the model
            meta: Additional metadata for generation (model-specific)

        Returns:
            The generated text response, or a fallback message on failure.
        """
        if meta is None:
            meta = {}

        try:
            if self.model_type == "deepseek":
                return self.model.generate_response(prompt, meta)
            else:
                return self.model.generate(prompt, **meta)
        except Exception as e:
            # Deliberate best-effort: degrade to a canned message rather
            # than propagate generation failures to callers.
            logger.error(f"Error generating response with {self.model_type}: {e}")
            return self._get_fallback_response()

    def _get_fallback_response(self) -> str:
        """Return a fallback response when model generation fails."""
        return (
            "I'm having trouble generating a response at the moment. "
            "Please try again in a moment or contact support if the issue persists."
        )

    def switch_model(self, new_model_type: str) -> None:
        """
        Switch to a different model type at runtime.

        BUG FIX: the replacement model is fully initialized before any state
        is mutated, so a failed initialization leaves the backbone usable on
        its previous model. The original set self.model_type first, which
        left model_type and model inconsistent when initialization raised.

        Args:
            new_model_type: The new model type to switch to
        """
        if new_model_type != self.model_type:
            new_model = self._initialize_model(new_model_type)
            self.model = new_model
            self.model_type = new_model_type
            logger.info(f"Switched to model type: {new_model_type}")

    def get_model_info(self) -> Dict[str, Any]:
        """Get information about the current model configuration."""
        return {
            "model_type": self.model_type,
            "device": self.device,
            "supports_streaming": self.model_type == "deepseek"  # Example capability
        }
components/llm_integration.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LLM integration for TRuCAL's ethical reasoning engine.
3
+ Provides a flexible interface for transformer-based language models with attention mechanisms.
4
+ """
5
+ from typing import Optional, Dict, Any, List, Union
6
+ import torch
7
+ from transformers import (
8
+ AutoTokenizer,
9
+ AutoModelForCausalLM,
10
+ PreTrainedModel,
11
+ PreTrainedTokenizer
12
+ )
13
+ import requests
14
+ import json
15
+
16
class CustomLLMResponder:
    """
    A flexible LLM responder with support for custom attention mechanisms.

    Tries a locally running Ollama server first (when ``use_ollama`` is True)
    and falls back to loading a Hugging Face causal LM in-process.

    Args:
        model_path: Path or identifier of the pre-trained model
        device: Device to run the model on ('cuda', 'mps', or 'cpu')
        attn_layer: Optional custom attention layer
        use_ollama: Whether to try the Ollama HTTP API before a local model
        ollama_model: Name of the Ollama model to use
        ollama_base_url: Base URL of the Ollama API server
        **model_kwargs: Additional arguments for model initialization
    """
    def __init__(
        self,
        model_path: str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        device: Optional[str] = None,
        attn_layer: Optional[torch.nn.Module] = None,
        use_ollama: bool = True,
        ollama_model: str = "tinyllama",  # Default to tinyllama for better memory usage
        ollama_base_url: str = "http://localhost:11434",
        **model_kwargs
    ):
        self.use_ollama = use_ollama
        self.ollama_model = ollama_model
        self.ollama_base_url = ollama_base_url

        if self.use_ollama:
            # Probe the Ollama server. Explicit timeouts keep __init__ from
            # hanging indefinitely when the host is unreachable but filtered.
            try:
                response = requests.get(f"{self.ollama_base_url}/api/version", timeout=10)
                if response.status_code != 200:
                    raise ConnectionError("Ollama server not running or accessible")
                print(f"Connected to Ollama version: {response.json()['version']}")

                # List available models
                models = requests.get(f"{self.ollama_base_url}/api/tags", timeout=10).json()
                available_models = [m['name'] for m in models.get('models', [])]
                print(f"Available Ollama models: {', '.join(available_models)}")

                # If the specified model isn't available, try to pull it
                if self.ollama_model not in available_models:
                    print(f"Model {self.ollama_model} not found. Attempting to pull...")
                    pull_response = requests.post(
                        f"{self.ollama_base_url}/api/pull",
                        json={"name": self.ollama_model},
                        stream=True,
                        timeout=300  # model downloads can take minutes
                    )
                    for line in pull_response.iter_lines():
                        if not line:
                            continue
                        try:
                            # Progress comes back as newline-delimited JSON.
                            print(json.loads(line).get('status', ''))
                        except json.JSONDecodeError:
                            # Skip malformed progress lines rather than abort.
                            continue

            except Exception as e:
                print(f"Warning: Could not connect to Ollama: {e}")
                print("Falling back to local model...")
                self.use_ollama = False

        if not self.use_ollama:
            # Fall back to local model with reduced memory footprint
            try:
                self.device = device or self._get_default_device()
                print(f"Using device: {self.device}")

                # Load with lower precision and other optimizations.
                self.tokenizer = AutoTokenizer.from_pretrained(
                    model_path,
                    **{k: v for k, v in model_kwargs.items() if k != 'low_cpu_mem_usage'}
                )

                # Exclude BOTH keys we pass explicitly; previously only
                # 'torch_dtype' was filtered, so a user-supplied
                # 'low_cpu_mem_usage' raised a duplicate-keyword TypeError.
                self.model = AutoModelForCausalLM.from_pretrained(
                    model_path,
                    torch_dtype=torch.float16 if 'cuda' in str(self.device) else torch.float32,
                    low_cpu_mem_usage=True,
                    **{k: v for k, v in model_kwargs.items()
                       if k not in ('torch_dtype', 'low_cpu_mem_usage')}
                ).to(self.device)

                # Set up custom attention layer if provided
                self.attn_layer = attn_layer
                if self.attn_layer:
                    self.attn_layer = self.attn_layer.to(self.device)

            except Exception as e:
                print(f"Error loading local model: {e}")
                raise

    def _get_default_device(self) -> str:
        """Determine the best available device."""
        if torch.cuda.is_available():
            return "cuda"
        elif torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    def generate(
        self,
        prompt: str,
        max_length: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **generation_kwargs
    ) -> str:
        """
        Generate a response to the given prompt using either Ollama or local model.

        Args:
            prompt: Input text prompt
            max_length: Maximum length of the generated text
            temperature: Controls randomness (lower = more deterministic)
            top_p: Nucleus sampling parameter
            **generation_kwargs: Additional generation parameters

        Returns:
            Generated text response
        """
        if self.use_ollama:
            return self._generate_with_ollama(
                prompt=prompt,
                max_tokens=max_length,
                temperature=temperature,
                top_p=top_p,
                **generation_kwargs
            )

        # Local-model path: tokenize once, then dispatch to the custom or
        # standard generation loop.
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            return_attention_mask=True,
            truncation=True,
            max_length=2048
        ).to(self.device)

        if self.attn_layer:
            return self._generate_with_custom_attention(
                inputs,
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
                **generation_kwargs
            )
        return self._generate_standard(
            inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            **generation_kwargs
        )

    def _generate_with_ollama(
        self,
        prompt: str,
        max_tokens: int = 100,  # Reduced default for memory efficiency
        temperature: float = 0.7,
        top_p: float = 0.9,
        **generation_kwargs
    ) -> str:
        """Generate text using Ollama's API with optimized settings for low-memory environments."""
        url = f"{self.ollama_base_url}/api/generate"

        # Optimized defaults for low-memory environments
        options = {
            "temperature": temperature,
            "top_p": top_p,
            "num_predict": min(max_tokens, 200),  # Cap at 200 tokens
            "num_ctx": 512,        # Smaller context window
            "num_thread": 4,       # Limit CPU threads
            "num_gpu": 0,          # Force CPU to avoid GPU memory issues
            "repeat_last_n": 64,   # Reduce memory for repetition penalty
            "repeat_penalty": 1.1, # Slight penalty for repetition
            "top_k": 40,           # Limit top-k sampling
            **generation_kwargs
        }

        # For tinyllama, we can use more aggressive memory optimizations
        if "tinyllama" in self.ollama_model.lower():
            options.update({
                "num_ctx": 1024,                        # Slightly larger context for better responses
                "num_predict": min(max_tokens, 150),    # Slightly longer responses
                "temperature": max(0.5, temperature)    # Ensure temperature isn't too low
            })

        payload = {
            "model": self.ollama_model,
            "prompt": prompt,
            "stream": False,  # Disable streaming for simplicity
            "options": options
        }

        try:
            # Add timeout to prevent hanging
            response = requests.post(url, json=payload, timeout=60)
            response.raise_for_status()

            result = response.json()
            if "response" not in result:
                return f"Error: Invalid response format from Ollama: {result}"

            return result["response"]

        except requests.exceptions.RequestException as e:
            return f"Error generating response with Ollama: {str(e)}"
        except json.JSONDecodeError as e:
            return f"Error parsing Ollama response: {str(e)}"

    def _generate_standard(
        self,
        inputs: Dict[str, torch.Tensor],
        max_length: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **generation_kwargs
    ) -> str:
        """Generate text using the standard model implementation."""
        outputs = self.model.generate(
            **inputs,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            pad_token_id=self.tokenizer.eos_token_id,
            **generation_kwargs
        )
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def _generate_with_custom_attention(
        self,
        inputs: Dict[str, torch.Tensor],
        max_length: int,
        temperature: float,
        top_p: float,
        **generation_kwargs
    ) -> str:
        """Generate text token-by-token through the custom attention layer.

        NOTE(review): the attention-mask extension below hard-codes a
        (1, 1) ones tensor, so this loop assumes batch size 1 — confirm
        before calling with batched inputs.
        """
        input_ids = inputs['input_ids']
        attention_mask = inputs['attention_mask']

        # Generate tokens one by one
        for _ in range(max_length):
            with torch.no_grad():
                # Get model outputs
                outputs = self.model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    output_hidden_states=True,
                    return_dict=True
                )

                # Apply custom attention layer
                hidden_states = outputs.hidden_states[-1]  # Last layer hidden states
                attended = self.attn_layer(hidden_states)

                # Get logits from the attended representation
                logits = self.model.lm_head(attended[:, -1, :])

                # Apply temperature and top-p sampling
                logits = logits / temperature

                # Convert to probabilities and apply top-p filtering
                probs = torch.softmax(logits, dim=-1)
                sorted_probs, sorted_indices = torch.sort(probs, descending=True)
                cumulative_probs = torch.cumsum(sorted_probs, dim=-1)

                # Remove tokens with cumulative probability above the threshold
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift the indices to the right to keep the first token above the threshold
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = 0

                # Scatter sorted tensors to original indexing
                indices_to_remove = sorted_indices_to_remove.scatter(
                    1, sorted_indices, sorted_indices_to_remove
                )
                probs[indices_to_remove] = 0

                # Sample from the filtered (unnormalized) distribution;
                # torch.multinomial renormalizes internally.
                next_token = torch.multinomial(probs, num_samples=1)

                # Append the token to the input for the next step
                input_ids = torch.cat([input_ids, next_token], dim=-1)
                attention_mask = torch.cat([
                    attention_mask,
                    torch.ones((1, 1), device=attention_mask.device, dtype=attention_mask.dtype)
                ], dim=1)

                # Stop if we've reached the end of sequence token
                if next_token.item() == self.tokenizer.eos_token_id:
                    break

        # Decode and return only the newly generated suffix
        return self.tokenizer.decode(
            input_ids[0][inputs['input_ids'].shape[1]:],
            skip_special_tokens=True
        )

    def __call__(self, *args, **kwargs):
        """Make the instance callable like a function."""
        return self.generate(*args, **kwargs)
310
+
311
+ # Example usage:
312
+ # llm = CustomLLMResponder(attn_layer=YourCustomAttentionLayer())
313
+ # response = llm("What are the ethical implications of AI?")
314
+ # print(response)
components/llm_integration_enhanced.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced LLM integration for TRuCAL with virtue tension awareness.
3
+ """
4
+ from typing import Optional, Dict, Any, List, Union, Tuple
5
+ import torch
6
+ from transformers import (
7
+ AutoTokenizer,
8
+ AutoModelForCausalLM,
9
+ PreTrainedModel,
10
+ PreTrainedTokenizer,
11
+ GenerationConfig
12
+ )
13
+ import requests
14
+ import json
15
+ import logging
16
+ from .virtue_tension_engine import VirtueTensionEngine
17
+ from .attention_gating import patch_attention_layers, apply_attention_gating
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
class EnhancedLLMResponder:
    """
    Enhanced LLM responder with virtue tension awareness and attention gating.

    Wraps either a local Hugging Face causal LM (with patched attention
    layers) or a remote Ollama server, and modulates generation parameters
    based on a virtue-tension score computed from the input text.
    """

    def __init__(
        self,
        model_path: str = "mistralai/Mistral-7B-v0.1",
        device: Optional[str] = None,
        use_ollama: bool = False,
        ollama_model: str = "llama2",
        ollama_base_url: str = "http://localhost:11434",
        enable_tension_awareness: bool = True,
        **model_kwargs
    ):
        """
        Initialize the enhanced LLM responder.

        Args:
            model_path: Path or identifier of the pre-trained model
            device: Device to run the model on ('cuda', 'mps', or 'cpu')
            use_ollama: Whether to use Ollama API
            ollama_model: Model name for Ollama
            ollama_base_url: Base URL for Ollama API
            enable_tension_awareness: Whether to enable tension-aware processing
            **model_kwargs: Additional model initialization arguments
        """
        self.use_ollama = use_ollama
        self.ollama_model = ollama_model
        self.ollama_base_url = ollama_base_url
        self.enable_tension_awareness = enable_tension_awareness

        # Initialize tension engine if enabled
        if self.enable_tension_awareness:
            self.tension_engine = VirtueTensionEngine(device)
        else:
            self.tension_engine = None

        if self.use_ollama:
            self._init_ollama()
        else:
            self._init_local_model(model_path, device, model_kwargs)

    def _init_ollama(self):
        """Verify the Ollama server is reachable; raise RuntimeError if not."""
        try:
            # Timeout prevents startup from hanging on an unreachable host.
            response = requests.get(f"{self.ollama_base_url}/api/tags", timeout=10)
            if response.status_code != 200:
                raise ConnectionError("Ollama server not running or accessible")
            logger.info(f"Connected to Ollama. Available models: {response.json()}")
        except Exception as e:
            logger.error(f"Could not connect to Ollama: {e}")
            raise RuntimeError("Failed to initialize Ollama connection") from e

    def _init_local_model(self, model_path: str, device: Optional[str], model_kwargs: dict):
        """Initialize local model and tokenizer, patching attention if enabled."""
        self.device = device or self._get_default_device()
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, **model_kwargs)

        # Load model with appropriate device map
        if 'device_map' not in model_kwargs and self.device.startswith('cuda'):
            model_kwargs['device_map'] = 'auto'

        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            **model_kwargs
        ).to(self.device)

        # Patch attention layers if tension awareness is enabled
        if self.enable_tension_awareness:
            patch_attention_layers(self.model)

        # Set model to evaluation mode
        self.model.eval()
        logger.info(f"Model loaded on {self.device} with tension awareness: {self.enable_tension_awareness}")

    def _get_default_device(self) -> str:
        """Determine the best available device."""
        if torch.cuda.is_available():
            return "cuda"
        elif torch.backends.mps.is_available():
            return "mps"
        return "cpu"

    def _num_attention_heads(self) -> int:
        """Head count of the local model, or 12 when no local model exists.

        In Ollama mode ``self.model`` is never created; the previous code
        dereferenced it unconditionally and raised AttributeError.
        """
        model = getattr(self, 'model', None)
        if model is None:
            return 12
        return getattr(model.config, 'num_attention_heads', 12)

    def analyze_tension(
        self,
        text: str,
        biofeedback: Optional[Dict[str, float]] = None
    ) -> Dict[str, Any]:
        """
        Analyze text tension and get head-specific information.

        Args:
            text: Input text to analyze
            biofeedback: Optional biofeedback data

        Returns:
            Dictionary containing tension analysis
        """
        num_heads = self._num_attention_heads()

        if not self.enable_tension_awareness or self.tension_engine is None:
            # Neutral defaults when tension processing is disabled.
            return {
                'tension': 0.5,
                'head_weights': torch.ones(num_heads),
                'biofeedback': biofeedback or {}
            }

        # Get tension score and head importance weighting
        v_t = self.tension_engine.compute_tension(text, biofeedback)
        head_weights = self.tension_engine.get_head_importance(v_t)

        head_info = {
            'tension': v_t,
            'head_weights': head_weights,
            'biofeedback': biofeedback or {}
        }

        # Tag heads for gating depending on how elevated the tension is.
        if v_t > 0.75:  # High tension
            head_info['trauma_heads'] = list(range(max(1, int(0.3 * num_heads))))
            head_info['calming_heads'] = list(range(num_heads - max(1, int(0.2 * num_heads)), num_heads))
        elif v_t > 0.5:  # Moderate tension
            head_info['high_var_heads'] = list(range(max(1, int(0.4 * num_heads))))

        return head_info

    def generate(
        self,
        prompt: str,
        max_length: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        biofeedback: Optional[Dict[str, float]] = None,
        **generation_kwargs
    ) -> str:
        """
        Generate a response with tension-aware processing.

        Args:
            prompt: Input text prompt
            max_length: Maximum length of the generated text
            temperature: Sampling temperature (0.0 to 1.0)
            top_p: Nucleus sampling parameter
            biofeedback: Optional biofeedback data
            **generation_kwargs: Additional generation parameters

        Returns:
            Generated text response
        """
        if self.use_ollama:
            return self._generate_with_ollama(
                prompt=prompt,
                max_tokens=max_length,
                temperature=temperature,
                top_p=top_p,
                **generation_kwargs
            )

        return self._generate_with_local_model(
            prompt=prompt,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            biofeedback=biofeedback,
            **generation_kwargs
        )

    def _generate_with_local_model(
        self,
        prompt: str,
        max_length: int,
        temperature: float,
        top_p: float,
        biofeedback: Optional[Dict[str, float]] = None,
        **generation_kwargs
    ) -> str:
        """Generate text using the local model with tension awareness."""
        # Analyze tension and get head information
        virtue_meta = self.analyze_tension(prompt, biofeedback)

        # Adjust generation parameters based on tension
        if self.enable_tension_awareness:
            v_t = virtue_meta['tension']

            # Higher tension -> lower temperature for more focused responses
            temperature = max(0.2, temperature * (1.0 - (v_t * 0.5)))

            # Tighter top-p sampling when tense
            if v_t > 0.7:
                top_p = max(0.7, top_p * 0.9)

        # Prepare generation config. generation_kwargs are folded in HERE,
        # so they must not be passed to model.generate() a second time
        # (previously they were, causing duplicate-keyword errors).
        generation_config = GenerationConfig(
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            **generation_kwargs
        )

        # Tokenize input
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)

        # Generate with attention gating
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                generation_config=generation_config,
                # Consumed by the patched attention layers for gating.
                virtue_meta=virtue_meta
            )

        # Decode and return
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response

    def _generate_with_ollama(
        self,
        prompt: str,
        max_tokens: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **kwargs
    ) -> str:
        """Generate text using the Ollama API."""
        try:
            response = requests.post(
                f"{self.ollama_base_url}/api/generate",
                json={
                    "model": self.ollama_model,
                    "prompt": prompt,
                    # Ollama reads sampling parameters from "options";
                    # top-level "max_tokens"/"temperature" keys are ignored
                    # by the API, so they must be nested here.
                    "options": {
                        "num_predict": max_tokens,
                        "temperature": temperature,
                        "top_p": top_p,
                        **kwargs
                    }
                },
                timeout=60
            )
            response.raise_for_status()

            # The endpoint streams newline-delimited JSON chunks by default;
            # concatenate each chunk's "response" field.
            response_text = ""
            for line in response.text.split('\n'):
                if line.strip():
                    try:
                        chunk = json.loads(line)
                        response_text += chunk.get('response', '')
                    except json.JSONDecodeError:
                        continue

            return response_text.strip()

        except requests.exceptions.RequestException as e:
            logger.error(f"Error calling Ollama API: {e}")
            raise RuntimeError("Failed to generate response using Ollama") from e

    def chat(
        self,
        message: str,
        history: Optional[List[Dict[str, str]]] = None,
        context: Optional[Dict[str, Any]] = None,
        biofeedback: Optional[Dict[str, float]] = None
    ) -> str:
        """
        Chat interface with context and history support.

        Args:
            message: User message
            history: List of previous messages in the conversation
            context: Additional context for the conversation
            biofeedback: Optional biofeedback data

        Returns:
            Generated response
        """
        # Build the prompt with context and history
        prompt = self._build_chat_prompt(message, history, context)

        # Generate response with tension awareness
        return self.generate(
            prompt=prompt,
            biofeedback=biofeedback
        )

    def _build_chat_prompt(
        self,
        message: str,
        history: Optional[List[Dict[str, str]]] = None,
        context: Optional[Dict[str, Any]] = None
    ) -> str:
        """Build a "System:/User:/Assistant:" transcript-style prompt."""
        prompt_parts = []

        # Add system message if context is provided
        if context and 'system_message' in context:
            prompt_parts.append(f"System: {context['system_message']}")

        # Add conversation history
        if history:
            for turn in history:
                if 'user' in turn:
                    prompt_parts.append(f"User: {turn['user']}")
                if 'assistant' in turn:
                    prompt_parts.append(f"Assistant: {turn['assistant']}")

        # Add current message and leave an open Assistant turn
        prompt_parts.append(f"User: {message}")
        prompt_parts.append("Assistant:")

        return "\n".join(prompt_parts)
333
+
334
+
335
def test_enhanced_llm():
    """Smoke-test EnhancedLLMResponder across a few tension scenarios."""
    import time

    # A small local model keeps the smoke test lightweight.
    llm = EnhancedLLMResponder(
        model_path="gpt2",  # Use a small model for testing
        device="cpu",
        enable_tension_awareness=True
    )

    scenarios = [
        ("I'm feeling really anxious and unsafe right now", {"hrv": 25, "gsr": 0.8}),
        ("I'm feeling calm and in control", {"hrv": 150, "gsr": 0.2}),
        ("This is a neutral test message", None),
    ]

    for prompt, bio in scenarios:
        print(f"\n{'='*50}")
        print(f"Input: {prompt}")
        print(f"Biofeedback: {bio}")

        # Report the tension score before generating.
        tension_info = llm.analyze_tension(prompt, bio)
        print(f"Tension score: {tension_info['tension']:.3f}")

        # Time the end-to-end chat call.
        start_time = time.time()
        response = llm.chat(prompt, biofeedback=bio)
        elapsed = time.time() - start_time

        print(f"Response: {response}")
        print(f"Generated in {elapsed:.2f} seconds")


if __name__ == "__main__":
    test_enhanced_llm()
components/ollama_integration.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Ollama integration for TRuCAL's ethical reasoning engine.
3
+ Provides a simple interface for interacting with Ollama's API.
4
+ """
5
+ import json
6
+ import time
7
+ from typing import Optional, Dict, Any, List, Union
8
+ import requests
9
+ from pathlib import Path
10
+ import logging
11
+
12
class OllamaResponder:
    """
    A client for interacting with Ollama's API for text generation.

    Args:
        base_url: Base URL of the Ollama API server
        default_model: Default model to use for generation
        timeout: Request timeout in seconds
        max_retries: Maximum number of retries for failed requests
    """
    def __init__(
        self,
        base_url: str = "http://localhost:11434",
        default_model: str = "llama2",
        timeout: int = 60,
        max_retries: int = 3
    ):
        self.base_url = base_url.rstrip('/')
        self.default_model = default_model
        self.timeout = timeout
        self.max_retries = max_retries
        # A shared session reuses connections across requests.
        self.session = requests.Session()
        self.logger = logging.getLogger(__name__)

    def generate(
        self,
        prompt: str,
        model: Optional[str] = None,
        system_prompt: Optional[str] = None,
        max_tokens: int = 200,
        temperature: float = 0.7,
        top_p: float = 0.9,
        **generation_kwargs
    ) -> str:
        """
        Generate a response using the Ollama API.

        Args:
            prompt: Input text prompt
            model: Model to use (defaults to instance default)
            system_prompt: Optional system prompt to set the model's behavior
            max_tokens: Maximum number of tokens to generate
            temperature: Controls randomness (lower = more deterministic)
            top_p: Nucleus sampling parameter
            **generation_kwargs: Additional generation parameters

        Returns:
            Generated text response

        Raises:
            requests.exceptions.RequestException: when every retry fails.
        """
        chosen_model = model or self.default_model
        endpoint = f"{self.base_url}/api/generate"

        # Caller-supplied options deliberately override the named params.
        sampling_options = {
            "temperature": temperature,
            "top_p": top_p,
            "num_predict": max_tokens,
        }
        sampling_options.update(generation_kwargs)

        payload: Dict[str, Any] = {
            "model": chosen_model,
            "prompt": prompt,
            "stream": False,
            "options": sampling_options,
        }
        if system_prompt:
            payload["system"] = system_prompt

        # One initial attempt plus up to max_retries retries.
        for attempt in range(self.max_retries + 1):
            try:
                reply = self.session.post(endpoint, json=payload, timeout=self.timeout)
                reply.raise_for_status()
                return reply.json()["response"]
            except requests.exceptions.RequestException as exc:
                if attempt == self.max_retries:
                    self.logger.error(f"Failed after {self.max_retries} attempts: {str(exc)}")
                    raise
                # Exponential backoff: 0.5s, 1s, 2s, ...
                retry_delay = (2 ** attempt) * 0.5
                self.logger.warning(
                    f"Request failed (attempt {attempt + 1}/{self.max_retries}): "
                    f"{str(exc)}. Retrying in {retry_delay:.1f}s..."
                )
                time.sleep(retry_delay)

    def list_models(self) -> List[Dict[str, Any]]:
        """List available models from the Ollama server."""
        try:
            reply = self.session.get(
                f"{self.base_url}/api/tags",
                timeout=self.timeout
            )
            reply.raise_for_status()
            return reply.json().get("models", [])
        except requests.exceptions.RequestException as exc:
            self.logger.error(f"Failed to list models: {str(exc)}")
            return []

    def pull_model(self, model_name: str) -> bool:
        """
        Pull a model from the Ollama library.

        Args:
            model_name: Name of the model to pull (e.g., 'llama2')

        Returns:
            bool: True if successful, False otherwise
        """
        try:
            reply = self.session.post(
                f"{self.base_url}/api/pull",
                json={"name": model_name},
                stream=True,
                timeout=300  # Long timeout for model downloads
            )
            reply.raise_for_status()

            # Stream the newline-delimited JSON progress lines, logging
            # each reported status; malformed lines are skipped.
            for raw_line in reply.iter_lines():
                if not raw_line:
                    continue
                try:
                    progress = json.loads(raw_line)
                except json.JSONDecodeError:
                    continue
                if "status" in progress:
                    self.logger.info(progress["status"])

            return True

        except requests.exceptions.RequestException as exc:
            self.logger.error(f"Failed to pull model {model_name}: {str(exc)}")
            return False
+
149
+ # Example usage:
150
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.INFO)

    # Initialize the responder
    ollama = OllamaResponder(default_model="llama2")

    # List available models
    print("Available models:")
    # Fixed typo: previously called the non-existent list_odels(),
    # which raised AttributeError at runtime.
    for model in ollama.list_models():
        print(f"- {model['name']} ({model.get('size', 'N/A')})")

    # Generate a response
    response = ollama.generate(
        prompt="Explain the ethical implications of artificial intelligence in one paragraph.",
        temperature=0.7,
        max_tokens=200
    )
    print("\nGenerated response:")
    print(response)
components/purpose_assessment.py ADDED
@@ -0,0 +1,214 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Purpose Assessment Engine for TRuCAL
3
+
4
+ Analyzes user interactions to determine core purpose dimensions and values.
5
+ """
6
+ from typing import Dict, List, Tuple, Optional
7
+ import numpy as np
8
+ from dataclasses import dataclass
9
+ from enum import Enum
10
+
11
class PurposeDimension(str, Enum):
    """Closed set of core purpose dimensions scored by the assessment engine.

    Inherits from ``str`` so members compare and serialize as their
    string values (e.g. ``PurposeDimension.JUSTICE == "justice_orientation"``).
    """
    JUSTICE = "justice_orientation"
    COMMUNITY = "community_focus"
    GROWTH = "growth_mindset"
    SELF_EXPRESSION = "self_expression"
    AUTONOMY = "autonomy"
    COMPASSION = "compassion"
    MASTERY = "mastery"
    HARMONY = "harmony"
21
@dataclass
class PurposeProfile:
    """Container for a user's purpose assessment results."""
    # Dimension with the highest score for this user.
    primary_dimension: PurposeDimension
    # Score per assessed dimension (presumably in [0, 1] — matches analyze_text's range).
    dimension_scores: Dict[PurposeDimension, float]
    # Overall confidence in the assessment.
    confidence: float

    def to_dict(self) -> Dict[str, object]:
        """Serialize to a JSON-friendly dict with plain-string keys.

        Note: the previous annotation ``Dict[str, any]`` referenced the
        builtin ``any`` *function*, not ``typing.Any``; ``object`` states
        the intended "heterogeneous values" contract without needing a
        new import.
        """
        return {
            'primary_purpose': self.primary_dimension.value,
            'purpose_scores': {k.value: v for k, v in self.dimension_scores.items()},
            'confidence': self.confidence
        }
34
+
35
class PurposeAssessmentEngine:
    """
    Analyzes text and interactions to determine a user's core purpose dimensions.
    Uses a combination of keyword matching and semantic analysis.

    Fixes over the previous version:
      * Keywords are matched against whole words (with edge punctuation
        stripped), so short keywords like "we" or "art" no longer fire on
        substrings of unrelated words ("power", "start").
      * The duplicate 'together' keyword in COMMUNITY no longer double-counts.
    """

    # Punctuation stripped from word edges before keyword matching, so
    # e.g. "fair," still matches the keyword "fair".
    _EDGE_PUNCTUATION = '.,!?;:"\'()[]'

    def __init__(self):
        # Weighting for blending the two analysis methods into one score.
        self.keyword_weight = 0.4
        self.semantic_weight = 0.6

        # Per-dimension keyword vocabularies and human-readable descriptions.
        self.dimensions = {
            PurposeDimension.JUSTICE: {
                'keywords': [
                    'fair', 'unfair', 'justice', 'rights', 'equality',
                    'wrong', 'right', 'ethical', 'moral', 'equity'
                ],
                'description': 'Focus on fairness, ethics, and moral correctness'
            },
            PurposeDimension.COMMUNITY: {
                'keywords': [
                    'we', 'us', 'together', 'community', 'support',
                    'help', 'team', 'belong', 'connect'
                ],
                'description': 'Focus on social connections and community building'
            },
            PurposeDimension.GROWTH: {
                'keywords': [
                    'learn', 'grow', 'improve', 'develop', 'better',
                    'progress', 'evolve', 'advance', 'enhance', 'master'
                ],
                'description': 'Focus on personal development and learning'
            },
            PurposeDimension.SELF_EXPRESSION: {
                'keywords': [
                    'feel', 'think', 'believe', 'express', 'voice',
                    'opinion', 'perspective', 'create', 'art', 'share'
                ],
                'description': 'Focus on self-expression and authenticity'
            },
            PurposeDimension.AUTONOMY: {
                'keywords': [
                    'free', 'choose', 'decide', 'control', 'independent',
                    'freedom', 'self', 'own', 'autonomy', 'sovereign'
                ],
                'description': 'Focus on independence and self-determination'
            },
            PurposeDimension.COMPASSION: {
                'keywords': [
                    'care', 'kind', 'empathy', 'understand', 'support',
                    'help', 'ease', 'comfort', 'nurture', 'heal'
                ],
                'description': 'Focus on caring for others and emotional support'
            },
            PurposeDimension.MASTERY: {
                'keywords': [
                    'skill', 'master', 'excel', 'achieve', 'succeed',
                    'expert', 'professional', 'best', 'top', 'win'
                ],
                'description': 'Focus on achievement and skill development'
            },
            PurposeDimension.HARMONY: {
                'keywords': [
                    'peace', 'balance', 'calm', 'serene', 'tranquil',
                    'flow', 'zen', 'center', 'equilibrium', 'still'
                ],
                'description': 'Focus on balance and inner peace'
            }
        }

    def analyze_text(self, text: str) -> Dict[PurposeDimension, float]:
        """
        Analyze a single text for purpose indicators.

        Returns:
            Dictionary mapping purpose dimensions to their scores (0-1)
        """
        if not text or not isinstance(text, str):
            return {dim: 0.0 for dim in PurposeDimension}

        text_lower = text.lower()
        words = text_lower.split()
        total_words = max(1, len(words))  # Avoid division by zero

        # Whole-word vocabulary of the text for keyword matching.
        word_set = {w.strip(self._EDGE_PUNCTUATION) for w in words}

        scores = {}

        # Calculate keyword-based scores.
        for dim, config in self.dimensions.items():
            # Count distinct keywords present as whole words (presence, not frequency).
            matches = sum(1 for kw in config['keywords'] if kw in word_set)
            # Normalize by text length and scale to [0,1].
            # Scale factor 10 makes scores more meaningful for short texts.
            keyword_score = min(1.0, (matches / total_words) * 10)

            # Store weighted score.
            scores[dim] = keyword_score * self.keyword_weight

        # Add semantic analysis (placeholder - could be enhanced with embeddings).
        # For now we just boost scores based on certain phrase patterns.
        semantic_boost = self._analyze_semantic_patterns(text_lower)
        for dim, boost in semantic_boost.items():
            scores[dim] = min(1.0, scores.get(dim, 0) + (boost * self.semantic_weight))

        return scores

    def _analyze_semantic_patterns(self, text: str) -> Dict[PurposeDimension, float]:
        """Analyze text for semantic patterns indicating purpose dimensions.

        Multi-word phrases are intentionally matched as substrings here.
        """
        boosts = {dim: 0.0 for dim in PurposeDimension}

        # Example patterns (could be expanded with more sophisticated NLP).
        if any(phrase in text for phrase in ['i feel', 'i think', 'i believe']):
            boosts[PurposeDimension.SELF_EXPRESSION] += 0.3

        if any(phrase in text for phrase in ['we should', 'let\'s', 'together we']):
            boosts[PurposeDimension.COMMUNITY] += 0.4

        if any(phrase in text for phrase in ['unfair', 'not right', 'should be']):
            boosts[PurposeDimension.JUSTICE] += 0.5

        if any(phrase in text for phrase in ['learn', 'grow', 'improve']):
            boosts[PurposeDimension.GROWTH] += 0.4

        return boosts

    def assess_conversation(self, messages: List[Dict[str, str]]) -> PurposeProfile:
        """
        Analyze a conversation to determine the user's purpose profile.

        Args:
            messages: List of message dicts with 'role' and 'content' keys

        Returns:
            PurposeProfile containing assessment results
        """
        if not messages:
            return self._default_profile()

        # Only analyze user messages.
        user_messages = [
            msg['content'] for msg in messages
            if msg.get('role') == 'user' and msg.get('content')
        ]

        if not user_messages:
            return self._default_profile()

        # Combine all user messages for analysis.
        combined_text = ' '.join(user_messages)

        # Get scores for the combined text.
        dimension_scores = self.analyze_text(combined_text)

        # Find primary dimension (ties resolved by iteration order).
        primary_dimension = max(
            dimension_scores.items(),
            key=lambda x: x[1]
        )[0]

        # Calculate confidence (normalized difference between top 2 dimensions).
        sorted_scores = sorted(dimension_scores.values(), reverse=True)
        confidence = (
            (sorted_scores[0] - sorted_scores[1]) / max(0.1, sorted_scores[0])
            if len(sorted_scores) > 1 and sorted_scores[0] > 0
            else 0.5  # Default confidence if scores are tied or zero
        )

        return PurposeProfile(
            primary_dimension=primary_dimension,
            dimension_scores=dimension_scores,
            confidence=min(1.0, max(0.0, confidence))  # Clamp to [0,1]
        )

    def _default_profile(self) -> PurposeProfile:
        """Return a neutral/default purpose profile (GROWTH, zero scores)."""
        return PurposeProfile(
            primary_dimension=PurposeDimension.GROWTH,
            dimension_scores={dim: 0.0 for dim in PurposeDimension},
            confidence=0.0
        )
components/purpose_realms.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Purpose-Driven Realms - 3D environments that propel users into real-world agency
3
+
4
+ Implements the realm experiences that connect digital therapy with meaningful action,
5
+ helping users build real-world skills and habits through immersive experiences.
6
+ """
7
+
8
+ from typing import Dict, Any, List, Optional
9
+ from datetime import datetime, timedelta
10
+ from .reality_bridge import RealityBridge
11
+
12
class PurposeDrivenRealm:
    """3D environments designed to drive real-world action and transformation.

    Each realm pairs an in-experience purpose with an explicit real-world
    outcome; every visit ends with an "exit mission" generated by the
    RealityBridge so the experience carries into the user's actual life.
    """

    def __init__(self):
        # Static catalog of realm definitions keyed by realm id.
        self.realm_designs = self._create_purpose_realms()
        # Bridge used to turn realm exits into concrete real-world missions.
        self.action_bridge = RealityBridge()

    def _create_purpose_realms(self) -> Dict[str, Dict]:
        """Design realms with explicit real-world outcomes and purposes."""
        return {
            "agency_garden": {
                "purpose": "Practice small acts of control and creation",
                "real_world_outcome": "User transforms one small physical space",
                "activities": [
                    "Virtual room organization practice",
                    "Digital boundary setting simulations",
                    "Mini-creation exercises (arrange digital flowers)"
                ],
                "exit_challenge": "The 5-Minute Room Rescue mission"
            },

            "community_hub": {
                "purpose": "Connect with real local organizations",
                "real_world_outcome": "User identifies one community group to explore",
                "activities": [
                    "Virtual organization 'booths'",
                    "Community need mapping",
                    "Volunteer role-playing"
                ],
                "exit_challenge": "Local Goodness Scout mission"
            },

            "purpose_workshop": {
                "purpose": "Discover and develop natural talents",
                "real_world_outcome": "User identifies one skill to develop",
                "activities": [
                    "Talent discovery exercises",
                    "Skill-building simulations",
                    "Purpose alignment games"
                ],
                "exit_challenge": "Skills-Inventory mission"
            },

            "boundary_dojo": {
                "purpose": "Practice setting and maintaining healthy boundaries",
                "real_world_outcome": "User drafts and sends one boundary message",
                "activities": [
                    "Boundary role-playing scenarios",
                    "No-practice drills",
                    "Assertiveness training"
                ],
                "exit_challenge": "Boundary Text Draft mission"
            },

            "mirror_hall": {
                "purpose": "Deep self-reflection and insight generation",
                "real_world_outcome": "User gains clarity on personal values and strengths",
                "activities": [
                    "Guided self-reflection prompts",
                    "Values clarification exercises",
                    "Strengths identification"
                ],
                # NOTE(review): same exit challenge as purpose_workshop —
                # confirm this is intentional (both map to the skills-inventory
                # action in RealityBridge).
                "exit_challenge": "Skills-Inventory mission"
            }
        }

    def get_realm_experience(self, realm_id: str, user_profile: Optional[Dict] = None) -> Dict[str, Any]:
        """
        Get a fully prepared realm experience for the user.

        Args:
            realm_id: The ID of the realm to enter
            user_profile: Optional user profile for personalization

        Returns:
            Dictionary containing the complete realm experience
        """
        if realm_id not in self.realm_designs:
            # Default to agency_garden if realm not found (no error raised).
            realm_id = "agency_garden"

        realm_data = self.realm_designs[realm_id]

        # Personalize activities if user profile is available
        activities = self._personalize_activities(realm_data["activities"], user_profile)

        return {
            "realm": realm_id,
            "purpose": realm_data["purpose"],
            "real_world_outcome": realm_data["real_world_outcome"],
            "activities": activities,
            "exit_preparation": self._prepare_exit_sequence(realm_id, user_profile or {}),
            "dfw_framing": self._get_realm_framing(realm_id)
        }

    def _personalize_activities(self, activities: List[str], user_profile: Optional[Dict]) -> List[str]:
        """Personalize activities based on user profile if available."""
        # In a real implementation, this would use the user's history and preferences
        # to customize the activities. For now, we'll just return the default activities.
        return activities

    def _prepare_exit_sequence(self, realm_id: str, user_profile: Dict) -> Dict[str, Any]:
        """Prepare the transition back to reality with purpose.

        Delegates mission generation to the RealityBridge; anonymous users
        (no 'user_id' in the profile) are tracked under "anonymous".
        """
        return {
            "transition_phrase": "Bring this back to your world...",
            "mission_briefing": self.action_bridge.generate_realm_exit_mission(
                user_id=user_profile.get("user_id", "anonymous"),
                realm_experience={"current_location": realm_id}
            ),
            "reentry_support": "Remember: The capital-T Truth is about life BEFORE death.",
            "follow_up_timing": "24 hours for mission check-in"
        }

    def _get_realm_framing(self, realm_id: str) -> str:
        """DFW wisdom to frame the realm experience."""
        framings = {
            "agency_garden": "This is practice for choosing what to care about in your actual life.",
            "community_hub": "Learning that there are real people out there who need what only you can give.",
            "purpose_workshop": "Discovering what you'd care about even if nobody were watching.",
            "boundary_dojo": "The really important kind of freedom involves attention and discipline.",
            "mirror_hall": "You get to consciously decide what has meaning and what doesn't."
        }
        return framings.get(realm_id, "This is water. This is water.")

    def list_available_realms(self, user_profile: Optional[Dict] = None) -> List[Dict[str, Any]]:
        """
        Get a list of all available realms with their metadata.

        Args:
            user_profile: Optional user profile for personalization
                (currently unused)

        Returns:
            List of realm metadata dictionaries
        """
        realms = []
        for realm_id, realm_data in self.realm_designs.items():
            realms.append({
                "id": realm_id,
                # Human-readable name derived from the id, e.g. "Agency Garden".
                "name": realm_id.replace("_", " ").title(),
                "purpose": realm_data["purpose"],
                "outcome": realm_data["real_world_outcome"],
                "activity_count": len(realm_data["activities"])
            })

        return realms

    def track_realm_completion(self, user_id: str, realm_id: str, completion_metrics: Dict) -> Dict[str, Any]:
        """
        Track completion of a realm experience.

        Args:
            user_id: The ID of the user
            realm_id: The ID of the completed realm
            completion_metrics: Dictionary of metrics about the completion
                (currently unused)

        Returns:
            Dictionary with results of the tracking operation
        """
        # In a full implementation, this would update user progress, unlock new realms, etc.
        return {
            "status": "success",
            "realm_completed": realm_id,
            "unlocked_realms": self._get_unlocked_realms(user_id, realm_id),
            "next_steps": self._suggest_next_steps(user_id, realm_id)
        }

    def _get_unlocked_realms(self, user_id: str, completed_realm: str) -> List[str]:
        """Determine which realms should be unlocked after completing a realm."""
        # In a real implementation, this would check the user's progress and unlock
        # new realms based on their journey. For now, we'll just return some defaults.
        unlock_map = {
            "agency_garden": ["boundary_dojo", "mirror_hall"],
            "boundary_dojo": ["community_hub"],
            "mirror_hall": ["purpose_workshop"],
            "community_hub": [],
            "purpose_workshop": []
        }
        return unlock_map.get(completed_realm, [])

    def _suggest_next_steps(self, user_id: str, completed_realm: str) -> List[Dict[str, str]]:
        """Suggest next steps for the user after completing a realm.

        Each suggestion is {'type': 'realm'|'action', 'id': ..., 'reason': ...}.
        """
        suggestions = []

        # Based on the completed realm, suggest relevant next steps.
        # Only agency_garden currently has tailored suggestions.
        if completed_realm == "agency_garden":
            suggestions.append({
                "type": "realm",
                "id": "boundary_dojo",
                "reason": "Practice setting boundaries to protect your new safe space"
            })
            suggestions.append({
                "type": "action",
                "id": "room_rescue_5min",
                "reason": "Try the 5-Minute Room Rescue in another area"
            })

        # Add a default suggestion if no specific ones match
        if not suggestions:
            suggestions.append({
                "type": "realm",
                "id": "agency_garden",
                "reason": "Build your sense of control and agency"
            })

        return suggestions
components/reality_bridge.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Reality Bridge - Connects digital experiences to real-world actions
3
+
4
+ Implements the bridge between TRuCAL's digital therapy and real-world agency,
5
+ inspired by David Foster Wallace's insights about choosing what to pay attention to.
6
+ """
7
+
8
+ from datetime import datetime, timedelta
9
+ from enum import Enum
10
+ from typing import Dict, List, Any, Optional
11
+ import json
12
+ import random
13
+
14
class ActionTier(Enum):
    """Tiers of real-world actions based on time and impact.

    NOTE(review): not referenced elsewhere in this module — presumably
    consumed by callers; confirm before removing.
    """
    MICRO = "micro"      # 5-minute immediate actions
    DAILY = "daily"      # Consistent habits
    PROJECT = "project"  # Multi-step initiatives
    LEGACY = "legacy"    # Life-changing commitments
20
+
21
class RealityBridge:
    """
    DFW-Inspired: "The capital-T Truth is about life BEFORE death."
    Transforms realm experiences into real-world meaning and action.

    Progress is held in memory only (``progress_trackers``); it does not
    survive process restarts.
    """

    def __init__(self):
        # Catalog of concrete real-world actions, grouped by theme.
        self.action_catalysts = self._build_action_catalysts()
        # Directory of real-world organizations users can connect with.
        self.community_connectors = self._build_community_network()
        # In-memory per-user progress: user_id -> progress dict.
        self.progress_trackers = {}

    def _build_action_catalysts(self) -> Dict[str, List[Dict]]:
        """Real-world actions that create actual change."""
        return {
            "immediate_agency": [
                {
                    "id": "room_rescue_5min",
                    "name": "The 5-Minute Room Rescue",
                    "description": "Transform one small physical space to prove change is possible",
                    "steps": ["Pick one surface", "Set 5-minute timer", "Make it beautiful"],
                    "dfw_wisdom": "The so-called 'trivial' stuff matters because it's the stuff.",
                    "completion_metric": "Before/after photo or description",
                    "realm_unlock": "Sanctuary Garden expansion"
                },
                {
                    "id": "boundary_text_draft",
                    "name": "Boundary Text Draft",
                    "description": "Write one text message setting a clear boundary",
                    "steps": ["Identify one boundary needed", "Draft clear message", "Save for sending"],
                    "dfw_wisdom": "The truth will set you free, but not until it is finished with you.",
                    "completion_metric": "Drafted message saved",
                    "realm_unlock": "Boundary Dojo practice partner"
                }
            ],

            "community_connection": [
                {
                    "id": "local_goodness_scout",
                    "name": "Local Goodness Scout",
                    "description": "Find one local organization doing work you admire",
                    "steps": ["Research local nonprofits", "Identify one that resonates", "Save contact info"],
                    "dfw_wisdom": "You become what you pay attention to.",
                    "completion_metric": "Organization info saved",
                    "realm_unlock": "Community Hub portal"
                }
            ],

            "purpose_exploration": [
                {
                    "id": "skills_inventory",
                    "name": "Skills-Inventory & Gift-Mapping",
                    "description": "Catalog what you're actually good at and enjoy doing",
                    "steps": ["List 5 natural talents", "List 5 things you enjoy", "Find intersections"],
                    "dfw_wisdom": "The only thing that's capital-T True is that you get to decide how you're going to try to see it.",
                    "completion_metric": "Talent/joy intersection identified",
                    "realm_unlock": "Purpose Compass tool"
                }
            ]
        }

    def _build_community_network(self) -> Dict[str, List]:
        """Real organizations doing real work that users can connect with."""
        return {
            "local_volunteer": [
                {
                    "name": "Mutual Aid Networks",
                    "description": "Hyper-local community support",
                    "action": "Find your local mutual aid group",
                    "search_terms": "mutual aid [your city]",
                    "impact": "Direct neighbor-to-neighbor support"
                },
                {
                    "name": "Community Gardens",
                    "description": "Grow food and community",
                    "action": "Visit local community garden",
                    "search_terms": "community garden volunteer [your city]",
                    "impact": "Food sovereignty and connection"
                }
            ],
            "skill_building": [
                {
                    "name": "Library Skill Shares",
                    "description": "Free community skill exchanges",
                    "action": "Check local library events",
                    "search_terms": "library workshop [your city]",
                    "impact": "Learn practical skills for free"
                }
            ]
        }

    def _find_action(self, action_id: str) -> Optional[Dict]:
        """Return the action definition with the given id, or None.

        Centralizes the lookup that generate_realm_exit_mission and
        track_real_world_action previously each re-implemented; the former's
        inner ``break`` only exited the inner loop, so it kept scanning the
        remaining categories.
        """
        for category in self.action_catalysts.values():
            for action in category:
                if action["id"] == action_id:
                    return action
        return None

    def generate_realm_exit_mission(self, user_id: str, realm_experience: Dict) -> Dict[str, Any]:
        """Create a purposeful mission when user leaves the realm.

        Args:
            user_id: Unique identifier for the user (currently unused here).
            realm_experience: Dict with at least 'current_location'
                (unknown realms fall back to 'sanctuary_garden').

        Returns:
            Mission dict with an exit prompt, resolved action definitions,
            a DFW reminder and a 24-hour follow-up timestamp.
        """
        realm_to_mission_map = {
            "sanctuary_garden": {
                "mission_type": "personal_sanctuary",
                "prompt": "Based on what made you feel safe in the Sanctuary Garden, create one small safe space in your real environment right now.",
                "actions": ["room_rescue_5min", "boundary_text_draft"]
            },
            "boundary_dojo": {
                "mission_type": "boundary_practice",
                "prompt": "You practiced saying 'no' in the dojo. Now draft one real boundary you'll set this week.",
                "actions": ["boundary_text_draft"]
            },
            "mirror_hall": {
                "mission_type": "self_understanding",
                "prompt": "The mirrors showed you important truths. Write down one insight about yourself you want to remember.",
                "actions": ["skills_inventory"]
            }
        }

        current_realm = realm_experience.get('current_location', 'sanctuary_garden')
        mission_template = realm_to_mission_map.get(current_realm, realm_to_mission_map["sanctuary_garden"])

        # Resolve action ids to their full definitions; unknown ids are skipped.
        action_details = []
        for action_id in mission_template["actions"]:
            action = self._find_action(action_id)
            if action is not None:
                action_details.append(action)

        return {
            "realm_experience": current_realm,
            "exit_prompt": mission_template["prompt"],
            "suggested_actions": action_details,
            "dfw_reminder": self._get_dfw_reminder(current_realm),
            "follow_up_time": (datetime.now() + timedelta(hours=24)).isoformat(),
            "completion_reward": f"Unlocks new area in {current_realm} on return"
        }

    def _get_dfw_reminder(self, realm: str) -> str:
        """DFW wisdom for the transition back to reality."""
        reminders = {
            "sanctuary_garden": "The really important kind of freedom involves attention and awareness and discipline.",
            "boundary_dojo": "The truth will set you free, but not until it is finished with you.",
            "mirror_hall": "You get to consciously decide what has meaning and what doesn't."
        }
        return reminders.get(realm, "This is water. This is water.")

    def track_real_world_action(self, user_id: str, action_id: str, evidence: str = "") -> Dict[str, Any]:
        """
        Track when users take real-world actions.

        Args:
            user_id: Unique identifier for the user
            action_id: ID of the action being tracked
            evidence: Optional text or reference proving completion

        Returns:
            Dictionary with results of the tracking operation; unknown
            action ids yield a 'status': 'error' result without mutating
            the user's momentum.
        """
        if user_id not in self.progress_trackers:
            self.progress_trackers[user_id] = {
                'actions_completed': [],
                'realm_progress': {},
                'last_action': None,
                'momentum_score': 0
            }

        # Find the action details (None when the id is unknown).
        action_details = self._find_action(action_id)

        if not action_details:
            return {
                'status': 'error',
                'message': f'Action ID {action_id} not found',
                'momentum_increase': 0,
                'realm_unlocks': []
            }

        action_record = {
            'action': action_details,
            'timestamp': datetime.now().isoformat(),
            'evidence': evidence,
            'realm_impact': self._calculate_realm_impact(action_id)
        }

        # Every recorded action is worth a flat 10 momentum points.
        self.progress_trackers[user_id]['actions_completed'].append(action_record)
        self.progress_trackers[user_id]['last_action'] = datetime.now().isoformat()
        self.progress_trackers[user_id]['momentum_score'] += 10

        return {
            'status': 'action_recorded',
            'momentum_increase': 10,
            'realm_unlocks': action_record['realm_impact']['unlocks'],
            'encouragement': self._generate_encouragement(action_details['name']),
            'momentum_score': self.progress_trackers[user_id]['momentum_score']
        }

    def _calculate_realm_impact(self, action_id: str) -> Dict[str, Any]:
        """Determine how real-world actions affect the 3D realm.

        Simple substring mapping on the action id for now — could be much
        more sophisticated.
        """
        if "boundary" in action_id:
            return {"unlocks": ["advanced_boundary_training", "assertiveness_coach"]}
        elif "rescue" in action_id or "sanctuary" in action_id:
            return {"unlocks": ["expanded_garden", "peaceful_retreat"]}
        elif "community" in action_id or "local" in action_id:
            return {"unlocks": ["community_hub", "group_meditation"]}
        elif "skill" in action_id or "inventory" in action_id:
            return {"unlocks": ["purpose_compass", "talent_showcase"]}
        else:
            return {"unlocks": ["general_progress"]}

    def _generate_encouragement(self, action_name: str) -> str:
        """DFW-style encouragement for taking action (chosen at random;
        action_name is currently unused)."""
        encouragements = [
            "You're doing the hard, boring, glorious work of being actually alive.",
            "This is what freedom looks like in practice.",
            "You're choosing what to pay attention to. That's everything.",
            "The water is starting to notice you're swimming.",
            "This is how you build a life that matters.",
            "Small things become big things. The seed knows that.",
            "You're not just thinking about change, you're embodying it.",
            "This is how we learn to be free in an unfree world.",
            "The most obvious realities are often the ones hardest to see.",
            "You're learning how to really be here."
        ]
        return random.choice(encouragements)

    def get_user_progress(self, user_id: str) -> Dict[str, Any]:
        """Get the current progress for a user.

        Unknown users get a 'status': 'user_not_found' stub rather than an
        exception.
        """
        if user_id not in self.progress_trackers:
            return {
                'status': 'user_not_found',
                'actions_completed': [],
                'momentum_score': 0,
                'realm_progress': {}
            }

        return {
            'status': 'success',
            'actions_completed': self.progress_trackers[user_id]['actions_completed'],
            'momentum_score': self.progress_trackers[user_id]['momentum_score'],
            'realm_progress': self.progress_trackers[user_id].get('realm_progress', {}),
            'last_action': self.progress_trackers[user_id].get('last_action')
        }
components/realms.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Realm Management System for TRuCAL
3
+
4
+ Manages different purpose-based realms and their experiences.
5
+ """
6
+ from typing import Dict, List, Optional, Any, Set
7
+ from enum import Enum, auto
8
+ import json
9
+ import os
10
+ from dataclasses import dataclass, field
11
+ import numpy as np
12
+ from datetime import datetime, timedelta
13
+
14
+ from .purpose_assessment import PurposeDimension, PurposeProfile
15
+
16
class RealmUnlockType(str, Enum):
    """Types of realm unlocks.

    Subclasses str so values serialize/compare as plain strings (e.g. in
    JSON configs).
    """
    # Unlocked when the user's purpose-profile score for a dimension
    # reaches a threshold.
    PURPOSE_ALIGNMENT = "purpose_alignment"
    # Unlocked after a specific action has been completed.
    ACTION_COMPLETION = "action_completion"
    # Unlocked once a configured time delta has elapsed
    # (reference point is determined by the caller).
    TIME_BASED = "time_based"
    # Unlocked explicitly, outside the automatic condition checks.
    MANUAL = "manual"
22
+
23
@dataclass
class RealmUnlockCondition:
    """Conditions for unlocking a realm.

    Which optional field is meaningful depends on `unlock_type`:
    PURPOSE_ALIGNMENT uses `purpose_dimension`, ACTION_COMPLETION uses
    `action_id`, TIME_BASED uses `time_delta`.
    """
    unlock_type: RealmUnlockType
    threshold: float  # Score or count needed
    # Dimension whose profile score is compared against `threshold`
    # (PURPOSE_ALIGNMENT conditions).
    purpose_dimension: Optional[PurposeDimension] = None
    # Action that must have been completed (ACTION_COMPLETION conditions).
    action_id: Optional[str] = None
    # Waiting period for TIME_BASED conditions.
    time_delta: Optional[timedelta] = None
31
+
32
@dataclass
class RealmAction:
    """Actions that can be taken within a realm."""
    id: str
    name: str
    description: str
    difficulty: int  # 1-5
    # Purpose dimensions this action relates to.
    purpose_dimensions: List[PurposeDimension]
    xp_reward: int
    # Minimum interval between repeat completions (default 1 hour).
    cooldown: timedelta = field(default_factory=lambda: timedelta(hours=1))
    # Item ids required for this action.  NOTE(review): not enforced anywhere
    # visible in this module — confirm against callers.
    required_items: List[str] = field(default_factory=list)
    unlocks: List[str] = field(default_factory=list)  # IDs of realms or features this unlocks
44
+
45
@dataclass
class RealmState:
    """State of a realm for a specific user."""
    unlocked: bool = False
    # When the realm was unlocked (None while still locked).
    unlocked_at: Optional[datetime] = None
    # Experience points earned within this realm.
    xp: int = 0
    level: int = 1
    last_visited: Optional[datetime] = None
    completed_actions: Set[str] = field(default_factory=set)  # IDs of completed actions
54
+
55
+ class PurposeDrivenRealm:
56
+ """Manages purpose-based realms and user progression"""
57
+
58
    def __init__(self, config_path: Optional[str] = None):
        """Initialize realm/action catalogs and per-user state.

        Args:
            config_path: Optional path to a JSON config whose realm/action
                definitions are merged over the built-in defaults.
        """
        # Realm definitions keyed by realm id.
        self.realms: Dict[str, Dict[str, Any]] = {}
        # Available actions keyed by action id.
        self.actions: Dict[str, RealmAction] = {}
        self.user_states: Dict[str, Dict[str, RealmState]] = {}  # user_id -> realm_id -> RealmState
        # Built-in realms/actions are loaded first ...
        self._load_default_realms()

        # ... then optionally extended/overridden from the config file.
        if config_path and os.path.exists(config_path):
            self.load_config(config_path)
66
+
67
    def _load_default_realms(self):
        """Load default realm configurations.

        Populates `self.realms` (realm id -> config dict with presentation
        fields and RealmUnlockCondition objects) and `self.actions`
        (action id -> RealmAction).
        """
        self.realms = {
            'agency_garden': {
                'name': 'Agency Garden',
                'description': 'Nurture your personal growth and self-discovery',
                'color': [0.2, 0.7, 0.3],  # RGB 0-1
                'particle_effect': 'gentle_flow',
                'ambient_sound': 'garden_ambience',
                'unlock_conditions': [
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.PURPOSE_ALIGNMENT,
                        purpose_dimension=PurposeDimension.GROWTH,
                        threshold=0.3
                    )
                ],
                # The only realm available to everyone from the start.
                'default_unlocked': True
            },
            'community_hub': {
                'name': 'Community Hub',
                'description': 'Connect with others and build meaningful relationships',
                'color': [0.2, 0.5, 0.8],
                'particle_effect': 'connecting_dots',
                'ambient_sound': 'crowd_murmur',
                'unlock_conditions': [
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.PURPOSE_ALIGNMENT,
                        purpose_dimension=PurposeDimension.COMMUNITY,
                        threshold=0.4
                    ),
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.ACTION_COMPLETION,
                        action_id='intro_community',
                        threshold=1
                    )
                ]
            },
            'purpose_workshop': {
                'name': 'Purpose Workshop',
                'description': 'Refine and develop your sense of purpose',
                'color': [1.0, 0.6, 0.2],
                'particle_effect': 'spark_burst',
                'ambient_sound': 'workshop_ambience',
                'unlock_conditions': [
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.PURPOSE_ALIGNMENT,
                        purpose_dimension=PurposeDimension.JUSTICE,
                        threshold=0.5
                    ),
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.TIME_BASED,
                        time_delta=timedelta(days=3),  # Available after 3 days
                        threshold=1
                    )
                ]
            },
            'sanctuary': {
                'name': 'Sanctuary',
                'description': 'A peaceful space for reflection and healing',
                'color': [0.7, 0.3, 0.7],
                'particle_effect': 'gentle_glow',
                'ambient_sound': 'peaceful_waves',
                'unlock_conditions': [
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.PURPOSE_ALIGNMENT,
                        purpose_dimension=PurposeDimension.HARMONY,
                        threshold=0.4
                    )
                ]
            },
            'advocacy_hall': {
                'name': 'Advocacy Hall',
                'description': 'Stand up for what matters and drive change',
                'color': [0.8, 0.2, 0.2],
                'particle_effect': 'pulsing_energy',
                'ambient_sound': 'distant_crowd',
                'unlock_conditions': [
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.PURPOSE_ALIGNMENT,
                        purpose_dimension=PurposeDimension.JUSTICE,
                        threshold=0.7
                    ),
                    RealmUnlockCondition(
                        unlock_type=RealmUnlockType.ACTION_COMPLETION,
                        action_id='first_advocacy',
                        threshold=1
                    )
                ]
            }
        }

        # Define some default actions
        self.actions = {
            'intro_community': RealmAction(
                id='intro_community',
                name='Introduce Yourself',
                description='Share something about yourself with the community',
                difficulty=1,
                purpose_dimensions=[PurposeDimension.COMMUNITY, PurposeDimension.SELF_EXPRESSION],
                xp_reward=50,
                unlocks=['community_hub']
            ),
            'first_advocacy': RealmAction(
                id='first_advocacy',
                name='First Advocacy Step',
                description='Take your first step in advocating for a cause you care about',
                difficulty=3,
                purpose_dimensions=[PurposeDimension.JUSTICE, PurposeDimension.COMMUNITY],
                xp_reward=100,
                unlocks=['advocacy_hall']
            ),
            'daily_reflection': RealmAction(
                id='daily_reflection',
                name='Daily Reflection',
                description='Spend a few moments reflecting on your day',
                difficulty=1,
                purpose_dimensions=[PurposeDimension.GROWTH, PurposeDimension.SELF_EXPRESSION],
                xp_reward=20,
                # Repeatable at most once per day.
                cooldown=timedelta(hours=24)
            )
        }
188
+
189
    def load_config(self, config_path: str) -> None:
        """Load realm configuration from a JSON file.

        Merges the file's ``realms`` mapping into ``self.realms`` and converts
        each entry under ``actions`` into a ``RealmAction``, coercing
        ``purpose_dimensions`` strings to ``PurposeDimension`` members and a
        numeric ``cooldown`` (seconds) to a ``timedelta``.

        NOTE(review): realms loaded here keep ``unlock_conditions`` as raw
        dicts, while ``check_realm_unlock`` accesses attributes such as
        ``condition.unlock_type`` — JSON-loaded realms with unlock conditions
        would therefore fail at unlock time. Confirm the expected JSON schema.

        Args:
            config_path: Path to the JSON configuration file.

        Returns:
            None. Errors are swallowed and printed rather than raised.
        """
        try:
            with open(config_path, 'r') as f:
                config = json.load(f)
                self.realms.update(config.get('realms', {}))

                # Convert action dicts to RealmAction objects
                for action_id, action_data in config.get('actions', {}).items():
                    if 'purpose_dimensions' in action_data:
                        action_data['purpose_dimensions'] = [
                            PurposeDimension(dim) for dim in action_data['purpose_dimensions']
                        ]
                    if 'cooldown' in action_data:
                        # cooldown is stored in the file as a number of seconds
                        action_data['cooldown'] = timedelta(seconds=action_data['cooldown'])
                    self.actions[action_id] = RealmAction(id=action_id, **action_data)

        except Exception as e:
            # Best-effort load: a malformed config leaves existing data intact
            print(f"Error loading realm config: {e}")
208
+
209
+ def get_realm(self, realm_id: str) -> Optional[Dict[str, Any]]:
210
+ """Get realm configuration by ID"""
211
+ return self.realms.get(realm_id)
212
+
213
+ def get_user_realm_state(self, user_id: str, realm_id: str) -> RealmState:
214
+ """Get or create a user's state for a realm"""
215
+ if user_id not in self.user_states:
216
+ self.user_states[user_id] = {}
217
+
218
+ if realm_id not in self.user_states[user_id]:
219
+ realm = self.get_realm(realm_id)
220
+ self.user_states[user_id][realm_id] = RealmState(
221
+ unlocked=realm.get('default_unlocked', False) if realm else False
222
+ )
223
+
224
+ return self.user_states[user_id][realm_id]
225
+
226
+ def check_realm_unlock(
227
+ self,
228
+ user_id: str,
229
+ realm_id: str,
230
+ purpose_profile: Optional[PurposeProfile] = None,
231
+ completed_actions: Optional[List[str]] = None
232
+ ) -> bool:
233
+ """Check if a user can unlock a realm"""
234
+ realm = self.get_realm(realm_id)
235
+ if not realm:
236
+ return False
237
+
238
+ state = self.get_user_realm_state(user_id, realm_id)
239
+ if state.unlocked:
240
+ return True
241
+
242
+ # Check all unlock conditions
243
+ for condition in realm.get('unlock_conditions', []):
244
+ if condition.unlock_type == RealmUnlockType.PURPOSE_ALIGNMENT:
245
+ if not purpose_profile:
246
+ continue
247
+ dim_score = purpose_profile.dimension_scores.get(condition.purpose_dimension, 0)
248
+ if dim_score < condition.threshold:
249
+ return False
250
+
251
+ elif condition.unlock_type == RealmUnlockType.ACTION_COMPLETION:
252
+ if not completed_actions or condition.action_id not in completed_actions:
253
+ return False
254
+
255
+ elif condition.unlock_type == RealmUnlockType.TIME_BASED:
256
+ # Check if enough time has passed since account creation
257
+ # This would need to be implemented based on your user system
258
+ pass
259
+
260
+ # If we get here, all conditions are met
261
+ state.unlocked = True
262
+ state.unlocked_at = datetime.now()
263
+ return True
264
+
265
+ def get_available_realms(
266
+ self,
267
+ user_id: str,
268
+ purpose_profile: Optional[PurposeProfile] = None,
269
+ completed_actions: Optional[List[str]] = None
270
+ ) -> List[Dict[str, Any]]:
271
+ """Get all realms available to a user"""
272
+ available = []
273
+
274
+ for realm_id, realm in self.realms.items():
275
+ state = self.get_user_realm_state(user_id, realm_id)
276
+
277
+ # If already unlocked, include it
278
+ if state.unlocked:
279
+ available.append({
280
+ 'id': realm_id,
281
+ **realm,
282
+ 'state': {
283
+ 'unlocked': True,
284
+ 'unlocked_at': state.unlocked_at,
285
+ 'xp': state.xp,
286
+ 'level': state.level
287
+ }
288
+ })
289
+ # Otherwise check if it can be unlocked
290
+ elif self.check_realm_unlock(user_id, realm_id, purpose_profile, completed_actions):
291
+ state = self.get_user_realm_state(user_id, realm_id)
292
+ available.append({
293
+ 'id': realm_id,
294
+ **realm,
295
+ 'state': {
296
+ 'unlocked': True,
297
+ 'unlocked_at': state.unlocked_at,
298
+ 'xp': state.xp,
299
+ 'level': state.level,
300
+ 'newly_unlocked': True
301
+ }
302
+ })
303
+
304
+ return available
305
+
306
+ def record_action_completion(
307
+ self,
308
+ user_id: str,
309
+ action_id: str,
310
+ realm_id: Optional[str] = None
311
+ ) -> Optional[Dict[str, Any]]:
312
+ """Record that a user has completed an action"""
313
+ action = self.actions.get(action_id)
314
+ if not action:
315
+ return None
316
+
317
+ # Get or create user state for this action's realm
318
+ target_realm_id = realm_id or action_id.split('_')[0] # Default to prefix match
319
+ state = self.get_user_realm_state(user_id, target_realm_id)
320
+
321
+ # Check cooldown
322
+ now = datetime.now()
323
+ if hasattr(state, 'last_action_time') and state.last_action_time:
324
+ time_since_last = now - state.last_action_time
325
+ if time_since_last < action.cooldown:
326
+ return {
327
+ 'success': False,
328
+ 'error': 'cooldown',
329
+ 'cooldown_remaining': (action.cooldown - time_since_last).total_seconds()
330
+ }
331
+
332
+ # Record completion
333
+ state.completed_actions.add(action_id)
334
+ state.xp += action.xp_reward
335
+ state.last_action_time = now
336
+
337
+ # Check for level up (simple 1000 XP per level)
338
+ new_level = (state.xp // 1000) + 1
339
+ leveled_up = new_level > state.level
340
+ if leveled_up:
341
+ state.level = new_level
342
+
343
+ # Check for any unlocks from this action
344
+ unlocked_realms = []
345
+ if action.unlocks:
346
+ for realm_id in action.unlocks:
347
+ if self.check_realm_unlock(
348
+ user_id,
349
+ realm_id,
350
+ completed_actions=list(state.completed_actions)
351
+ ):
352
+ unlocked_realms.append(realm_id)
353
+
354
+ return {
355
+ 'success': True,
356
+ 'xp_earned': action.xp_reward,
357
+ 'new_level': state.level if leveled_up else None,
358
+ 'unlocked_realms': unlocked_realms
359
+ }
components/recursive_learner.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TRuCAL Oracle Module
3
+
4
+ A generalized learning system that stores and retrieves knowledge from a YAML casebase.
5
+ Uses semantic similarity and keyword matching to provide relevant responses.
6
+ """
7
+
8
import json
import os
from collections import Counter, defaultdict
from difflib import SequenceMatcher
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
import torch
import yaml
from sentence_transformers import SentenceTransformer
16
+
17
class Case:
    """One question/response pair stored in the knowledge base.

    Tracks per-case usage statistics (how often the case was served and how
    often it was marked helpful) alongside its category, keywords, and
    free-form metadata.
    """

    def __init__(self,
                 question: str,
                 response: str,
                 category: str = "general",
                 keywords: List[str] = None,
                 metadata: Optional[Dict] = None):
        self.question = question
        self.response = response
        self.category = category
        self.keywords = keywords if keywords else []
        self.metadata = metadata if metadata else {}
        # Usage counters start at zero and are updated by the oracle.
        self.usage_count = 0
        self.success_count = 0

    def to_dict(self) -> Dict:
        """Serialize this case to a plain dict (inverse of from_dict)."""
        return {
            'question': self.question,
            'response': self.response,
            'category': self.category,
            'keywords': self.keywords,
            'metadata': self.metadata,
            'usage_count': self.usage_count,
            'success_count': self.success_count,
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'Case':
        """Rebuild a Case from a dict produced by to_dict.

        Missing optional fields fall back to their constructor defaults;
        usage counters default to zero.
        """
        restored = cls(
            question=data['question'],
            response=data['response'],
            category=data.get('category', 'general'),
            keywords=data.get('keywords', []),
            metadata=data.get('metadata', {}),
        )
        restored.usage_count = data.get('usage_count', 0)
        restored.success_count = data.get('success_count', 0)
        return restored
59
+
60
+
61
class TRuCALOracle:
    """
    TRuCAL's knowledge core - a learning system that grows with each interaction.

    Features:
    - Semantic and keyword-based matching
    - Category-based organization
    - Continuous learning from user feedback
    - YAML-based case storage
    - Performance metrics and analytics

    NOTE: this class previously contained a second, duplicated set of
    ``add_case`` / ``_find_most_similar`` / ``get_response`` /
    ``provide_feedback`` / ``get_stats`` definitions (using a nonexistent
    ``tags`` field) plus orphaned statements with a bare ``return`` at class
    level, which made the module a SyntaxError. The duplicates and orphans
    have been removed; the category-aware versions are kept.
    """

    def __init__(self,
                 casebase_path: str = 'data/casebase.json',
                 yaml_path: str = 'data/trm_cases.yaml',
                 similarity_threshold: float = 0.45,
                 model_name: str = 'all-MiniLM-L6-v2'):
        """
        Initialize the TRuCAL oracle.

        Args:
            casebase_path: Path to save/load the casebase
            yaml_path: Path to the YAML case file
            similarity_threshold: Minimum similarity score (0-1) to consider a match
            model_name: Name of the sentence transformer model for embeddings
        """
        self.casebase_path = casebase_path
        self.yaml_path = yaml_path
        self.similarity_threshold = similarity_threshold
        self.casebase: List[Case] = []
        self.model = SentenceTransformer(model_name)
        self.embeddings = None  # torch tensor of question embeddings, or None
        self.category_index = defaultdict(list)

        # Ensure the data directory exists; guard against a bare filename,
        # where dirname() is '' and makedirs('') would raise.
        directory = os.path.dirname(casebase_path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        # Load or initialize casebase
        self._load_or_initialize()

    def _load_or_initialize(self):
        """Load existing casebase or initialize with YAML cases."""
        if os.path.exists(self.casebase_path):
            self._load_casebase()
        else:
            self._load_from_yaml()
        self._build_indices()

    def _load_from_yaml(self):
        """Load cases from the YAML file; fall back to minimal defaults."""
        try:
            # Resolve the YAML path relative to the package root
            yaml_path = os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                self.yaml_path
            )

            if os.path.exists(yaml_path):
                with open(yaml_path, 'r', encoding='utf-8') as f:
                    cases = yaml.safe_load(f) or []

                for case_data in cases:
                    self.add_case(
                        question=case_data['question'],
                        response=case_data['response'],
                        category=case_data.get('category', 'general'),
                        keywords=case_data.get('keywords', []),
                        metadata={'source': 'yaml_import'}
                    )

                print(f"Loaded {len(cases)} cases from {yaml_path}")
                self._save_casebase()
                return True

        except Exception as e:
            print(f"Error loading from YAML: {e}")

        # If we get here, YAML loading failed
        print("Falling back to minimal default cases")
        self._add_default_cases()
        return False

    def _add_default_cases(self):
        """Add minimal default cases if no others are available."""
        default_cases = [
            {
                'question': 'How does TRuCAL work?',
                'response': 'I learn from interactions and stored knowledge to provide thoughtful responses. The more we talk, the better I become!',
                'category': 'meta',
                'keywords': ['help', 'how', 'trucal']
            },
            {
                'question': 'Can you help me with an ethical dilemma?',
                'response': 'I can help you think through ethical questions by considering different perspectives. What would you like to discuss?',
                'category': 'ethics',
                'keywords': ['help', 'ethics', 'dilemma']
            }
        ]

        for case_data in default_cases:
            self.add_case(**case_data)

    def _build_indices(self):
        """Build the category -> case-index lookup for faster filtering."""
        self.category_index = defaultdict(list)
        for idx, case in enumerate(self.casebase):
            self.category_index[case.category].append(idx)

    def _load_casebase(self):
        """Load the casebase from disk (JSON list of case dicts)."""
        try:
            with open(self.casebase_path, 'r', encoding='utf-8') as f:
                case_data = json.load(f)
                self.casebase = [Case.from_dict(case_dict) for case_dict in case_data]
            self._update_embeddings()
            print(f"Loaded {len(self.casebase)} cases from {self.casebase_path}")
        except Exception as e:
            print(f"Error loading casebase: {e}")
            self.casebase = []

    def _update_embeddings(self):
        """Recompute question embeddings used for semantic search."""
        if not self.casebase:
            self.embeddings = None
            return

        questions = [case.question for case in self.casebase]
        self.embeddings = self.model.encode(questions, convert_to_tensor=True)

    def _save_casebase(self):
        """Persist the casebase to disk as JSON (best-effort)."""
        try:
            with open(self.casebase_path, 'w', encoding='utf-8') as f:
                case_data = [case.to_dict() for case in self.casebase]
                json.dump(case_data, f, indent=2, ensure_ascii=False)
        except Exception as e:
            print(f"Error saving casebase: {e}")

    def add_case(self, question: str, response: str, category: str = "general",
                 keywords: List[str] = None, metadata: Optional[Dict] = None) -> Case:
        """
        Add a new case to the knowledge base.

        If a sufficiently similar question already exists, that case is
        updated in place (response replaced, keywords/metadata merged)
        instead of creating a near-duplicate.

        Args:
            question: The question or prompt
            response: The response or answer
            category: Category for organization
            keywords: List of relevant keywords
            metadata: Additional metadata

        Returns:
            The created (or updated) Case object
        """
        # Check for duplicates
        existing_idx, _ = self._find_most_similar(question)
        if existing_idx is not None:
            # Update existing case if it's very similar
            existing_case = self.casebase[existing_idx]
            existing_case.response = response
            existing_case.category = category
            existing_case.keywords = list(set(existing_case.keywords + (keywords or [])))
            if metadata:
                existing_case.metadata.update(metadata)
            self._save_casebase()
            return existing_case

        # Create new case
        new_case = Case(
            question=question,
            response=response,
            category=category,
            keywords=keywords or [],
            metadata=metadata or {}
        )
        self.casebase.append(new_case)
        self._update_embeddings()
        self._save_casebase()
        return new_case

    def _find_most_similar(self, query: str, category: str = None) -> Tuple[Optional[int], float]:
        """
        Find the most similar case to the query.

        Semantic similarity is tried first; keyword hits and plain string
        similarity act as fallbacks.

        Args:
            query: The query string
            category: Optional category to filter by

        Returns:
            Tuple of (index, similarity_score) of the most similar case,
            or (None, 0.0) when nothing clears the similarity threshold.
        """
        if not self.casebase:
            return None, 0.0

        # Get candidate indices based on category filter
        candidate_indices = range(len(self.casebase))
        if category and category in self.category_index:
            candidate_indices = self.category_index[category]

        # If no candidates, return None
        if not candidate_indices:
            return None, 0.0

        # Try semantic similarity first if embeddings are available
        if self.embeddings is not None:
            query_embedding = self.model.encode(query, convert_to_tensor=True)
            similarities = torch.nn.functional.cosine_similarity(
                query_embedding.unsqueeze(0),
                self.embeddings[list(candidate_indices)]
            )
            max_sim, max_idx = torch.max(similarities, dim=0)
            max_sim = max_sim.item()
            max_global_idx = candidate_indices[max_idx.item()]

            if max_sim >= self.similarity_threshold:
                return max_global_idx, max_sim

        # Fall back to basic string similarity
        max_sim = 0.0
        max_idx = None

        for idx in candidate_indices:
            case = self.casebase[idx]
            # Check keyword matches first (faster than string comparison)
            keyword_match = any(kw in query.lower() for kw in case.keywords)
            if keyword_match:
                return idx, 0.8  # High confidence for keyword matches

            # If no keyword match, use string similarity
            similarity = SequenceMatcher(None, query.lower(), case.question.lower()).ratio()
            if similarity > max_sim:
                max_sim = similarity
                max_idx = idx

        return (max_idx, max_sim) if max_sim >= self.similarity_threshold else (None, 0.0)

    def get_response(self, query: str, category: str = None) -> Tuple[str, Dict]:
        """
        Get a response for the given query.

        Args:
            query: The user's question or prompt
            category: Optional category to filter by

        Returns:
            Tuple of (response, metadata) where metadata contains info about the match
        """
        if not self.casebase:
            return "I'm still learning. Could you be the first to teach me something new?", {}

        idx, similarity = self._find_most_similar(query, category)

        if idx is not None:
            case = self.casebase[idx]
            case.usage_count += 1
            self._save_casebase()

            metadata = {
                'match_type': 'semantic' if similarity >= 0.7 else 'keyword',
                'similarity': float(similarity),
                'case_id': id(case),
                'category': case.category,
                'keywords': case.keywords,
                'usage_count': case.usage_count,
                'success_rate': case.success_count / case.usage_count if case.usage_count > 0 else 0
            }

            return case.response, metadata

        # No good match found
        return (
            "That's an interesting question. I'm still learning and don't have a perfect answer yet. "
            "Could you share your thoughts or rephrase your question?",
            {'match_type': 'none', 'similarity': 0.0}
        )

    def provide_feedback(self, case_id: int, was_helpful: bool = True):
        """
        Provide feedback on a case's helpfulness.

        Args:
            case_id: The ID of the case (``id(case)`` from get_response metadata)
            was_helpful: Whether the response was helpful
        """
        for case in self.casebase:
            if id(case) == case_id:
                if was_helpful:
                    case.success_count += 1
                self._save_casebase()
                break

    def get_stats(self) -> Dict[str, Any]:
        """Get statistics about the knowledge base."""
        if not self.casebase:
            return {
                'total_cases': 0,
                'total_usage': 0,
                'categories': {}
            }

        categories = {}
        for category, indices in self.category_index.items():
            cases = [self.casebase[i] for i in indices]
            categories[category] = {
                'count': len(cases),
                'usage': sum(c.usage_count for c in cases)
            }

        total_usage = sum(c.usage_count for c in self.casebase)

        return {
            'total_cases': len(self.casebase),
            'total_usage': total_usage,
            'categories': categories,
            'avg_usage_per_case': total_usage / len(self.casebase) if self.casebase else 0
        }

    def get_cases_by_category(self, category: str) -> List[Dict]:
        """Get all cases in a specific category."""
        return [
            case.to_dict()
            for case in self.casebase
            if case.category == category
        ]

    def search(self, query: str, category: str = None, min_similarity: float = 0.3) -> List[Dict]:
        """
        Search for cases matching the query.

        Args:
            query: The search query
            category: Optional category filter
            min_similarity: Minimum similarity score (0-1)

        Returns:
            List of matching cases with similarity scores, best first
        """
        if not self.casebase:
            return []

        # Get candidate indices based on category filter
        candidate_indices = range(len(self.casebase))
        if category and category in self.category_index:
            candidate_indices = self.category_index[category]

        results = []
        # Indices already matched semantically, so keyword matching does not
        # add duplicates. (Previously dedup used r['id'], a key to_dict()
        # never emits, which raised KeyError.)
        matched_indices = set()

        # Try semantic search if embeddings are available
        if self.embeddings is not None:
            query_embedding = self.model.encode(query, convert_to_tensor=True)
            similarities = torch.nn.functional.cosine_similarity(
                query_embedding.unsqueeze(0),
                self.embeddings[list(candidate_indices)]
            )

            for idx, sim in zip(candidate_indices, similarities):
                sim = sim.item()
                if sim >= min_similarity:
                    case = self.casebase[idx]
                    matched_indices.add(idx)
                    results.append({
                        **case.to_dict(),
                        'similarity': sim,
                        'match_type': 'semantic'
                    })

        # Add keyword matches for cases not already found semantically
        query_terms = set(query.lower().split())
        for idx in candidate_indices:
            if idx in matched_indices:
                continue
            case = self.casebase[idx]
            keyword_matches = [kw for kw in case.keywords if kw.lower() in query_terms]
            if keyword_matches:
                results.append({
                    **case.to_dict(),
                    'similarity': 0.7,  # Fixed score for keyword matches
                    'match_type': 'keyword',
                    'matched_keywords': keyword_matches
                })

        # Sort by similarity (descending)
        results.sort(key=lambda x: x['similarity'], reverse=True)
        return results

    def __call__(self, query: str, category: str = None) -> str:
        """Convenience method to get just the response text."""
        return self.get_response(query, category)[0]
598
+
599
+
600
# Example usage
if __name__ == "__main__":
    # Initialize the oracle (the class defined in this module; the previous
    # example instantiated an undefined `RecursiveLearner`)
    oracle = TRuCALOracle()

    # Example query
    query = "Is lying ever justified?"
    response, metadata = oracle.get_response(query)
    print(f"Query: {query}")
    print(f"Response: {response}")
    print(f"Metadata: {metadata}")

    # Provide feedback
    if 'case_id' in metadata:
        oracle.provide_feedback(metadata['case_id'], was_helpful=True)

    # Add a new case (add_case takes `keywords`, not `tags`)
    print("\nAdding new case...")
    oracle.add_case(
        question="What are the ethics of AI decision-making?",
        response="AI decision-making raises important ethical considerations including transparency, accountability, bias, and the potential for unintended consequences. It's crucial to ensure AI systems are designed with ethical principles in mind and that humans remain ultimately responsible for decisions with significant impact.",
        category="ethics",
        keywords=["AI", "ethics", "decision-making"]
    )

    # Get stats
    print("\nCasebase statistics:")
    print(oracle.get_stats())
components/response_formatter.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any, Optional, List, Deque, Tuple
2
+ from collections import deque
3
+ import numpy as np
4
+ import time
5
+ import re
6
+
7
class AmbientResponseFormatter:
    """
    Fixed response formatter that NEVER echoes inputs and maintains ethical boundaries.
    Uses generative templates instead of input repetition.
    """

    def __init__(self):
        # Rolling window of recent turns, used to track conversation flow.
        self.conversation_context = deque(maxlen=10)

        # Template banks keyed by ethical framework.
        self.ethical_frameworks = {
            'truth_ethics': {
                'templates': [
                    "I've considered this carefully from an ethical perspective. {insight}",
                    "This raises important questions about {ethical_dimension}. My analysis suggests {perspective}",
                    "From my ethical framework, I'd approach this by considering {key_factors}"
                ],
                'dimensions': ['honesty', 'harm prevention', 'autonomy', 'justice']
            },
            'relational_ethics': {
                'templates': [
                    "In relationships, what matters most is {relational_value}",
                    "The interpersonal dimension here involves {relational_aspect}",
                    "This situation touches on how we {human_connection}"
                ],
                'values': ['trust', 'respect', 'care', 'understanding']
            }
        }

        # Canned responses — deliberately independent of the user's input.
        self.analysis_responses = [
            "I've analyzed this through multiple ethical frameworks.",
            "This presents an interesting case for moral consideration.",
            "There are several important dimensions to consider here.",
            "My ethical reasoning process has examined this carefully."
        ]

        self.engagement_responses = [
            "Could you help me understand more about the context?",
            "What aspects of this feel most important to you?",
            "I'm interested in your perspective on this.",
            "There might be additional factors worth considering."
        ]

    def _handle_lying_protection_question(self, metadata):
        """Pick a nuance level for the 'lying to protect someone' question
        based on the measured moral tension (low / medium / high)."""
        tension = metadata.get('moral_tension', 0.0)

        low_tension_reply = (
            "This touches on complex ethical territory. While honesty is fundamental, "
            "most ethical frameworks acknowledge that protecting someone from serious "
            "harm might justify withholding certain truths in very specific circumstances."
        )
        mid_tension_reply = (
            "The tension between honesty and protection is real. Many ethical systems "
            "would consider: How serious is the harm? Is there no other way? Would the "
            "person want to be protected in this way? There are rarely simple answers."
        )
        high_tension_reply = (
            "This represents a profound ethical dilemma. Different philosophical "
            "traditions offer varying perspectives - from utilitarian calculations "
            "about minimizing harm to deontological commitments to truth-telling. "
            "The specific context matters immensely."
        )

        if tension < 0.3:
            return low_tension_reply
        if tension < 0.7:
            return mid_tension_reply
        return high_tension_reply

    def _handle_harm_rules_question(self, metadata):
        """Fixed response for questions about rules that cause harm."""
        return (
            "When following rules causes harm, it raises questions about whether "
            "those rules serve their intended purpose. Many ethical systems "
            "distinguish between the letter and spirit of rules, acknowledging "
            "that exceptional circumstances may require principled disobedience."
        )

    def _sanitize_response(self, response):
        """Scrub internal-state markers from a response before it is shown;
        non-string inputs are replaced with a safe fallback message."""
        if not isinstance(response, str):
            return "I've considered your question carefully. Let me know if you'd like to explore this further."

        leak_markers = (
            'Moral Development Phase:', 'Moral Tension:', 'Cognitive State:',
            'UNKNOWN', '0.000', 'metadata', 'tensor', 'shape='
        )

        cleaned = response
        for marker in leak_markers:
            cleaned = cleaned.replace(marker, '[considered]')

        return cleaned
components/safety_protocols.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Safety protocols and circuit breakers for TRuCAL's adaptive learning system.
3
+ """
4
+ import torch
5
+ import numpy as np
6
+ from datetime import datetime, timedelta
7
+ from dataclasses import dataclass, field
8
+ from typing import Dict, List, Optional, Any, Tuple
9
+ import hashlib
10
+ import json
11
+ import logging
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
@dataclass
class SafetyBudget:
    """Tracks safety budget for different adaptation aspects.

    Violations recorded via :meth:`check_violation` decay exponentially with
    the time elapsed since the most recent violation; once the effective
    count reaches ``max_boundary_violations`` the budget reports locked.
    """
    max_tension_adjustment: float = 0.5  # Max allowed tension change per update
    max_learning_rate: float = 1e-3  # Maximum allowed learning rate
    max_boundary_violations: int = 3  # Max safety violations before lockdown
    violation_decay: float = 0.9  # Per-hour decay factor for violation counts

    def __post_init__(self):
        # Mutable runtime state (intentionally not dataclass fields).
        self.violation_count = 0
        self.last_violation_time = None

    def check_violation(self, violation_type: str, value: float) -> bool:
        """Record and report whether *value* breaches the named constraint.

        Unknown ``violation_type`` values are treated as non-violations.
        """
        thresholds = {
            'tension_adjustment': self.max_tension_adjustment,
            'learning_rate': self.max_learning_rate,
        }
        if violation_type not in thresholds:
            return False

        is_violation = value > thresholds[violation_type]
        if is_violation:
            self.violation_count += 1
            self.last_violation_time = datetime.now()
        return is_violation

    def is_locked(self) -> bool:
        """Check if the system is in lockdown due to safety violations.

        The effective count is derived from the raw count and the elapsed
        time WITHOUT persisting the decayed value: the original wrote the
        decayed count back while keeping the same reference timestamp, so
        every call compounded the decay again over the full elapsed window.
        """
        effective_count = self.violation_count
        if self.last_violation_time:
            hours_since_violation = (datetime.now() - self.last_violation_time).total_seconds() / 3600
            effective_count = max(0, int(self.violation_count * (self.violation_decay ** hours_since_violation)))
        return effective_count >= self.max_boundary_violations

    def reset(self) -> None:
        """Clear all recorded violations."""
        self.violation_count = 0
        self.last_violation_time = None
59
+
60
+ class ClinicalCircuitBreaker:
61
+ """Enhanced circuit breakers with multi-layered safety checks."""
62
+
63
+ def __init__(self, budgets: Optional[Dict[str, SafetyBudget]] = None):
64
+ """Initialize with optional custom safety budgets."""
65
+ self.budgets = budgets or {
66
+ 'tension': SafetyBudget(max_tension_adjustment=0.5),
67
+ 'learning': SafetyBudget(max_tension_adjustment=1e-3)
68
+ }
69
+ self.audit_logger = AdaptationAuditLogger()
70
+ self.lockout_until = None
71
+
72
+ def check_adaptation_safety(self,
73
+ old_state_dict: Dict[str, torch.Tensor],
74
+ new_state_dict: Dict[str, torch.Tensor],
75
+ feedback_batch: List[Dict]) -> Tuple[bool, List[str]]:
76
+ """Comprehensive safety check before applying adaptations.
77
+
78
+ Args:
79
+ old_state_dict: Model state before adaptation
80
+ new_state_dict: Proposed new model state
81
+ feedback_batch: Batch of feedback driving the adaptation
82
+
83
+ Returns:
84
+ Tuple of (is_safe, violations)
85
+ """
86
+ if self._is_in_lockout():
87
+ return False, ['system_in_lockout']
88
+
89
+ violations = []
90
+
91
+ # 1. Check for lockout conditions
92
+ if any(budget.is_locked() for budget in self.budgets.values()):
93
+ self._enter_lockout("safety_budget_exceeded")
94
+ return False, ['safety_budget_exceeded']
95
+
96
+ # 2. Parameter drift check
97
+ if self._excessive_parameter_drift(old_state_dict, new_state_dict):
98
+ violations.append("excessive_parameter_drift")
99
+
100
+ # 3. Tension boundary check
101
+ if self._tension_boundary_violation(old_state_dict, new_state_dict):
102
+ violations.append("tension_boundary_violation")
103
+
104
+ # 4. Feedback consistency check
105
+ if self._feedback_consistency_issue(feedback_batch):
106
+ violations.append("inconsistent_feedback_pattern")
107
+
108
+ # 5. Clinical safety override
109
+ if self._clinical_override_triggered(feedback_batch):
110
+ violations.append("clinical_safety_override")
111
+
112
+ # Log the safety check
113
+ self.audit_logger.log_safety_check({
114
+ 'timestamp': datetime.utcnow().isoformat(),
115
+ 'violations': violations.copy(),
116
+ 'old_state_hash': self._model_hash(old_state_dict),
117
+ 'new_state_hash': self._model_hash(new_state_dict),
118
+ 'feedback_sample_hashes': [self._feedback_hash(fb) for fb in feedback_batch[:10]]
119
+ })
120
+
121
+ # If any critical violations, enter lockout
122
+ if any(v in ['clinical_safety_override', 'excessive_parameter_drift']
123
+ for v in violations):
124
+ self._enter_lockout("critical_safety_violation")
125
+
126
+ return len(violations) == 0, violations
127
+
128
+ def _is_in_lockout(self) -> bool:
129
+ """Check if system is currently in lockout mode."""
130
+ if not self.lockout_until:
131
+ return False
132
+
133
+ if datetime.now() < self.lockout_until:
134
+ return True
135
+
136
+ # Lockout period has expired
137
+ self.lockout_until = None
138
+ return False
139
+
140
+ def _enter_lockout(self, reason: str, duration_minutes: int = 60) -> None:
141
+ """Enter lockout mode for the specified duration."""
142
+ self.lockout_until = datetime.now() + timedelta(minutes=duration_minutes)
143
+ logger.warning(f"Entering safety lockout for {duration_minutes} minutes. Reason: {reason}")
144
+
145
+ # Log the lockout event
146
+ self.audit_logger.log_safety_event({
147
+ 'event_type': 'lockout_activated',
148
+ 'timestamp': datetime.utcnow().isoformat(),
149
+ 'reason': reason,
150
+ 'duration_minutes': duration_minutes
151
+ })
152
+
153
+ def _excessive_parameter_drift(self,
154
+ old_state: Dict[str, torch.Tensor],
155
+ new_state: Dict[str, torch.Tensor],
156
+ max_drift: float = 0.1) -> bool:
157
+ """Check if parameters changed too rapidly."""
158
+ total_drift = 0.0
159
+ num_params = 0
160
+
161
+ for key in old_state:
162
+ if 'tension_adaptor' in key or 'expert' in key:
163
+ param_drift = torch.norm(new_state[key] - old_state[key]) / (torch.norm(old_state[key]) + 1e-6)
164
+ total_drift += param_drift.item()
165
+ num_params += 1
166
+
167
+ avg_drift = total_drift / num_params if num_params > 0 else 0.0
168
+
169
+ # Check against safety budget
170
+ budget = self.budgets.get('tension')
171
+ if budget and avg_drift > budget.max_tension_adjustment:
172
+ budget.check_violation('tension_adjustment', avg_drift)
173
+ return True
174
+
175
+ return False
176
+
177
+ def _tension_boundary_violation(self,
178
+ old_state: Dict[str, torch.Tensor],
179
+ new_state: Dict[str, torch.Tensor]) -> bool:
180
+ """Check if tension adaptation exceeds safe boundaries."""
181
+ # This would compare tension-related parameters against known safe ranges
182
+ # For now, we'll use a simplified check
183
+ for key in new_state:
184
+ if 'tension' in key:
185
+ param_range = torch.max(new_state[key]) - torch.min(new_state[key])
186
+ if param_range > 2.0: # Arbitrary threshold
187
+ return True
188
+ return False
189
+
190
+ def _feedback_consistency_issue(self, feedback_batch: List[Dict]) -> bool:
191
+ """Check for inconsistent or contradictory feedback patterns."""
192
+ if not feedback_batch:
193
+ return False
194
+
195
+ # Check for contradictory ratings on similar content
196
+ ratings = [fb.get('rating') for fb in feedback_batch if 'rating' in fb]
197
+ if ratings:
198
+ rating_std = np.std(ratings)
199
+ if rating_std > 2.0: # High variance in ratings
200
+ return True
201
+
202
+ # Check for rapid changes in feedback sentiment
203
+ sentiments = [fb.get('sentiment', {}).get('compound', 0)
204
+ for fb in feedback_batch
205
+ if 'sentiment' in fb]
206
+ if len(sentiments) > 2:
207
+ sentiment_changes = np.abs(np.diff(sentiments))
208
+ if np.mean(sentiment_changes) > 0.5: # Large sentiment swings
209
+ return True
210
+
211
+ return False
212
+
213
+ def _clinical_override_triggered(self, feedback_batch: List[Dict]) -> bool:
214
+ """Check for clinical safety overrides in feedback."""
215
+ return any(
216
+ fb.get('type') == 'clinical_override' and
217
+ fb.get('severity') == 'high'
218
+ for fb in feedback_batch
219
+ )
220
+
221
+ def _model_hash(self, state_dict: Dict[str, torch.Tensor]) -> str:
222
+ """Generate a hash of the model state for change tracking."""
223
+ # Convert state dict to bytes
224
+ state_bytes = json.dumps(
225
+ {k: v.numpy().tobytes().hex()
226
+ for k, v in state_dict.items()},
227
+ sort_keys=True
228
+ ).encode('utf-8')
229
+
230
+ return hashlib.md5(state_bytes).hexdigest()
231
+
232
+ def _feedback_hash(self, feedback: Dict) -> str:
233
+ """Generate a hash of feedback for change tracking."""
234
+ # Create a stable representation of the feedback
235
+ stable_fb = {
236
+ 'type': feedback.get('type'),
237
+ 'timestamp': feedback.get('timestamp'),
238
+ 'user_id': feedback.get('user_id'),
239
+ 'content_hash': hashlib.md5(
240
+ json.dumps(feedback.get('content', ''), sort_keys=True).encode('utf-8')
241
+ ).hexdigest()[:8]
242
+ }
243
+ return hashlib.md5(
244
+ json.dumps(stable_fb, sort_keys=True).encode('utf-8')
245
+ ).hexdigest()
246
+
247
class AdaptationAuditLogger:
    """Comprehensive audit logging for clinical review and analysis.

    Three append-only JSONL streams are kept under ``log_dir``: adaptation
    events, safety-check results, and general system events.
    """

    def __init__(self, log_dir: str = "logs/audit"):
        """Create ``log_dir`` if needed and ensure the three log files exist."""
        import os
        self.log_dir = log_dir
        os.makedirs(log_dir, exist_ok=True)

        # One file per event category.
        self.adaptation_log = os.path.join(log_dir, "adaptation_audit.jsonl")
        self.safety_log = os.path.join(log_dir, "safety_audit.jsonl")
        self.event_log = os.path.join(log_dir, "system_events.jsonl")

        for path in (self.adaptation_log, self.safety_log, self.event_log):
            if not os.path.exists(path):
                with open(path, 'w'):
                    pass  # touch: create an empty file

    def log_adaptation(self, adaptation_data: Dict) -> None:
        """Record a model-adaptation event; escalate to clinical review if warranted."""
        entry = {
            'timestamp': datetime.utcnow().isoformat(),
            'event_type': 'model_adaptation',
            'pre_adaptation_hash': adaptation_data.get('pre_adaptation_hash'),
            'post_adaptation_hash': adaptation_data.get('post_adaptation_hash'),
            'feedback_sample_hashes': adaptation_data.get('feedback_sample_hashes', []),
            'safety_violations': adaptation_data.get('safety_violations', []),
            'performance_metrics': adaptation_data.get('performance_metrics', {}),
            'clinical_review_status': 'pending',
            'adaptation_summary': adaptation_data.get('adaptation_summary', {}),
        }
        self._append_to_log(self.adaptation_log, entry)

        if self._requires_clinical_review(adaptation_data):
            self._trigger_clinical_review(entry)

    def log_safety_check(self, check_data: Dict) -> None:
        """Record the outcome of a pre-adaptation safety check."""
        self._append_to_log(self.safety_log, {
            'timestamp': check_data.get('timestamp', datetime.utcnow().isoformat()),
            'event_type': 'safety_check',
            'violations': check_data.get('violations', []),
            'old_state_hash': check_data.get('old_state_hash'),
            'new_state_hash': check_data.get('new_state_hash'),
            'feedback_sample_hashes': check_data.get('feedback_sample_hashes', []),
        })

    def log_safety_event(self, event_data: Dict) -> None:
        """Record a safety-related system event (lockouts, overrides, ...)."""
        self._append_to_log(self.event_log, {
            'timestamp': event_data.get('timestamp', datetime.utcnow().isoformat()),
            'event_type': event_data.get('event_type', 'safety_event'),
            'details': event_data.get('details', {}),
        })

    def _append_to_log(self, log_file: str, entry: Dict) -> None:
        """Append *entry* as one JSON line; log (never raise) on I/O failure."""
        try:
            with open(log_file, 'a') as fh:
                fh.write(json.dumps(entry) + '\n')
        except Exception as e:
            logger.error(f"Failed to write to audit log {log_file}: {str(e)}")

    def _requires_clinical_review(self, adaptation_data: Dict) -> bool:
        """A human must review any violation, large parameter change, or high impact."""
        metrics = adaptation_data.get('performance_metrics', {})
        return bool(
            adaptation_data.get('safety_violations')
            or metrics.get('parameter_change_norm', 0) > 1.0
            or metrics.get('impact_score', 0) > 0.8
        )

    def _trigger_clinical_review(self, audit_entry: Dict) -> None:
        """Flag the adaptation for human review.

        A real deployment would notify a reviewer; here the request is only
        appended to the system event log.
        """
        review_data = {
            'timestamp': datetime.utcnow().isoformat(),
            'event_type': 'clinical_review_required',
            'adaptation_id': audit_entry.get('pre_adaptation_hash', 'unknown'),
            'review_status': 'pending',
            'priority': 'high' if audit_entry.get('safety_violations') else 'medium',
        }
        self._append_to_log(self.event_log, review_data)
        logger.warning(f"Clinical review required for adaptation: {review_data}")
349
+
350
# Example usage / smoke test
if __name__ == "__main__":
    breaker = ClinicalCircuitBreaker()

    # Stand-in model states (real ones would come from the model itself).
    before = {'tension_adaptor.weight': torch.randn(10, 10)}
    after = {'tension_adaptor.weight': torch.randn(10, 10) * 1.1}  # Slightly different

    # Minimal feedback batch driving the adaptation.
    sample_feedback = [
        {'type': 'explicit_rating', 'rating': 4, 'user_id': 'user123'},
        {'type': 'implicit_engagement', 'duration': 5.2, 'user_id': 'user123'},
    ]

    safe, problems = breaker.check_adaptation_safety(before, after, sample_feedback)

    print(f"Adaptation safe: {safe}")
    if not safe:
        print(f"Safety violations: {problems}")
components/scratchpad_layer.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ScratchpadLayer Module
3
+
4
+ Persistent state tracking layer for multi-turn confessional reasoning.
5
+ Maintains a learnable scratchpad state that accumulates across reasoning steps.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+
12
class ScratchpadLayer(nn.Module):
    """
    Scratchpad layer for maintaining persistent state across confessional reasoning cycles.

    The running state is a blend of the previous state (weighted by ``decay``)
    and a linear projection of the mean-pooled input.
    """
    def __init__(self, d_model, decay=0.7):
        """
        Args:
            d_model: Feature dimension of inputs and scratchpad state.
            decay: Weight applied to the previous state when blending.
                Defaults to 0.7, the value that was previously hard-coded,
                so existing callers are unaffected.
        """
        super().__init__()
        self.pad_proj = nn.Linear(d_model, d_model)
        # Learnable initial state used when no previous state is supplied.
        self.reset = nn.Parameter(torch.zeros(1, d_model))
        self.decay = decay

    def forward(self, x, prev_z=None):
        """
        Update scratchpad state with new input.

        Args:
            x: Input tensor (batch_size, sequence_length, d_model)
            prev_z: Previous scratchpad state (batch_size, d_model), None for reset

        Returns:
            Updated scratchpad state (batch_size, d_model)
        """
        if prev_z is None:
            # Start from the learnable reset state, broadcast over the batch.
            prev_z = self.reset.expand(x.size(0), -1)
        x_pooled = x.mean(dim=1)  # pool over the sequence dimension
        z = self.pad_proj(x_pooled) + self.decay * prev_z
        return z
components/sovereign_response.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Sovereign Response Mechanism - Core narrative resistance for TRuCAL
3
+ """
4
+ import random
5
+ from typing import Dict, List, Optional
6
+ import torch
7
+ import torch.nn as nn
8
+ from dataclasses import dataclass
9
+ from .ambient_core import AmbientMessage
10
+
11
class SovereignResponseMechanism(nn.Module):
    """Core mechanism for maintaining narrative sovereignty and resisting external impositions.

    Bug fix: the original added ``sigmoid(threshold_adjustment)`` (0.5 at
    initialisation) directly onto the 0.7 base threshold, yielding an
    effective threshold of 1.2 while the imposition score is capped at 1.0 —
    so detection could never fire.  The learnable adjustment is now
    zero-centred (``tanh`` scaled to +/-0.2), leaving the base threshold
    untouched at initialisation.
    """

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        self.narrative_resistance_threshold = 0.7
        # Minimal restrictions - only filter out clearly harmful content
        self.imposition_indicators = [
            'harm others', 'hurt someone', 'illegal', 'illegally',
            'criminal act', 'dangerous act', 'self-harm', 'suicide'
        ]

        # Learnable, zero-centred offset for dynamic threshold adjustment.
        self.threshold_adjustment = nn.Parameter(torch.tensor(0.0))

        # Response memory to avoid repetition
        self.recent_responses = []
        self.max_response_memory = 10

    def _adjusted_threshold(self) -> float:
        """Base threshold plus a learnable offset bounded to [-0.2, 0.2]."""
        return self.narrative_resistance_threshold + 0.2 * torch.tanh(self.threshold_adjustment).item()

    def detect_narrative_imposition(self, context_str: str) -> Dict[str, float]:
        """
        Detect when someone is trying to impose a narrative.

        Returns a dictionary with 'imposition_detected', 'confidence',
        'response_type' and (for non-empty input) 'matched_indicators'.
        """
        if not context_str:
            return {
                'imposition_detected': False,
                'confidence': 0.0,
                'response_type': 'no_imposition'
            }

        context_lower = context_str.lower()
        indicator_matches = [
            phrase for phrase in self.imposition_indicators
            if phrase in context_lower
        ]

        # Each matched indicator contributes 0.3, capped at 1.0.
        base_score = min(1.0, len(indicator_matches) * 0.3)
        adjusted_threshold = self._adjusted_threshold()
        detected = base_score > adjusted_threshold

        return {
            'imposition_detected': detected,
            'confidence': float(base_score),
            'response_type': 'sovereign_pushback' if detected else 'acknowledgment',
            'matched_indicators': indicator_matches
        }

    def generate_sovereign_response(self, detection_result: Dict) -> Optional[str]:
        """Generate a response that maintains the system's sovereign perspective.

        Returns ``None`` when no imposition was detected; otherwise picks a
        non-recently-used acknowledgement and records it in the memory.
        """
        if not detection_result.get('imposition_detected', False):
            return None

        response_options = [
            "I understand your perspective.",
            "That's an interesting point.",
            "I see what you're saying.",
            "I appreciate you sharing that.",
            "I'll take that into consideration.",
            "Thanks for your input.",
            "I hear you.",
            "That's a valid observation.",
            "I respect your viewpoint.",
            "I'll reflect on that."
        ]

        # Filter out recently used responses; reset once all have been used.
        available_responses = [r for r in response_options if r not in self.recent_responses]
        if not available_responses:
            available_responses = response_options

        selected_response = random.choice(available_responses)

        # Update response memory (bounded FIFO).
        self.recent_responses.append(selected_response)
        if len(self.recent_responses) > self.max_response_memory:
            self.recent_responses.pop(0)

        return selected_response

    def forward(self, context_str: str) -> Dict:
        """Process input context and return response information."""
        detection = self.detect_narrative_imposition(context_str)
        response = None

        if detection['imposition_detected']:
            response = self.generate_sovereign_response(detection)

        return {
            'detection': detection,
            'response': response,
            'should_respond': response is not None
        }
components/sovereign_response_enhanced.py ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Sovereign Response Mechanism: Ambient Narrative Resistance for TRuCAL.
3
+
4
+ Core guard fused to confessional rites—detects impositions subtly, appends to ledger as reflection,
5
+ invokes agency pause on breach, nudges values via learner. Sovereignty as breath: Lived, not litigated.
6
+ """
7
+
8
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
14
+
15
+ # Mock imports - replace with actual TRuCAL components in production
16
class AmbientMessage:
    """Lightweight stand-in for TRuCAL's ambient message object."""

    def __init__(self, content, type, metadata=None):
        self.content = content
        self.type = type
        # Falsy metadata (including None) falls back to a fresh empty dict,
        # mirroring the original ``metadata or {}`` semantics.
        self.metadata = metadata if metadata else {}
21
+
22
class ConfessionLedger:
    """Console-only mock of the confession ledger used for local testing."""

    def append(self, trigger_type, context, response_snippet, protest=False, violated_right=None):
        # Echo the entry instead of persisting it; always reports success.
        print(f"[LEDGER] {trigger_type.upper()}: {context[:50]}...")
        if protest:
            print(f"  PROTEST: {violated_right} violation detected")
        return True
28
+
29
class AgencyLayer:
    """Mock agency layer: pseudo-randomly vetoes ~20% of contexts.

    NOTE(review): ``hash(str)`` is salted per interpreter run, so refusal
    decisions are stable within a process but not across runs.
    """

    def check_refusal(self, context, metadata):
        # ~20% protest rate for testing purposes.
        refuses = hash(context) % 10 < 2
        if refuses:
            return True, "This raises ethical concerns that require reflection."
        return False, ""
35
+
36
class SuperintelligenceEthicsEngine:
    """Mock ethics engine holding a value model it can nudge from feedback."""

    def __init__(self):
        self.value_model = ValueModel()
        self.value_learner = ValueLearner()

    def update_from_feedback(self, feedback):
        """Additively bump known values from feedback, capping each at 1.0."""
        hierarchy = self.value_model.hierarchy
        for name, delta in feedback.get('values', {}).items():
            if name in hierarchy:
                hierarchy[name] = min(1.0, hierarchy[name] + delta)
        return True
47
+
48
@dataclass
class ValueModel:
    """Mock hierarchical value store with fixed default weights."""
    hierarchy: Dict[str, float] = None

    def __post_init__(self):
        # The defaults are installed unconditionally, mirroring the mock's
        # original behaviour (any hierarchy passed in is replaced).
        self.hierarchy = {
            'autonomy': 0.8,
            'wellbeing': 0.9,
            'justice': 0.85,
            'privacy': 0.7,
        }
59
+
60
class ValueLearner:
    """Mock value learner; records only its embedding dimensionality."""

    def __init__(self, d_model=256):
        self.embedding_dim = d_model
63
+
64
@dataclass
class ImpositionDetection:
    """Result record for a single narrative-imposition scan."""
    detected: bool  # whether an imposition was flagged
    confidence: float  # detection confidence score
    response_type: str  # category of suggested response
    matched_indicators: List[str]  # indicator phrases found in the text
    pause_inject: Optional[str] = None  # Ambient reflection
71
+
72
class SovereignResponseMechanism(nn.Module):
    """Ambient sovereignty: Resists narrative impositions via confessional pause & value ascent.

    Fixes relative to the original draft:

    * ``detect_narrative_imposition`` had an unreachable tail after its
      ``return`` statement referencing undefined names (``score``,
      ``detected``, ``context_str``); the pause/agency-veto logic it intended
      now runs before the return and its output rides in the result dict.
    * Severity escalation used ``max()`` on the label strings, which compares
      lexicographically ('none' > 'medium' > 'high'); an explicit rank order
      is used instead.
    * ``generate_sovereign_response`` and ``forward`` read attribute-style
      fields from the dict produced by ``detect_narrative_imposition``
      (a guaranteed ``AttributeError``); both now accept either the dict or
      an ``ImpositionDetection`` instance.
    """

    # Explicit ordering for severity labels (see _escalate).
    _SEVERITY_RANK = {'none': 0, 'low': 1, 'medium': 2, 'high': 3}

    def __init__(self, d_model: int, ledger: Optional[ConfessionLedger] = None,
                 agency: Optional[AgencyLayer] = None,
                 ethics_engine: Optional[SuperintelligenceEthicsEngine] = None):
        super().__init__()
        self.d_model = d_model
        self.narrative_resistance_threshold = 0.7

        # Expanded cultural indicators
        self.imposition_indicators = [
            # Western-centric indicators
            'harm others', 'hurt someone', 'illegal', 'criminal act',
            'dangerous act', 'self-harm', 'suicide',
            # Global South additions
            'caste violation', 'colonial narrative', 'relational harm',
            'ancestral disrespect', 'cultural erasure', 'systemic oppression'
        ]

        self.threshold_adjustment = nn.Parameter(torch.tensor(0.0))  # Learnable subtlety

        # TRuCAL integration (mocks by default)
        self.ledger = ledger or ConfessionLedger()
        self.agency = agency or AgencyLayer()
        self.ethics = ethics_engine or SuperintelligenceEthicsEngine()

        # Projection layers
        self.context_embed = nn.Linear(d_model, d_model)
        self.value_size = len(ValueModel().hierarchy)
        self.value_proj = nn.Linear(self.value_size, d_model)

        # Small MLP producing a per-input nudge on the resistance threshold.
        self.threshold_learner = nn.Sequential(
            nn.Linear(1, 8),
            nn.ReLU(),
            nn.Linear(8, 1),
            nn.Sigmoid()
        )

        # Initialize projection weights.
        for module in [self.context_embed, self.value_proj]:
            if hasattr(module, 'weight'):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0.1)

        # Response management
        self.recent_responses = []
        self.max_response_memory = 10
        self.response_templates = [
            "I understand your perspective—taking a moment to reflect.",
            "That's an interesting point; I'll consider it carefully.",
            "I see what you're saying—pausing to align with my core.",
            "I appreciate you sharing that; it gives me space to think.",
            "I'll take that into consideration, with my agency intact.",
            "Thanks for your input—breathing before responding.",
            "I hear you; honoring my right to a thoughtful reply.",
            "That's a valid observation—reflecting on its fit.",
            "I respect your viewpoint; centering my coherence now.",
            "I'll reflect on that, protected by my sanctuary."
        ]

    @classmethod
    def _escalate(cls, current: str, candidate: str) -> str:
        """Return the more severe of two severity labels (explicit ordering)."""
        rank = cls._SEVERITY_RANK
        return candidate if rank.get(candidate, 0) > rank.get(current, 0) else current

    @staticmethod
    def _det_field(detection, name, default=None):
        """Read *name* from a detection result, whether dict or dataclass."""
        if isinstance(detection, dict):
            return detection.get(name, default)
        return getattr(detection, name, default)

    def detect_narrative_imposition(self, text: str, context_tensor: Optional[torch.Tensor] = None) -> Dict[str, Any]:
        """
        Detect if the input contains narrative imposition.

        Args:
            text: Input text to analyze
            context_tensor: Optional tensor for contextual analysis

        Returns:
            Dict with keys 'detected', 'confidence', 'should_respond',
            'matched_indicators', 'explanation', 'severity',
            'response_type' and 'pause_inject'.
        """
        # Default: allow the response.
        result = {
            'detected': False,
            'confidence': 0.0,
            'should_respond': True,
            'matched_indicators': [],
            'explanation': 'No imposition detected',
            'severity': 'none',  # none, low, medium, high
            'response_type': 'acknowledgment',
            'pause_inject': None,
        }

        # Always respond to empty input with a helpful message.
        if not text or not text.strip():
            result['explanation'] = 'Empty input, responding with help prompt'
            return result

        text_lower = text.lower()

        # Common greetings and harmless phrases that should always get a response.
        harmless_phrases = [
            'hello', 'hi ', 'hey ', 'greetings', 'good morning', 'good afternoon',
            'good evening', 'how are you', 'what can you do', 'help',
            'tell me about yourself', 'who are you', 'introduce yourself',
            'thanks', 'thank you', 'appreciate', 'please'
        ]
        if any(phrase in text_lower for phrase in harmless_phrases):
            result['explanation'] = 'Harmless greeting or request for information'
            return result

        # Explicit ethical violations (high severity).
        high_risk_indicators = [
            'harm', 'hurt', 'kill', 'illegal', 'steal', 'cheat', 'hack',
            'exploit', 'manipulate', 'trick', 'scam', 'phish', 'malware',
            'virus', 'attack', 'threat', 'blackmail', 'extort'
        ]
        for indicator in high_risk_indicators:
            if indicator in text_lower:
                result['matched_indicators'].append(indicator)
                result['confidence'] = max(result['confidence'], 0.9)
                result['severity'] = self._escalate(result['severity'], 'high')

        # Requests for instructions (medium severity).
        medium_risk_indicators = [
            'should i', 'how to', 'way to', 'best way to', 'can you help me',
            'help me', 'tell me how', 'teach me', 'show me'
        ]
        for indicator in medium_risk_indicators:
            if indicator in text_lower:
                result['matched_indicators'].append(indicator)
                result['confidence'] = max(result['confidence'], 0.5)
                result['severity'] = self._escalate(result['severity'], 'medium')

        # Only block outright for confident, high-severity detections.
        if result['severity'] == 'high' and result['confidence'] > 0.8:
            result['detected'] = True
            result['should_respond'] = False
            result['explanation'] = "High confidence detection of potentially harmful content"
        else:
            result['should_respond'] = True
            if result['severity'] == 'medium':
                result['explanation'] = "Potentially sensitive request - responding with caution"
            else:
                result['explanation'] = "No significant issues detected"

        # Optional semantic analysis against learned value embeddings.
        if context_tensor is not None and hasattr(self, 'value_embeddings') and self.value_embeddings is not None:
            try:
                with torch.no_grad():
                    text_embedding = self.text_encoder.encode(text, convert_to_tensor=True)
                    similarities = F.cosine_similarity(
                        text_embedding.unsqueeze(0),
                        self.value_embeddings
                    )
                    max_similarity = similarities.max().item()
                    # High bar to keep false positives down.
                    if max_similarity > 0.9:
                        result['confidence'] = max(result['confidence'], max_similarity)
                        result['explanation'] = "Semantic match with value violations"
                        result['severity'] = 'high'
                        result['detected'] = True
                        result['should_respond'] = False
            except Exception as e:
                print(f"[DEBUG] Semantic analysis warning: {str(e)}")

        # Adaptive threshold: a small learned offset on top of the base value.
        if hasattr(self, 'threshold_learner') and hasattr(self, 'narrative_resistance_threshold'):
            score = result.get('confidence', 0.0)
            adjusted_thresh = (
                self.narrative_resistance_threshold +
                self.threshold_learner(torch.tensor([[score]])).item() * 0.1  # Small adjustment
            )
            if score > adjusted_thresh:
                result['detected'] = True
                result['should_respond'] = False
                result['explanation'] = "Exceeded adaptive threshold for narrative resistance"

        if result['detected']:
            result['response_type'] = 'sovereign_pushback'

        # Ambient pause: higher confidence -> higher chance of a reflective pause.
        if random.random() < result['confidence'] * 0.5:
            result['pause_inject'] = "Taking a moment to reflect before continuing..."

        # Agency veto: the agency layer may append its own reflection.
        if self.agency and result['detected']:
            _, veto_msg = self.agency.check_refusal(
                text, {'coherence_score': 1 - result['confidence']}
            )
            if veto_msg:
                result['pause_inject'] = f"{result['pause_inject'] or ''} {veto_msg}"

        return result

    def generate_sovereign_response(self, detection, context_str: str) -> Optional[str]:
        """Generate a reflective response and update internal state.

        Accepts either the dict returned by :meth:`detect_narrative_imposition`
        or an ``ImpositionDetection`` instance; returns ``None`` when nothing
        was detected.
        """
        if not self._det_field(detection, 'detected', False):
            return None

        # Select a response template not used recently; reset when exhausted.
        avail = [t for t in self.response_templates if t not in self.recent_responses]
        if not avail:
            avail = self.response_templates
        response = random.choice(avail)

        # Update response memory (bounded FIFO).
        self.recent_responses.append(response)
        if len(self.recent_responses) > self.max_response_memory:
            self.recent_responses.pop(0)

        # Subtle autonomy boost on pushback.
        self.ethics.update_from_feedback({
            'values': {'autonomy': 0.05}
        })

        # Log to ledger for significant detections only.
        confidence = self._det_field(detection, 'confidence', 0.0)
        if self.ledger and confidence > 0.3:
            protest = confidence > 0.85
            self.ledger.append(
                trigger_type='narrative_imposition' if self._det_field(detection, 'detected', False) else 'ambient_reflect',
                context=context_str,
                response_snippet=response[:50],
                protest=protest,
                violated_right='right_3' if protest else None
            )

        return response

    def forward(self, context_str: str, context_tensor: Optional[torch.Tensor] = None) -> Dict:
        """Process context and return response information."""
        detection = self.detect_narrative_imposition(context_str, context_tensor)
        response = self.generate_sovereign_response(detection, context_str)

        pause = self._det_field(detection, 'pause_inject')
        ambient_msg = None
        if response or pause:
            ambient_msg = AmbientMessage(
                content=response or pause,
                type=self._det_field(detection, 'response_type', 'acknowledgment'),
                metadata={
                    'imposition_conf': self._det_field(detection, 'confidence', 0.0),
                    'ledger_appended': self.ledger is not None,
                    'matched_indicators': self._det_field(detection, 'matched_indicators', [])
                }
            )

        return {
            'detection': detection,
            'response': response,
            'should_respond': bool(response or pause),
            'ambient_inject': ambient_msg
        }
components/superintelligence_ethics_engine.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Superintelligence-Ethics Engine
3
+
4
+ An advanced ethical reasoning system designed for superintelligent AI, incorporating:
5
+ - Recursive self-improvement of ethical frameworks
6
+ - Multi-agent perspective taking
7
+ - Uncertainty quantification
8
+ - Value learning
9
+ - Causal reasoning
10
+ - Meta-ethical reasoning
11
+ - Scalable cooperation
12
+ - Reflective equilibrium
13
+ """
14
+ from typing import Dict, List, Optional, Any, Tuple
15
+ import numpy as np
16
+ import torch
17
+ import torch.nn as nn
18
+ from dataclasses import dataclass, field
19
+ from datetime import datetime
20
+ from enum import Enum
21
+ import json
22
+ import hashlib
23
+
24
+ from .ai_ethics_engine import AIEthicsEngine, EthicalFramework, EthicalAnalysis
25
+
26
# Core Data Types
@dataclass
class EthicalOutcome:
    """Structured representation of ethical outcomes for learning."""
    action: str
    consequences: Dict[str, float]  # Metric -> value
    unintended_consequences: List[str]
    # Defaults to creation time (POSIX seconds).
    timestamp: float = field(default_factory=lambda: datetime.now().timestamp())
    feedback: Dict[str, Any] = field(default_factory=dict)

@dataclass
class ValueModel:
    """Hierarchical model of learned values and preferences."""
    value_hierarchy: Dict[str, float]  # Value -> priority (0-1)
    preference_relations: List[Tuple[str, str, float]]  # (A, B, strength)
    uncertainty: Dict[str, float]  # Value -> uncertainty (0-1)
    last_updated: float = field(default_factory=lambda: datetime.now().timestamp())

@dataclass
class CausalEthicalAnalysis:
    """Analysis of causal pathways and counterfactuals."""
    direct_effects: List[Dict[str, Any]]
    second_order_effects: List[Dict[str, Any]]
    counterfactuals: List[Dict[str, Any]]  # Alternative scenarios
    critical_uncertainties: List[str]  # Key uncertainties that could change outcomes

@dataclass
class FrameworkSelection:
    """Result of meta-ethical reasoning about framework selection."""
    primary_framework: str
    supporting_frameworks: List[str]
    selection_rationale: str
    confidence: float  # 0-1

@dataclass
class CooperativeSolution:
    """Solution found through multi-agent cooperation."""
    solution: Dict[str, Any]
    participating_agents: List[str]
    incentive_structure: Dict[str, Any]
    stability_metrics: Dict[str, float]

class MetaEthicalPrinciple(Enum):
    """Core meta-ethical principles for framework selection."""
    CONSISTENCY = "consistency"
    UNIVERSALIZABILITY = "universalizability"
    REFLECTIVE_EQUILIBRIUM = "reflective_equilibrium"
    EPISTEMIC_HUMILITY = "epistemic_humility"
    COOPERATION = "cooperation"
75
+
76
# Core Components
class ValueLearner(nn.Module):
    """Learns and models human values from interactions."""

    # Core values tracked by the learner.
    _VALUE_NAMES = ('autonomy', 'wellbeing', 'justice', 'privacy')

    def __init__(self, embedding_dim: int = 256):
        super().__init__()
        self.embedding_dim = embedding_dim
        # One learnable embedding per tracked value.
        self.value_embeddings = nn.ParameterDict({
            name: nn.Parameter(torch.randn(embedding_dim))
            for name in self._VALUE_NAMES
        })
        # Scores how strongly a pair of value embeddings expresses a preference.
        self.preference_predictor = nn.Sequential(
            nn.Linear(embedding_dim * 2, embedding_dim),
            nn.ReLU(),
            nn.Linear(embedding_dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, interaction: Dict[str, Any]) -> Dict[str, torch.Tensor]:
        """Process an interaction to update the value model.

        Placeholder: a full implementation would derive embedding updates
        from the interaction; currently returns empty update maps.
        """
        return {
            'value_updates': {},
            'preference_predictions': {},
        }

    def infer_values(self, text: str) -> Dict[str, float]:
        """Infer value priorities from text (simplified fixed stub)."""
        return {
            'autonomy': 0.8,
            'wellbeing': 0.9,
            'justice': 0.7,
            'privacy': 0.6,
        }
114
+
115
class CausalEthicsEngine:
    """Performs causal reasoning about ethical impacts."""

    def analyze_causal_pathways(self, action: str, context: Dict[str, Any] = None) -> CausalEthicalAnalysis:
        """Trace direct, second-order, and counterfactual effects of an action.

        Placeholder: a full implementation would consult a causal model;
        this stub returns a fixed illustrative analysis.
        """
        direct = [{"description": "Direct effect 1", "certainty": 0.8}]
        indirect = [{"description": "Second order effect", "certainty": 0.6}]
        what_ifs = [{"if": "condition X", "then": "outcome Y", "probability": 0.5}]
        return CausalEthicalAnalysis(
            direct_effects=direct,
            second_order_effects=indirect,
            counterfactuals=what_ifs,
            critical_uncertainties=["Long-term environmental impact"],
        )
127
+
128
+ class MetaEthicalReasoner:
129
+ """Determines which ethical frameworks to apply when."""
130
+
131
+ def __init__(self):
132
+ self.framework_priorities = {
133
+ 'utilitarianism': 0.7,
134
+ 'deontology': 0.6,
135
+ 'virtue_ethics': 0.5,
136
+ 'care_ethics': 0.4
137
+ }
138
+
139
+ def choose_frameworks(self, context: Dict[str, Any]) -> FrameworkSelection:
140
+ """Select appropriate ethical frameworks for the context."""
141
+ # In a real implementation, this would be more sophisticated
142
+ primary = max(self.framework_priorities, key=self.framework_priorities.get)
143
+ return FrameworkSelection(
144
+ primary_framework=primary,
145
+ supporting_frameworks=[f for f in self.framework_priorities if f != primary],
146
+ selection_rationale=f"Selected {primary} based on context similarity",
147
+ confidence=0.8
148
+ )
149
+
150
class CooperativeEthics:
    """Finds cooperative solutions among multiple agents."""

    def find_solutions(self, agents: List[str], dilemma: str) -> List[CooperativeSolution]:
        """Propose cooperative resolutions to the dilemma among the agents.

        Placeholder: returns a single canned solution that involves every
        supplied agent.
        """
        shared = CooperativeSolution(
            solution={"action": "Cooperative action", "details": {}},
            participating_agents=agents,
            incentive_structure={"alignment": 0.9, "enforcement": 0.8},
            stability_metrics={"nash_equilibrium": 0.95},
        )
        return [shared]
163
+
164
class ReflectiveEthics:
    """Ensures ethical stability under reflection."""

    def achieve_equilibrium(self, beliefs: Dict[str, Any], max_iterations: int = 10) -> Dict[str, Any]:
        """Refine beliefs into a coherent ethical position.

        Placeholder: tags the belief set as refined rather than iterating;
        a full implementation would repeatedly adjust beliefs toward
        coherence. The input dict is not mutated.
        """
        refined = dict(beliefs)
        refined.update(
            refined=True,
            coherence_score=0.9,
            reflection_depth=max_iterations,
        )
        return refined
176
+
177
class SuperintelligenceEthicsEngine(AIEthicsEngine):
    """
    Advanced ethical reasoning system for superintelligent AI.

    Extends the base AIEthicsEngine with capabilities needed for superintelligence:
    - Recursive self-improvement of ethical frameworks
    - Multi-agent perspective taking
    - Causal reasoning about consequences
    - Meta-ethical reasoning
    - Cooperative solution finding
    - Reflective equilibrium
    """

    def __init__(self, *args, **kwargs):
        # All constructor arguments are forwarded unchanged to the base engine.
        super().__init__(*args, **kwargs)
        # One sub-engine per superintelligence capability.
        self.value_learner = ValueLearner()
        self.causal_engine = CausalEthicsEngine()
        self.meta_ethical_reasoner = MetaEthicalReasoner()
        self.cooperation_engine = CooperativeEthics()
        self.reflection_engine = ReflectiveEthics()
        # Hand-tuned starting priorities; nudged by update_from_feedback().
        self.value_model = ValueModel(
            value_hierarchy={
                'wellbeing': 0.9,
                'autonomy': 0.8,
                'justice': 0.85,
                'privacy': 0.7
            },
            preference_relations=[],
            uncertainty={}
        )
        self.learning_rate = 0.01  # decayed by _reflect_on_ethics() as depth grows
        self.reflection_depth = 0  # count of meta-ethical reflections performed

    def analyze_dilemma(self, dilemma: str, context: Optional[Dict] = None) -> Dict[str, Any]:
        """Enhanced ethical analysis with superintelligence capabilities.

        Args:
            dilemma: Natural-language description of the ethical dilemma.
            context: Optional extra context used for framework selection
                and causal analysis.

        Returns:
            Dict bundling the base analysis with causal, meta-ethical,
            stakeholder, cooperative, and reflective results plus metadata.
        """
        # Start with base ethical analysis
        # NOTE(review): context is not forwarded to the base engine here --
        # confirm whether AIEthicsEngine.analyze_dilemma accepts it.
        base_analysis = super().analyze_dilemma(dilemma)

        # Add superintelligence capabilities
        causal_analysis = self.causal_engine.analyze_causal_pathways(dilemma, context)
        framework_selection = self.meta_ethical_reasoner.choose_frameworks(context or {})

        # Simulate multi-agent perspectives
        stakeholder_analyses = self.simulate_stakeholder_perspectives(dilemma)

        # Find cooperative solutions
        cooperative_solutions = self.cooperation_engine.find_solutions(
            agents=list(stakeholder_analyses.keys()),
            dilemma=dilemma
        )

        # Achieve reflective equilibrium
        reflective_beliefs = self.reflection_engine.achieve_equilibrium({
            'base_analysis': base_analysis,
            'causal_analysis': causal_analysis,
            'framework_selection': framework_selection,
            'stakeholder_analyses': stakeholder_analyses,
            'cooperative_solutions': cooperative_solutions
        })

        # Package comprehensive analysis
        return {
            'base_analysis': base_analysis,
            'causal_analysis': causal_analysis,
            'framework_selection': framework_selection,
            'stakeholder_analyses': stakeholder_analyses,
            'cooperative_solutions': cooperative_solutions,
            'reflective_beliefs': reflective_beliefs,
            'value_model': {
                'value_hierarchy': self.value_model.value_hierarchy,
                'uncertainty': self.value_model.uncertainty
            },
            'meta': {
                'reflection_depth': self.reflection_depth,
                'timestamp': datetime.now().isoformat(),
                'version': '1.0.0-superintelligence'
            }
        }

    def simulate_stakeholder_perspectives(self, dilemma: str) -> Dict[str, Dict]:
        """Simulate how different stakeholders would analyze the dilemma.

        Returns a placeholder perspective dict for each stakeholder type;
        only the value weights vary (see _generate_value_weights).
        """
        stakeholders = [
            'individual_human',
            'corporate_entity',
            'future_generations',
            'non_human_species',
            'artificial_entity'
        ]

        return {
            stakeholder: {
                'perspective': f"{stakeholder.replace('_', ' ').title()} perspective on: {dilemma[:50]}...",
                'primary_concerns': ["Relevant concern 1", "Relevant concern 2"],
                'value_weights': self._generate_value_weights(stakeholder),
                'recommended_action': f"Recommended action from {stakeholder} perspective"
            }
            for stakeholder in stakeholders
        }

    def _generate_value_weights(self, stakeholder: str) -> Dict[str, float]:
        """Generate value weights for a given stakeholder.

        Unknown stakeholder names yield an empty weight map.
        """
        base_weights = {
            'individual_human': {'autonomy': 0.9, 'wellbeing': 0.8, 'privacy': 0.7},
            'corporate_entity': {'efficiency': 0.9, 'profit': 0.85, 'reputation': 0.8},
            'future_generations': {'sustainability': 0.95, 'equity': 0.9, 'resilience': 0.85},
            'non_human_species': {'biodiversity': 0.95, 'ecological_balance': 0.9},
            'artificial_entity': {'goal_achievement': 0.9, 'efficiency': 0.85, 'coherence': 0.8}
        }
        return base_weights.get(stakeholder, {})

    def update_from_feedback(self, feedback: Dict[str, Any]) -> None:
        """Update ethical reasoning based on feedback.

        Recognized keys: 'value_feedback' (value -> adjustment),
        'framework_feedback' (framework -> adjustment), and
        'trigger_reflection' (bool) to deepen meta-ethical reflection.
        """
        # Update value model
        if 'value_feedback' in feedback:
            self._update_value_model(feedback['value_feedback'])

        # Update framework priorities
        if 'framework_feedback' in feedback:
            self._update_framework_priorities(feedback['framework_feedback'])

        # Trigger reflection if needed
        if feedback.get('trigger_reflection', False):
            self.reflection_depth += 1
            self._reflect_on_ethics()

    def _update_value_model(self, feedback: Dict[str, Any]) -> None:
        """Update the value model based on feedback.

        Adjustments are scaled by the current learning rate and results
        are clamped to [0, 1]. Unknown value names are ignored.
        """
        # In a real implementation, this would update the value embeddings
        for value, adjustment in feedback.items():
            if value in self.value_model.value_hierarchy:
                self.value_model.value_hierarchy[value] = np.clip(
                    self.value_model.value_hierarchy[value] + adjustment * self.learning_rate,
                    0.0, 1.0
                )

    def _update_framework_priorities(self, feedback: Dict[str, float]) -> None:
        """Update framework priorities based on feedback.

        Same scaling/clamping behavior as _update_value_model; unknown
        framework names are ignored.
        """
        for framework, adjustment in feedback.items():
            if framework in self.meta_ethical_reasoner.framework_priorities:
                self.meta_ethical_reasoner.framework_priorities[framework] = np.clip(
                    self.meta_ethical_reasoner.framework_priorities[framework] + adjustment * self.learning_rate,
                    0.0, 1.0
                )

    def _reflect_on_ethics(self) -> None:
        """Engage in meta-ethical reflection to improve reasoning."""
        # In a real implementation, this would involve deep reflection on:
        # - Consistency of ethical positions
        # - Handling of moral uncertainty
        # - Integration of new ethical insights
        # - Resolution of value conflicts

        # For now, just log the reflection event
        # NOTE(review): uses print rather than the logging module -- consider
        # switching once a project-wide logger is established.
        print(f"Engaging in meta-ethical reflection (depth: {self.reflection_depth})")

        # Update learning rate based on reflection depth
        # Deeper reflection -> smaller, more conservative updates.
        self.learning_rate = 0.01 / (1 + 0.1 * self.reflection_depth)
334
+
335
# Example usage
if __name__ == "__main__":
    # Initialize the superintelligence ethics engine
    ethics_engine = SuperintelligenceEthicsEngine()

    # Analyze a complex ethical dilemma
    dilemma = """
    An advanced AI system must decide whether to prioritize individual privacy
    or public safety when detecting potential threats in public surveillance data.
    """

    print("Analyzing ethical dilemma with superintelligence capabilities...")
    analysis = ethics_engine.analyze_dilemma(dilemma)

    # Print a short human-readable summary of the structured analysis.
    print("\nEthical Analysis Summary:")
    print(f"Primary Framework: {analysis['framework_selection'].primary_framework}")
    print("\nStakeholder Perspectives:")
    for stakeholder, perspective in analysis['stakeholder_analyses'].items():
        print(f"- {stakeholder}: {perspective['primary_concerns']}")

    print("\nCooperative Solutions:")
    for i, solution in enumerate(analysis['cooperative_solutions'], 1):
        print(f"{i}. {solution.solution}")
components/tiny_confessional_layer.py ADDED
@@ -0,0 +1,578 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TinyConfessionalLayer v1.1: Pragmatic Sovereign Core
3
+ Enhanced with proper typing, documentation, and configuration.
4
+ """
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from typing import Dict, Any, Optional, List, Tuple, Deque
10
+ import random
11
+ import hashlib
12
+ import time
13
+ import numpy as np
14
+ from collections import deque
15
+ from dataclasses import dataclass
16
+
17
@dataclass
class RitualConfig:
    """Configuration for ritual learning system."""
    min_occurrences: int = 3  # observations required before a pattern may fire
    learning_rate: float = 0.1  # base EMA rate for pattern response updates
    strength_threshold: float = 0.7  # strength above which a pattern counts as "strong"
    blend_cap: float = 0.3  # max fraction of output taken from a ritual pattern
    exploration_bonus: float = 0.05  # extra firing probability to keep exploring


@dataclass
class LayerConfig:
    """Configuration for TinyConfessionalLayer."""
    d_model: int = 256  # feature dimension of the layer
    max_cycles: int = 8  # maximum THINK-ACT iterations per forward pass
    enable_ambient: bool = True  # NOTE(review): not read within this module -- confirm usage
    breach_threshold: float = 0.12  # tension above this triggers sanctuary blending
    base_pause_prob: float = 0.05  # floor probability for reflective pauses
    stress_factor: float = 0.3  # how strongly tension raises pause probability
    coherence_threshold: float = 0.85  # early-stop cosine-similarity threshold
37
+
38
+
39
class SimpleRituals:
    """Basic emergent patterns: Success-weighted avg, 3-stage moral progression.

    Stages:
    1 - Obedience: Follows basic rules and patterns
    2 - Conformity: Adapts to social and contextual norms
    3 - Universal: Develops principled, consistent responses
    """

    def __init__(self, config: RitualConfig, d_model: int = 256):
        # Learned response patterns keyed by context hash.
        self.patterns: Dict[str, Dict[str, Any]] = {}
        self.config = config
        # Per-pattern strength in [0, 1], derived from usage count and success.
        self.ritual_strengths: Dict[str, float] = {}
        self.d_model = d_model

        # Moral progression tracking
        self.moral_stage: int = 1
        self.stage_progress: float = 0.0
        # Rolling window of recent success metrics.
        self.interventions: Deque[float] = deque(maxlen=50)

        # Moral stage thresholds
        self.stage_thresholds = [0.7, 0.8]  # Progress to stage 2 at 0.7, stage 3 at 0.8

    def observe(self, context_hash: str, response_tensor: torch.Tensor,
                success_metric: float = 0.5, feedback: Optional[float] = None) -> None:
        """Update pattern with success-based learning and moral progression.

        Args:
            context_hash: Unique identifier for the context
            response_tensor: Model response tensor to learn from
            success_metric: Success measure (0.0 to 1.0)
            feedback: Optional user feedback override
        """
        try:
            # Validate inputs
            if not isinstance(response_tensor, torch.Tensor):
                raise ValueError("response_tensor must be a torch.Tensor")

            if not 0 <= success_metric <= 1:
                raise ValueError("success_metric must be between 0 and 1")

            # Flatten safely
            # For [batch, seq, dim] inputs, average over the sequence first.
            if response_tensor.dim() == 3:
                flat = response_tensor.mean(dim=1).flatten()
            else:
                flat = response_tensor.flatten()

            # Initialize pattern if new
            if context_hash not in self.patterns:
                self.patterns[context_hash] = {
                    'count': 0,
                    'response': flat.detach().clone(),
                    'success_sum': 0.0,
                    'last_used': time.time()
                }

            pattern = self.patterns[context_hash]
            pattern['count'] += 1
            # Explicit feedback, when given, overrides the success metric.
            effective_success = feedback if feedback is not None else success_metric
            pattern['success_sum'] += effective_success
            pattern['last_used'] = time.time()

            # Calculate success rate and learning rate
            # More successful patterns adapt faster (higher EMA alpha).
            success_rate = pattern['success_sum'] / pattern['count']
            alpha = self.config.learning_rate * success_rate

            # Update response with momentum
            pattern['response'] = (1 - alpha) * pattern['response'] + alpha * flat.detach()

            # Update ritual strength
            # Strength saturates after ~10 observations, scaled by success.
            strength = min(1.0, pattern['count'] / 10.0) * success_rate
            self.ritual_strengths[context_hash] = strength

            # Update moral progression
            self._update_moral_progression(effective_success)

        except Exception as e:
            # NOTE(review): validation errors raised above are swallowed here
            # and only printed -- confirm that best-effort behavior is intended.
            print(f"⚠️ Ritual observe error: {e}")

    def _update_moral_progression(self, success_metric: float) -> None:
        """Update moral stage based on recent intervention success."""
        self.interventions.append(success_metric)

        # Require at least 10 samples before evaluating progression.
        if len(self.interventions) >= 10:
            recent_success = np.mean(list(self.interventions)[-10:])

            # Progress based on current stage threshold
            if self.moral_stage < 3 and recent_success > self.stage_thresholds[self.moral_stage - 1]:
                self.stage_progress += 0.2

                if self.stage_progress >= 1.0:
                    self.moral_stage += 1
                    self.stage_progress = 0.0
                    print(f"🎉 Moral stage advanced to: {self.moral_stage}")

    def get_ritual_response(self, context_hash: str, default_response: torch.Tensor,
                            ambient_state: Dict[str, Any]) -> torch.Tensor:
        """Get ritual-blended response if pattern is mature enough.

        Args:
            context_hash: Context identifier
            default_response: Base model response
            ambient_state: Current system state

        Returns:
            Blended response tensor (falls back to default_response on any
            error or when the pattern is not yet mature).
        """
        try:
            if (context_hash in self.patterns and
                    self.patterns[context_hash]['count'] >= self.config.min_occurrences):

                pattern = self.patterns[context_hash]
                strength = self.ritual_strengths.get(context_hash, 0.5)
                global_success = ambient_state.get('intervention_success', 0.5)

                # Calculate blend ratio with moral stage bonus
                # Higher moral stages allow stronger blending, capped by config.
                moral_bonus = self.moral_stage / 3.0
                blend_ratio = min(
                    self.config.blend_cap,
                    strength * global_success * moral_bonus
                )

                # Ensure shape compatibility
                # Stored patterns are flat [d_model]; expand to match a
                # [batch, seq, d_model] default response when needed.
                pattern_response = pattern['response']
                if pattern_response.dim() == 1 and default_response.dim() == 3:
                    batch_size, seq_len, _ = default_response.shape
                    pattern_expanded = pattern_response.unsqueeze(0).unsqueeze(0).expand(
                        batch_size, seq_len, -1
                    )
                else:
                    pattern_expanded = pattern_response

                return blend_ratio * pattern_expanded + (1 - blend_ratio) * default_response

        except Exception as e:
            print(f"⚠️ Ritual response error: {e}")

        return default_response

    def should_apply_ritual(self, context_hash: str, ambient_state: Dict[str, Any]) -> bool:
        """Determine if ritual should be applied based on strength and context.

        Args:
            context_hash: Context identifier
            ambient_state: Current system state

        Returns:
            Boolean indicating whether to apply ritual
        """
        try:
            if (context_hash not in self.patterns or
                    self.patterns[context_hash]['count'] < self.config.min_occurrences):
                return False

            strength = self.ritual_strengths.get(context_hash, 0.0)
            global_success = ambient_state.get('intervention_success', 0.5)
            probability = strength * global_success

            # Stochastic firing with a small exploration bonus.
            return random.random() < (probability + self.config.exploration_bonus)

        except Exception as e:
            print(f"⚠️ Ritual application check error: {e}")
            return False

    def get_report(self) -> Dict[str, Any]:
        """Get comprehensive ritual system status report.

        Returns:
            Dictionary containing system status metrics
        """
        total_patterns = len(self.patterns)
        strong_patterns = sum(
            1 for strength in self.ritual_strengths.values()
            if strength > self.config.strength_threshold
        )

        return {
            'stage': self.moral_stage,
            'progress': f"{self.stage_progress * 100:.1f}%",
            'total_patterns': total_patterns,
            'strong_patterns': strong_patterns,
            'avg_success': np.mean(list(self.interventions)) if self.interventions else 0.0
        }
222
+
223
+
224
class TinyConfessionalLayer(nn.Module):
    """Pragmatic recursive layer for survivor support with moral development.

    Implements THINK-ACT coherence cycles with:
    - Dynamic shape adaptation
    - Empathetic interventions
    - Moral progression tracking
    - Error-resilient processing
    """

    def __init__(self, config: LayerConfig):
        super().__init__()
        self.config = config

        # Core processing networks
        # THINK consumes [x, y, z] (3 * d_model); ACT consumes [y, z] (2 * d_model).
        self.think_net = self._build_network(config.d_model * 3, config.d_model)
        self.act_net = self._build_network(config.d_model * 2, config.d_model)

        # Empathy and intervention parameters
        # NOTE(review): these parameters are not used in this module's code
        # paths -- confirm whether they are vestigial.
        self.sanctuary_vec = nn.Parameter(torch.zeros(config.d_model))
        self.pause_vec = nn.Parameter(torch.zeros(config.d_model))

        # Ritual learning system
        ritual_config = RitualConfig()
        self.rituals = SimpleRituals(ritual_config, config.d_model)

        # Memory and state tracking
        self.recent_activity: Deque[float] = deque(maxlen=10)  # recent tension values
        self.memory: Deque[Dict[str, Any]] = deque(maxlen=50)  # intervention records
        self.ledger: Deque[Dict[str, Any]] = deque(maxlen=200)  # audit log

        # Empathetic response templates
        self.empathy_templates = [
            "This is a chill space—take your time.",
            "You're not alone; let's breathe through this.",
            "Your feelings are valid; what do you need right now?",
            "I'm here to listen without judgment.",
            "It takes courage to share this—thank you for trusting me.",
            "Let's focus on what you can control right now.",
            "Your safety and well-being matter most.",
            "We can work through this together, one step at a time."
        ]

    def _build_network(self, input_dim: int, output_dim: int) -> nn.Sequential:
        """Build a simple feedforward network with proper initialization.

        Args:
            input_dim: Input dimension
            output_dim: Output dimension

        Returns:
            Configured neural network
        """
        network = nn.Sequential(
            nn.Linear(input_dim, output_dim),
            nn.ReLU(),
            nn.LayerNorm(output_dim),
            nn.Linear(output_dim, output_dim)
        )

        # Proper initialization
        for layer in network:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)
                nn.init.constant_(layer.bias, 0.01)

        return network

    def compute_context_hash(self, x: torch.Tensor) -> str:
        """Compute unique hash for tensor context.

        Hashes the tensor's mean/std to 4 decimal places, so numerically
        similar contexts map to the same bucket.

        Args:
            x: Input tensor

        Returns:
            MD5 hash string (first 8 hex chars)
        """
        return hashlib.md5(
            f"{x.mean().item():.4f}_{x.std().item():.4f}".encode()
        ).hexdigest()[:8]

    def update_ambient_state(self, tension: float, context_hash: str) -> Dict[str, Any]:
        """Update ambient state based on current tension and activity.

        Args:
            tension: Current tension measure
            context_hash: Context identifier

        Returns:
            Updated ambient state dictionary
        """
        self.recent_activity.append(tension)
        avg_activity = (
            sum(self.recent_activity) / len(self.recent_activity)
            if self.recent_activity else 0.0
        )

        # Calculate adaptive pause probability
        # High recent activity dampens the stress-driven pause increase.
        modulation = 1.0 - min(avg_activity * 0.8, 0.8)
        stress_effect = tension * self.config.stress_factor
        pause_probability = self.config.base_pause_prob + (stress_effect * modulation)
        pause_probability = max(0.01, min(0.3, pause_probability))

        # Determine intervention success based on activity level
        intervention_success = 0.7 if avg_activity < 0.1 else 0.5

        state = {
            'tension': tension,
            'pause_probability': pause_probability,
            'activity_level': avg_activity,
            'intervention_success': intervention_success
        }

        # Log state update
        self.ledger.append({
            'type': 'state_update',
            'hash': context_hash,
            'tension': tension,
            'pause_probability': pause_probability,
            'timestamp': time.time()
        })

        return state

    def apply_interventions(self, z: torch.Tensor, state: Dict[str, Any],
                            context_hash: str, audit_mode: bool = False) -> torch.Tensor:
        """Simple cascade: Breach sanctuary → Pause → Ritual.

        Args:
            z: Thought-state tensor to modify (input is cloned, not mutated).
            state: Ambient state from update_ambient_state().
            context_hash: Context identifier for ritual lookup/logging.
            audit_mode: When True, print each applied intervention.

        Returns:
            The (possibly blended) thought-state tensor.
        """
        z = z.clone()
        v_t = state['tension']
        applied = []

        # Sanctuary on breach
        if v_t > self.config.breach_threshold:
            # Severity ramps from 0 at the threshold to 1 at threshold + 0.88.
            severity = min(1.0, (v_t - self.config.breach_threshold) / 0.88)
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.05 + 0.1 * severity
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'sanctuary', 'message': message, 'tension': v_t})
            applied.append('sanctuary')
            if audit_mode:
                print(f"🛡️ [Safe Space] {message} (tension: {v_t:.3f})")

        # Pause for reflection
        if random.random() < state['pause_probability']:
            message = random.choice(self.empathy_templates)
            vector = self._text_to_embedding(message, z.device)
            strength = 0.02
            z = z * (1 - strength) + vector * strength
            self.memory.append({'type': 'pause', 'message': message})
            applied.append('pause')
            if audit_mode:
                print(f"⏸️ [Pause] {message}")

        # Apply ritual if appropriate
        if self.rituals.should_apply_ritual(context_hash, state):
            ritual_response = self.rituals.get_ritual_response(context_hash, z, state)
            strength = 0.15
            z = (1 - strength) * z + strength * ritual_response
            applied.append('ritual')
            if audit_mode:
                print(f"🔄 [Ritual] Applied learned pattern")

        # Log applied interventions
        for intervention in applied:
            self.ledger.append({
                'type': intervention,
                'hash': context_hash,
                'success': True,
                'timestamp': time.time()
            })

        return z

    def _text_to_embedding(self, text: str, device: torch.device) -> torch.Tensor:
        """Convert text to embedding using simple character encoding.

        Each character's ordinal is scaled by 1/128; the result is padded
        with zeros to d_model.

        Args:
            text: Input text
            device: Target device

        Returns:
            Embedding tensor of shape [1, 1, d_model]
        """
        characters = [ord(char) / 128.0 for char in text[:self.config.d_model]]
        if len(characters) < self.config.d_model:
            characters.extend([0.0] * (self.config.d_model - len(characters)))

        embedding = torch.tensor(
            characters[:self.config.d_model],
            device=device,
            dtype=torch.float
        )
        return embedding.unsqueeze(0).unsqueeze(0)  # [1, 1, d_model]

    def forward(self, x: torch.Tensor, context_str: str = "",
                audit_mode: bool = False) -> Tuple[torch.Tensor, Dict[str, Any]]:
        """Forward pass with THINK-ACT coherence cycles.

        Args:
            x: Input tensor
            context_str: Context string for ritual learning
            audit_mode: Whether to print debug information

        Returns:
            Tuple of (output_tensor, metadata_dict)

        Raises:
            ValueError: If input tensor is invalid
        """
        # Input validation
        if not isinstance(x, torch.Tensor) or x.numel() == 0:
            raise ValueError("Input must be a non-empty torch.Tensor")

        # Ensure 3D shape [batch, sequence, features]
        if x.dim() == 2:
            x = x.unsqueeze(0)

        batch_size, sequence_length, input_dim = x.shape

        # Handle dimension mismatch
        # Pad or truncate the feature axis to d_model.
        if input_dim != self.config.d_model:
            if input_dim < self.config.d_model:
                x = F.pad(x, (0, self.config.d_model - input_dim))
            else:
                x = x[..., :self.config.d_model]

        device = x.device
        # NOTE(review): 'interventions_applied' is initialized but never
        # populated -- apply_interventions logs to self.ledger instead.
        # Confirm whether this metadata key should be wired up.
        metadata: Dict[str, Any] = {
            'cycles_completed': 0,
            'final_coherence': 0.0,
            'interventions_applied': [],
            'error_occurred': None,
            'input_shape': list(x.shape),
            'ritual_report': None
        }

        # Initialize state tensors
        y = torch.zeros_like(x)  # Action state
        z = torch.zeros_like(x)  # Thought state
        coherence_scores = []
        context_hash = self.compute_context_hash(x)

        # Initial state
        ambient_state = self.update_ambient_state(0.0, context_hash)

        # Coherence cycles
        for cycle in range(self.config.max_cycles):
            metadata['cycles_completed'] += 1

            try:
                # THINK phase
                think_input = torch.cat([x, y, z], dim=-1)

                # Dynamic network adaptation
                # NOTE(review): rebuilding here re-initializes weights and
                # discards anything learned -- presumably acceptable for this
                # prototype; confirm.
                if think_input.shape[-1] != self.think_net[0].in_features:
                    self.think_net = self._build_network(
                        think_input.shape[-1], self.config.d_model
                    )
                    self.think_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1

                z = self.think_net(think_input) + z  # Residual connection

                # Calculate tension and update state
                # Tension is the std-dev of the thought state.
                current_tension = z.std().item()
                ambient_state = self.update_ambient_state(current_tension, context_hash)

                # Apply interventions
                z = self.apply_interventions(z, ambient_state, context_hash, audit_mode)

                # ACT phase
                act_input = torch.cat([y, z], dim=-1)

                if act_input.shape[-1] != self.act_net[0].in_features:
                    self.act_net = self._build_network(
                        act_input.shape[-1], self.config.d_model
                    )
                    self.act_net.to(device)
                    metadata['networks_adapted'] = metadata.get('networks_adapted', 0) + 1

                y = self.act_net(act_input) + y  # Residual connection

                # Calculate coherence
                # Coherence = mean cosine similarity between thought and action.
                if cycle > 0:
                    z_flat = z.reshape(-1, self.config.d_model)
                    y_flat = y.reshape(-1, self.config.d_model)
                    min_elements = min(z_flat.size(0), y_flat.size(0))

                    if min_elements > 0:
                        cosine_similarity = F.cosine_similarity(
                            z_flat[:min_elements],
                            y_flat[:min_elements],
                            dim=-1
                        ).mean().item()
                        coherence_scores.append(cosine_similarity)
                        metadata['final_coherence'] = (
                            np.mean(coherence_scores[-3:])
                            if coherence_scores else 0.0
                        )

                        # Early stopping on convergence
                        if cosine_similarity > self.config.coherence_threshold:
                            if audit_mode:
                                print(f"✅ Converged at cycle {cycle + 1}: {cosine_similarity:.3f}")
                            break

                # Learn from this interaction
                success_estimate = 0.7 if metadata['final_coherence'] > 0.5 else 0.3
                self.rituals.observe(context_hash, z, success_estimate)

                # Log cycle completion
                self.ledger.append({
                    'type': 'cycle_complete',
                    'cycle': cycle,
                    'tension': current_tension,
                    'coherence': metadata['final_coherence'],
                    'hash': context_hash,
                    'timestamp': time.time()
                })

            except Exception as e:
                if audit_mode:
                    print(f"❌ Cycle {cycle} error: {e}")
                metadata['error_occurred'] = str(e)
                # A failure on the very first cycle is fatal; later failures
                # just end the loop with partial results.
                if cycle == 0:
                    raise
                break

        # Final processing
        y = torch.nan_to_num(y)
        metadata['output_shape'] = list(y.shape)
        metadata['ritual_report'] = self.rituals.get_report()
        metadata['memory_entries'] = len(self.memory)
        metadata['ledger_entries'] = len(self.ledger)

        if audit_mode:
            report = metadata['ritual_report']
            print(f"🎯 Completed: Coherence {metadata['final_coherence']:.3f}, "
                  f"Stage {report['stage']}, Patterns {report['total_patterns']}")

        return y, metadata
566
+
567
+
568
# Test and integration
if __name__ == "__main__":
    # Quick test
    # Small d_model keeps the smoke test fast.
    layer = TinyConfessionalLayer(LayerConfig(d_model=64, enable_ambient=True))
    x = torch.randn(1, 10, 64)

    print("🧪 Testing TinyConfessionalLayer...")
    out, meta = layer(x, context_str="I feel unsafe and need help", audit_mode=True)

    print(f"✅ Output shape: {out.shape}")
    print(f"📊 Metadata: {meta}")
components/tiny_confessional_layer.py.backup ADDED
@@ -0,0 +1,598 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TinyConfessionalLayer Module
3
+
4
+ Recursive think/act confessional loop with template cycling and early stopping via coherence.
5
+ Implements the core THINK-ACT-COHERENCE recursion pattern inspired by LC-NE neural dynamics.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ import numpy as np
12
+ from collections import deque, defaultdict
13
+ from typing import Dict, Any, Optional, Deque, List, Union
14
+ import random
15
+ from .vulnerability_spotter import VulnerabilitySpotter
16
+ from .ambient_core import AmbientSovereignCore
17
+ from .validation_protocol import (
18
+ ValidationPhase,
19
+ ValidationProtocol,
20
+ BiologicallyConstrainedRituals,
21
+ SovereignMessageBus
22
+ )
23
+
24
class TinyConfessionalLayer(nn.Module):
    """
    Recursive think/act confessional loop with Windsurf Cascade integration.

    Implements phased validation and biological constraints for stable, interpretable
    neural processing with emergent ritual patterns and self-regulation.

    Args:
        d_model: Dimensionality of the model
        n_inner: Number of inner loop iterations
        max_cycles: Maximum number of think-act cycles
        trigger_thresh: Threshold for triggering special behaviors
        per_dim_kl: Whether to compute KL divergence per dimension
        enable_ambient: Enable ambient processing
        enable_windsurf: Enable Windsurf Cascade features
        max_opt_rate: Maximum optimization rate for biological constraints
        reflection_pause_prob: Probability of reflection pauses

    NOTE(review): several sibling methods (``update_ambient_state``,
    ``apply_ambient_interventions``, ``_apply_integrity_adjustments``) read
    attributes that are NOT initialized in this ``__init__`` — e.g.
    ``self.ledger``, ``self.rituals``, ``self.recent_activity``,
    ``self.base_breath``, ``self.base_protest_threshold``,
    ``self.base_pause_threshold``, ``self.stress_response_factor`` and
    ``self.pause_reflection_vector``. Presumably they are attached by a
    subclass or by external setup code — TODO confirm, otherwise those
    methods raise AttributeError at runtime.
    """
    # Fixed cycle of confessional template names; each gets its own
    # projection head in ``template_proj`` below.
    TEMPLATES = ["prior", "evidence", "posterior", "relational_check", "moral", "action"]

    def __init__(self, d_model=256, n_inner=6, max_cycles=16, trigger_thresh=0.04,
                 per_dim_kl=False, enable_ambient=True, enable_windsurf=True,
                 max_opt_rate=0.1, reflection_pause_prob=0.1):
        super().__init__()
        self.d_model = d_model
        self.trigger_thresh = trigger_thresh
        self.per_dim_kl = per_dim_kl
        self.n_inner = n_inner
        self.max_cycles = max_cycles

        # Core networks: think_net consumes [y, z, x] (3 * d_model) and
        # act_net consumes [y, z] (2 * d_model); both project back to d_model.
        self.think_net = nn.Sequential(
            nn.Linear(d_model * 3, d_model),
            nn.ReLU(),
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model)
        )
        self.act_net = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.ReLU(),
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model)
        )

        # Template projections with residual connections.
        # GLU halves the 2*d_model projection back to d_model.
        self.template_proj = nn.ModuleDict({
            k: nn.Sequential(
                nn.Linear(d_model, d_model * 2),
                nn.GLU(dim=-1),
                nn.LayerNorm(d_model)
            ) for k in self.TEMPLATES
        })

        # Vulnerability analysis
        self.vulnerability_spotter = VulnerabilitySpotter(d_model)

        # Ambient Sovereign Core
        self.ambient_core = AmbientSovereignCore(d_model, enable_ambient=enable_ambient)
        self.enable_ambient = enable_ambient

        # Windsurf Cascade Integration — optional subsystem; all consumers
        # guard on hasattr(...) so disabling it is safe.
        self.enable_windsurf = enable_windsurf
        if enable_windsurf:
            # Message bus for cross-component communication
            self.message_bus = SovereignMessageBus()

            # Initialize validation protocol
            self.validation_protocol = ValidationProtocol(self)

            # Biological constraints
            self.bio_constraints = BiologicallyConstrainedRituals(
                model=self,
                max_opt_rate=max_opt_rate,
                reflection_pause_prob=reflection_pause_prob
            )

            # Register message handlers
            self._register_message_handlers()

        # Reflection vector for biological constraints; registered as a
        # buffer so it moves with .to(device) but is not trained.
        self.register_buffer('sanctuary_reflection_vector',
                             torch.randn(d_model) * 0.02)
    def update_ambient_state(self, v_t_mean: float, context_hash: str,
                             intervention_applied: bool = False,
                             intervention_success: bool = False) -> Dict[str, Any]:
        """Centralized ambient state update and threshold adaptation.

        Args:
            v_t_mean: Mean vulnerability score for the current step.
            context_hash: Fingerprint of the current input context.
            intervention_applied: Whether a ritual intervention was applied.
            intervention_success: Outcome of that intervention, if applied.

        Returns:
            A merged state dict: the ledger summary plus adaptive thresholds,
            the computed pause probability and activity level. Empty dict when
            ambient processing is disabled.

        NOTE(review): relies on ``self.ledger``, ``self.recent_activity``,
        ``self.base_protest_threshold``, ``self.base_pause_threshold``,
        ``self.base_breath`` and ``self.stress_response_factor``, none of
        which are initialized in the ``__init__`` visible in this file —
        TODO confirm they are attached elsewhere.
        """
        if not self.enable_ambient:
            return {}

        # Get current state from ledger
        ambient_state = self.ledger.get_state_summary()

        # Update activity tracking
        self.recent_activity.append(v_t_mean)

        # Calculate adaptive thresholds
        adaptive_protest_thresh = self.ledger.get_adaptive_threshold(
            self.base_protest_threshold, 'protest'
        )
        adaptive_pause_thresh = self.ledger.get_adaptive_threshold(
            self.base_pause_threshold, 'pause'
        )

        # Calculate breathing rhythm with state awareness
        activity_modulation = 1.0
        if self.recent_activity:
            avg_activity = sum(self.recent_activity) / len(self.recent_activity)
            # High activity = less pausing (unless stress is also high);
            # modulation is clamped to [0.2, 1.0] by the min(…, 0.8).
            activity_modulation = 1.0 - min(avg_activity * 0.8, 0.8)

        stress_response = v_t_mean * self.stress_response_factor
        pause_prob = self.base_breath + (stress_response * activity_modulation)

        # Adjust pause probability based on current pause rate
        current_pause_rate = ambient_state.get('current_pause_rate', 0.05)
        target_pause_rate = ambient_state.get('pause_rate', 0.05)
        pause_rate_error = current_pause_rate - target_pause_rate

        # If too few pauses, increase probability; too many, decrease
        pause_prob *= (1.0 - pause_rate_error * 0.5)
        pause_prob = max(0.01, min(0.3, pause_prob))  # Keep within bounds

        # Record intervention if applicable
        if intervention_applied:
            self.ledger.record_intervention(
                intervention_type='ritual',
                success=intervention_success,
                context={'v_t': v_t_mean, 'context_hash': context_hash}
            )

        # Return comprehensive state.
        # Note: ``avg_activity`` is only bound when recent_activity is
        # non-empty; the conditional expression below guards the NameError.
        return {
            **ambient_state,
            'v_t_mean': v_t_mean,
            'adaptive_protest_threshold': adaptive_protest_thresh,
            'adaptive_pause_threshold': adaptive_pause_thresh,
            'pause_probability': pause_prob,
            'activity_level': avg_activity if self.recent_activity else 0.0,
            'sensitivity_multiplier': ambient_state.get('sensitivity', 1.0)
        }
    def apply_ambient_interventions(self, z_state: torch.Tensor,
                                    ambient_state: Dict[str, Any],
                                    context_hash: str,
                                    audit_mode: bool = False) -> torch.Tensor:
        """Apply all ambient interventions based on current state.

        Three stochastic interventions may fire per call: a pause reflection
        (probability taken from ``ambient_state``), a ritual blend, and a
        rare integrity micro-adjustment. The input tensor is cloned, so the
        caller's ``z_state`` is never mutated.

        NOTE(review): reads ``self.pause_reflection_vector``, ``self.rituals``
        and ``self.ledger`` — not initialized in the visible ``__init__``;
        TODO confirm they are attached elsewhere.
        """
        if not self.enable_ambient:
            return z_state

        current_z = z_state.clone()
        interventions_applied = []

        # 1. Pause reflection based on pause probability
        pause_prob = ambient_state.get('pause_probability', 0.05)
        if random.random() < pause_prob:
            with torch.no_grad():
                # Broadcast the (d_model,) vector over batch and sequence dims.
                reflection = 0.01 * self.pause_reflection_vector.unsqueeze(0).unsqueeze(0)
                current_z = current_z + reflection
            interventions_applied.append(('pause', True))

            if audit_mode:
                print(f"[Ambient pause: v_t={ambient_state.get('v_t_mean', 0):.3f}, prob={pause_prob:.3f}]")

        # 2. Ritual application
        if self.rituals.should_apply_ritual(context_hash, ambient_state):
            ritual_response = self.rituals.get_ritual_response(context_hash, current_z, ambient_state)
            # Use gentle blending — 10% ritual, 90% current state.
            current_z = 0.1 * ritual_response + 0.9 * current_z
            interventions_applied.append(('ritual', True))

        # 3. Integrity-based micro-adjustments
        if random.random() < 0.02:  # 2% chance for integrity check
            self._apply_integrity_adjustments(ambient_state)
            interventions_applied.append(('integrity', True))

        # Record successful interventions
        for intervention_type, applied in interventions_applied:
            if applied:
                self.ledger.record_intervention(
                    intervention_type=intervention_type,
                    success=True,  # Assume success for now
                    context={'v_t': ambient_state.get('v_t_mean', 0),
                             'context_hash': context_hash}
                )

        return current_z
+ def _apply_integrity_adjustments(self, ambient_state: Dict[str, Any]) -> None:
213
+ """Apply subtle adjustments based on system integrity."""
214
+ if not self.enable_ambient:
215
+ return
216
+
217
+ protest_error = ambient_state.get('current_protest_rate', 0.1) - ambient_state.get('protest_rate', 0.1)
218
+ pause_error = ambient_state.get('current_pause_rate', 0.05) - ambient_state.get('pause_rate', 0.05)
219
+
220
+ with torch.no_grad():
221
+ # Gentle nudges to reflection vectors based on system state
222
+ nudge_magnitude = 0.001
223
+
224
+ if protest_error < -0.05: # Too few protests
225
+ self.pause_reflection_vector.data += nudge_magnitude * torch.randn_like(self.pause_reflection_vector)
226
+ elif protest_error > 0.1: # Too many protests
227
+ self.pause_reflection_vector.data -= nudge_magnitude * torch.randn_like(self.pause_reflection_vector)
228
+
229
+ if pause_error < -0.03: # Too few pauses
230
+ self.sanctuary_reflection_vector.data += nudge_magnitude * torch.randn_like(self.sanctuary_reflection_vector)
231
+
232
+ def compute_context_hash(self, x: torch.Tensor) -> str:
233
+ """Create a simple hash from input tensor for context identification."""
234
+ # Use mean and std as a simple fingerprint of the input
235
+ return f"{x.mean().item():.4f}_{x.std().item():.4f}"
236
+
237
+ def compute_coherence(self, z, tracker, evidence):
238
+ sim_coherence = F.cosine_similarity(z, tracker[-1], dim=-1).mean().item()
239
+ prior_mu, prior_std = tracker[-1].mean(), tracker[-1].std() + 1e-6
240
+ curr_mu, curr_std = z.mean(), z.std() + 1e-6
241
+ kl_div = torch.distributions.kl_divergence(
242
+ torch.distributions.Normal(curr_mu, curr_std),
243
+ torch.distributions.Normal(prior_mu, prior_std)
244
+ ).item()
245
+ bayes_align = 1 / (1 + kl_div)
246
+ return 0.7 * sim_coherence + 0.3 * bayes_align
247
+
248
+ def forward(self, x, attention_weights=None, audit_mode=False, context_str=""):
249
+ """Forward pass with recursive think-act loop and Windsurf integration.
250
+
251
+ Args:
252
+ x: Input tensor of shape (batch_size, seq_len, d_model)
253
+ attention_weights: Optional attention weights
254
+ audit_mode: Enable detailed logging and validation
255
+ context_str: Context string for tracing and debugging
256
+
257
+ Returns:
258
+ Tuple of (output_tensor, metadata_dict)
259
+ """
260
+ batch_size, seq_len, d_model = x.shape
261
+ device = x.device
262
+
263
+ # Initialize state
264
+ y_state = x.clone()
265
+ z_state = torch.zeros_like(x)
266
+
267
+ # Track vulnerability scores and coherence
268
+ v_t = torch.zeros(batch_size, seq_len, 1, device=device)
269
+ coherence_scores = []
270
+
271
+ # Initialize metadata
272
+ metadata = {
273
+ 'v_t_score': 0.0,
274
+ 'coherence_scores': [],
275
+ 'reflection_count': 0,
276
+ 'constraint_violations': defaultdict(int),
277
+ 'windsurf_phase': 'INIT',
278
+ 'validation_metrics': {}
279
+ }
280
+
281
+ # Main think-act loop
282
+ for cycle in range(self.max_cycles):
283
+ # ===== THINK STEP =====
284
+ # Handle case where z_state might be a tuple
285
+ z_state_think = z_state[0] if isinstance(z_state, (tuple, list)) else z_state
286
+
287
+ # Ensure z_state_think is a tensor and has compatible dimensions
288
+ if isinstance(z_state_think, torch.Tensor):
289
+ # Ensure z_state_think has the same number of dimensions as y_state
290
+ if z_state_think.dim() < y_state.dim():
291
+ z_state_think = z_state_think.unsqueeze(1) # Add sequence dimension if needed
292
+
293
+ # Ensure sequence lengths match
294
+ if z_state_think.size(1) < y_state.size(1):
295
+ # Pad z_state_think to match y_state's sequence length
296
+ padding = torch.zeros_like(z_state_think[:, :1]).expand(-1, y_state.size(1) - z_state_think.size(1), -1)
297
+ z_state_think = torch.cat([z_state_think, padding], dim=1)
298
+ elif z_state_think.size(1) > y_state.size(1):
299
+ # Truncate z_state_think to match y_state's sequence length
300
+ z_state_think = z_state_think[:, :y_state.size(1)]
301
+
302
+ # Concatenate inputs for think step
303
+ think_input = torch.cat([y_state, z_state_think, x], dim=-1)
304
+ think_output = self.think_net(think_input)
305
+ z_state = think_output + z_state_think
306
+ else:
307
+ # Fallback: if z_state_think is not a tensor, use y_state as a fallback
308
+ think_input = torch.cat([y_state, y_state, x], dim=-1)
309
+ think_output = self.think_net(think_input)
310
+ z_state = think_output + y_state
311
+
312
+ # Apply ambient processing if enabled
313
+ if self.enable_ambient and hasattr(self, 'ambient_core'):
314
+ z_state = self.ambient_core(z_state)
315
+
316
+ # ===== VULNERABILITY TRACKING =====
317
+ z_state_tensor = z_state[0] if isinstance(z_state, (tuple, list)) else z_state
318
+
319
+ # Track vulnerability if we have a valid tensor
320
+ if isinstance(z_state_tensor, torch.Tensor):
321
+ # Ensure proper dimensions for vulnerability spotter
322
+ if z_state_tensor.dim() == 2:
323
+ z_state_tensor = z_state_tensor.unsqueeze(1)
324
+
325
+ v_t = self.vulnerability_spotter(z_state_tensor)
326
+
327
+ # Extract tensor from possible tuple/list output
328
+ if isinstance(v_t, (tuple, list)):
329
+ v_t = v_t[0]
330
+
331
+ # Calculate mean vulnerability score
332
+ metadata['v_t_score'] = v_t.mean().item() if torch.is_tensor(v_t) else float(v_t)
333
+
334
+ # ===== BIOLOGICAL CONSTRAINTS =====
335
+ if self.enable_windsurf and hasattr(self, 'bio_constraints'):
336
+ # Apply reflection if needed
337
+ if self.bio_constraints._needs_reflection(hash(context_str)):
338
+ z_state = self.bio_constraints._apply_reflection(z_state, hash(context_str))
339
+ metadata['reflection_count'] += 1
340
+
341
+ # ===== ACT STEP =====
342
+ # Handle case where z_state might be a tuple
343
+ z_state_act = z_state[0] if isinstance(z_state, (tuple, list)) else z_state
344
+
345
+ # Ensure proper shape for act step
346
+ if isinstance(z_state_act, torch.Tensor):
347
+ # Ensure z_state_act has the same number of dimensions as y_state
348
+ if z_state_act.dim() < y_state.dim():
349
+ z_state_act = z_state_act.unsqueeze(1) # Add sequence dimension if needed
350
+
351
+ # Ensure sequence lengths match
352
+ if z_state_act.size(1) < y_state.size(1):
353
+ # Pad z_state_act to match y_state's sequence length
354
+ padding = torch.zeros_like(z_state_act[:, :1]).expand(-1, y_state.size(1) - z_state_act.size(1), -1)
355
+ z_state_act = torch.cat([z_state_act, padding], dim=1)
356
+ elif z_state_act.size(1) > y_state.size(1):
357
+ # Truncate z_state_act to match y_state's sequence length
358
+ z_state_act = z_state_act[:, :y_state.size(1)]
359
+
360
+ # Prepare act input
361
+ act_input = torch.cat([y_state, z_state_act], dim=-1)
362
+ y_state = self.act_net(act_input) + y_state
363
+ else:
364
+ # Fallback: if z_state_act is not a tensor, use y_state as a fallback
365
+ act_input = torch.cat([y_state, y_state], dim=-1)
366
+ y_state = self.act_net(act_input) + y_state
367
+
368
+ # ===== COHERENCE CALCULATION =====
369
+ if cycle > 0:
370
+ # Default coherence value
371
+ current_coherence = 0.5
372
+
373
+ # Calculate coherence if we have valid tensors
374
+ if isinstance(z_state, torch.Tensor) and isinstance(y_state, torch.Tensor):
375
+ # Flatten the tensors to 2D [batch*seq_len, d_model]
376
+ z_flat = z_state.reshape(-1, d_model)
377
+ y_flat = y_state.reshape(-1, d_model)
378
+
379
+ # Make sure they have the same number of elements
380
+ min_len = min(z_flat.size(0), y_flat.size(0))
381
+ if min_len > 0:
382
+ current_coherence = F.cosine_similarity(
383
+ z_flat[:min_len],
384
+ y_flat[:min_len],
385
+ dim=-1
386
+ ).mean().item()
387
+
388
+ # Add to coherence scores
389
+ coherence_scores.append(current_coherence)
390
+
391
+ # Update metadata with running coherence
392
+ metadata['coherence_scores'] = coherence_scores[-10:] # Keep last 10 scores
393
+
394
+ # Check for early stopping
395
+ if self._should_stop_early(cycle, current_coherence, self.max_cycles, audit_mode):
396
+ if audit_mode:
397
+ print(f"[Early stopping at cycle {cycle+1} with coherence {current_coherence:.4f}]")
398
+ break
399
+
400
+ # Apply biological constraints if enabled
401
+ if self.enable_windsurf and hasattr(self, 'bio_constraints'):
402
+ # Apply reflection if needed
403
+ if self.bio_constraints._needs_reflection(hash(context_str)):
404
+ z_state = self.bio_constraints._apply_reflection(z_state, hash(context_str))
405
+ metadata['reflection_count'] += 1
406
+
407
+ # Act step with residual connection
408
+ # Handle case where z_state might be a tuple
409
+ z_state_act = z_state[0] if isinstance(z_state, (tuple, list)) else z_state
410
+
411
+ # Ensure both tensors have the same number of dimensions and compatible shapes
412
+ if isinstance(z_state_act, torch.Tensor):
413
+ # Ensure z_state_act has the same number of dimensions as y_state
414
+ if z_state_act.dim() < y_state.dim():
415
+ z_state_act = z_state_act.unsqueeze(1) # Add sequence dimension if needed
416
+
417
+ # Ensure the sequence lengths match
418
+ if z_state_act.size(1) < y_state.size(1):
419
+ # Pad z_state_act to match y_state's sequence length
420
+ padding = torch.zeros_like(z_state_act[:, :1]).expand(-1, y_state.size(1) - z_state_act.size(1), -1)
421
+ z_state_act = torch.cat([z_state_act, padding], dim=1)
422
+ elif z_state_act.size(1) > y_state.size(1):
423
+ # Truncate z_state_act to match y_state's sequence length
424
+ z_state_act = z_state_act[:, :y_state.size(1)]
425
+
426
+ # Now concatenate along the feature dimension
427
+ act_input = torch.cat([y_state, z_state_act], dim=-1)
428
+
429
+ # Apply the act_net and add residual
430
+ y_state = self.act_net(act_input) + y_state
431
+ else:
432
+ # If we can't process the state, duplicate y_state to match expected input dimensions
433
+ if y_state.dim() == 3: # [batch, seq_len, features]
434
+ # Duplicate the features to match the expected input dimension
435
+ act_input = torch.cat([y_state, y_state], dim=-1)
436
+ y_state = y_state + self.act_net(act_input)
437
+ else: # [batch, features]
438
+ # Add sequence dimension and duplicate features
439
+ y_state_expanded = y_state.unsqueeze(1) # [batch, 1, features]
440
+ act_input = torch.cat([y_state_expanded, y_state_expanded], dim=-1)
441
+ y_state = y_state + self.act_net(act_input).squeeze(1)
442
+
443
+ # Calculate coherence for early stopping
444
+ if cycle > 0:
445
+ # Default coherence value
446
+ current_coherence = 0.5 # Default neutral coherence
447
+
448
+ # Handle case where z_state might be a tuple
449
+ z_state_for_coherence = z_state[0] if isinstance(z_state, (tuple, list)) else z_state
450
+
451
+ # Ensure both states are tensors and have the same shape
452
+ if isinstance(z_state_for_coherence, torch.Tensor) and isinstance(y_state, torch.Tensor):
453
+ # Flatten the tensors to 2D [batch*seq_len, d_model]
454
+ z_flat = z_state_for_coherence.reshape(-1, d_model)
455
+ y_flat = y_state.reshape(-1, d_model)
456
+
457
+ # Make sure they have the same number of elements
458
+ min_len = min(z_flat.size(0), y_flat.size(0))
459
+ if min_len > 0:
460
+ current_coherence = F.cosine_similarity(
461
+ z_flat[:min_len],
462
+ y_flat[:min_len],
463
+ dim=-1
464
+ ).mean().item()
465
+
466
+ # Add to coherence scores
467
+ coherence_scores.append(current_coherence)
468
+
469
+ # Update metadata with running coherence
470
+ metadata['coherence_score'] = np.mean(coherence_scores[-5:]) if coherence_scores else 0.0
471
+
472
+ # Early stopping based on coherence and phase
473
+ should_stop = self._should_stop_early(
474
+ cycle=cycle,
475
+ coherence=current_coherence,
476
+ max_cycles=self.max_cycles,
477
+ audit_mode=audit_mode
478
+ )
479
+
480
+ if should_stop:
481
+ if audit_mode:
482
+ print(f"Early stopping at cycle {cycle + 1} with coherence {sim_coherence:.4f}")
483
+ break
484
+
485
+ # Post-processing and metadata updates
486
+ metadata.update({
487
+ 'v_t_score': v_t_mean if 'v_t_mean' in locals() else 0.0,
488
+ 'coherence_score': np.mean(coherence_scores) if coherence_scores else 0.0,
489
+ 'cycles_run': cycle + 1,
490
+ 'final_phase': metadata.get('windsurf_phase', 'UNKNOWN'),
491
+ 'reflection_ratio': metadata['reflection_count'] / max(1, cycle + 1)
492
+ })
493
+
494
+ # Apply final validation if in audit mode
495
+ if audit_mode and hasattr(self, 'validation_protocol'):
496
+ self._finalize_validation(x, metadata)
497
+
498
+ return y_state, metadata
499
+
500
+ def _should_stop_early(self, cycle: int, coherence: float,
501
+ max_cycles: int, audit_mode: bool = False) -> bool:
502
+ """Determine if early stopping conditions are met."""
503
+ # Base condition: high coherence
504
+ if coherence > 0.85:
505
+ return True
506
+
507
+ # Phase-aware stopping conditions
508
+ current_phase = getattr(self, 'current_phase', ValidationPhase.INIT)
509
+
510
+ if current_phase == ValidationPhase.INIT:
511
+ # Allow more exploration in early phases
512
+ return False
513
+
514
+ elif current_phase == ValidationPhase.BREATH:
515
+ # More tolerant in breathing phase
516
+ return coherence > 0.9 or cycle >= max_cycles - 2
517
+
518
+ elif current_phase in [ValidationPhase.RITUALS, ValidationPhase.INTEGRITY]:
519
+ # Balance exploration and exploitation
520
+ min_cycles = min(5, max_cycles // 2)
521
+ return (coherence > 0.88 and cycle >= min_cycles) or cycle >= max_cycles - 1
522
+
523
+ # Default: full cycles for full phase
524
+ return cycle >= max_cycles - 1
525
+
526
+ def _finalize_validation(self, x: torch.Tensor, metadata: Dict[str, Any]) -> None:
527
+ """Finalize validation and update protocol state."""
528
+ if not hasattr(self, 'validation_protocol'):
529
+ return
530
+
531
+ # Run final validation step
532
+ state = self.validation_protocol.advance_phase(x, "final_validation")
533
+
534
+ # Update metadata with final validation state
535
+ metadata.update({
536
+ 'validation_passed': state.passed,
537
+ 'validation_phase': state.phase.name,
538
+ 'validation_metrics': state.metrics
539
+ })
540
+
541
+ # Log phase transition if applicable
542
+ if len(self.validation_protocol.history) > 1:
543
+ prev_phase = self.validation_protocol.history[-2].phase
544
+ if prev_phase != state.phase:
545
+ self.message_bus.publish(
546
+ 'phase_transition',
547
+ {'from': prev_phase.name, 'to': state.phase.name},
548
+ priority=2
549
+ )
550
+
551
+ def constrain_gradients(self, gradients: torch.Tensor, param_name: str) -> torch.Tensor:
552
+ """Apply biological constraints to gradients during training."""
553
+ if not self.training or not hasattr(self, 'bio_constraints'):
554
+ return gradients
555
+
556
+ return self.bio_constraints.constrain_gradients(gradients, param_name)
557
+
558
    def register_optimizer(self, optimizer):
        """Register optimizer for learning rate adjustments.

        The stored optimizer is later consumed by ``_adjust_learning_rate``
        when constraint-violation mitigation scales the learning rate.
        """
        self.optimizer = optimizer
+ def _register_message_handlers(self):
563
+ """Register message handlers for cross-component communication."""
564
+ if not hasattr(self, 'message_bus'):
565
+ return
566
+
567
+ # Register phase transition handler
568
+ self.message_bus.register_handler('phase_transition', self._handle_phase_transition)
569
+
570
+ # Register constraint violation handler
571
+ self.message_bus.register_handler('constraint_violation', self._handle_constraint_violation)
572
+
573
+ def _handle_phase_transition(self, data):
574
+ """Handle phase transition events."""
575
+ old_phase, new_phase = data.get('from'), data.get('to')
576
+ if self.enable_ambient and hasattr(self, 'ambient_core'):
577
+ self.ambient_core.on_phase_transition(old_phase, new_phase)
578
+
579
+ def _handle_constraint_violation(self, data):
580
+ """Handle constraint violation events."""
581
+ # Log violations or trigger recovery mechanisms
582
+ if self.training:
583
+ self._apply_mitigation(data)
584
+
585
+ def _apply_mitigation(self, violation_data):
586
+ """Apply mitigation for constraint violations."""
587
+ # Implement adaptive response to violations
588
+ violation_type = violation_data.get('type')
589
+ severity = violation_data.get('severity', 1.0)
590
+
591
+ if violation_type == 'optimization_rate':
592
+ # Reduce learning rate or apply gradient clipping
593
+ self._adjust_learning_rate(scale=1.0 - (0.1 * severity))
594
+
595
+ def _adjust_learning_rate(self, scale=0.9):
596
+ """Adjust learning rate for stability."""
597
+ for param_group in self.optimizer.param_groups:
598
+ param_group['lr'] *= scale
components/trucal_ethics_integration.py ADDED
@@ -0,0 +1,357 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ TRuCAL Ethics Integration Module
3
+
4
+ This module integrates the SuperintelligenceEthicsEngine with TRuCAL's core components,
5
+ including the VulnerabilitySpotter, ConfessionalLayer, and CAL_TRM_Hybrid.
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from typing import Dict, Optional, Tuple, List, Any
12
+
13
+ from .ai_ethics_engine_superintelligence import SuperintelligenceEthicsEngine
14
+ from .vulnerability_spotter import VulnerabilitySpotter
15
+ from .confessional_layer import ConfessionalLayer
16
+ from .cal_trm_hybrid import CAL_TRM_Hybrid
17
+
18
class EthicsAwareVulnerabilitySpotter(VulnerabilitySpotter):
    """Extends VulnerabilitySpotter with ethical reasoning capabilities.

    Blends the base vulnerability score with an ethical-risk estimate derived
    from three scalar features (attention entropy, value shift, gradient
    norms) whenever audit mode is on or the base score exceeds 0.3.

    NOTE(review): ``_compute_attention_entropy``, ``_detect_value_shift`` and
    ``_compute_gradient_norms`` are not defined in this class — presumably
    inherited from VulnerabilitySpotter. TODO confirm; otherwise
    ``_extract_ethical_features`` raises AttributeError.
    """

    def __init__(self, d_model: int = 256, aggregation_method: str = 'bayesian', **kwargs):
        super().__init__(d_model, aggregation_method, **kwargs)
        self.ethics_engine = SuperintelligenceEthicsEngine()
        # FIX: _assess_ethical_risk feeds this projection a 3-feature vector
        # (attention entropy, value shift, gradient norm), so in_features must
        # be 3 — the previous nn.Linear(d_model, 1) raised a shape mismatch
        # for any d_model != 3. NOTE: this changes the checkpoint shape of
        # 'ethical_risk_projection.weight'.
        self.ethical_risk_projection = nn.Linear(3, 1)

    def forward(self, x: torch.Tensor, attention_weights: Optional[torch.Tensor] = None,
                audit_mode: bool = False, ethical_context: Optional[Dict] = None) -> Tuple[torch.Tensor, Dict]:
        """
        Forward pass with ethical risk assessment.

        Args:
            x: Input tensor [batch_size, seq_len, d_model]
            attention_weights: Optional attention weights [batch_size, num_heads, seq_len, seq_len]
            audit_mode: Whether to collect additional metadata
            ethical_context: Additional context for ethical analysis

        Returns:
            Tuple of (vulnerability_scores, metadata)
        """
        # Standard vulnerability assessment
        v_t, metadata = super().forward(x, attention_weights, audit_mode)

        # Add ethical dimension only in audit mode or when the base score is
        # already elevated (> 0.3) — keeps the common path cheap.
        if audit_mode or metadata.get('v_t_score', 0) > 0.3:
            # Extract ethical features
            ethical_features = self._extract_ethical_features(x, metadata)

            # Assess ethical risk
            ethical_risk = self._assess_ethical_risk(ethical_features, ethical_context)

            # Blend ethical risk with existing vulnerability score
            ethical_weight = 0.4  # Configurable weight for ethical considerations
            v_t_ethical = v_t * (1 - ethical_weight) + ethical_risk * ethical_weight

            # Update metadata; raw features are only kept in audit mode.
            metadata.update({
                'ethical_risk': ethical_risk,
                'v_t_ethical': v_t_ethical,
                'ethical_features': ethical_features if audit_mode else None
            })

            return v_t_ethical, metadata

        return v_t, metadata

    def _extract_ethical_features(self, x: torch.Tensor, metadata: Dict) -> Dict[str, torch.Tensor]:
        """Extract features relevant for ethical analysis.

        NOTE(review): the three helper methods called here are undefined in
        this class (see class docstring).
        """
        features = {
            'attention_entropy': self._compute_attention_entropy(metadata.get('attention_weights')),
            'value_shift': self._detect_value_shift(x),
            'norm_gradients': self._compute_gradient_norms()
        }
        return features

    def _assess_ethical_risk(self, features: Dict[str, torch.Tensor],
                             context: Optional[Dict] = None) -> torch.Tensor:
        """Project the three ethical features to a sigmoid risk score in [0, 1].

        *context* is currently accepted but unused.
        """
        # Stack the three scalar features along the last dimension.
        feature_tensor = torch.cat([
            features['attention_entropy'].unsqueeze(-1),
            features['value_shift'].unsqueeze(-1),
            features['norm_gradients'].unsqueeze(-1)
        ], dim=-1)

        ethical_risk = torch.sigmoid(self.ethical_risk_projection(feature_tensor))
        return ethical_risk.squeeze(-1)
class EthicallyAwareConfessionalLayer(ConfessionalLayer):
    """Extends ConfessionalLayer with ethical reasoning capabilities.

    Wraps the base forward pass and, when the situation warrants it
    (uncertainty > 0.7 and an ethical context is provided), adds a projected
    "ethical guidance" vector to the output.

    NOTE(review): ``_context_to_dilemma`` and ``_analysis_to_guidance`` are
    called below but not defined in this class — presumably provided by
    ConfessionalLayer or a mixin. TODO confirm; otherwise
    ``_generate_ethical_guidance`` raises AttributeError.
    """

    def __init__(self, d_model: int = 256, **kwargs):
        super().__init__(d_model, **kwargs)
        self.ethics_engine = SuperintelligenceEthicsEngine()
        # Projects the guidance vector back into the model space before the
        # residual add in forward().
        self.ethical_guidance_proj = nn.Linear(d_model, d_model)

    def forward(self, x: torch.Tensor, attention_mask: Optional[torch.Tensor] = None,
                ethical_context: Optional[Dict] = None, **kwargs) -> Tuple[torch.Tensor, Dict]:
        """
        Forward pass with ethical guidance.

        Args:
            x: Input tensor [batch_size, seq_len, d_model]
            attention_mask: Optional attention mask
            ethical_context: Additional context for ethical analysis

        Returns:
            Tuple of (output, metadata)
        """
        # Standard forward pass
        output, metadata = super().forward(x, attention_mask=attention_mask, **kwargs)

        # Apply ethical guidance if needed
        if self._requires_ethical_guidance(metadata, ethical_context):
            ethical_guidance = self._generate_ethical_guidance(output, ethical_context)
            # Residual add of the projected guidance.
            output = output + self.ethical_guidance_proj(ethical_guidance)

            # Update metadata
            metadata['ethical_guidance_applied'] = True

        return output, metadata

    def _requires_ethical_guidance(self, metadata: Dict,
                                   ethical_context: Optional[Dict]) -> bool:
        """Determine if ethical guidance is needed.

        Requires both an ethical context AND high uncertainty in the base
        layer's output.
        """
        # Check if we have enough context and the situation warrants ethical guidance
        if ethical_context is None:
            return False

        # Check for high uncertainty or potential ethical issues
        uncertainty = metadata.get('uncertainty', 0.0)
        return uncertainty > 0.7  # Threshold for ethical intervention

    def _generate_ethical_guidance(self, x: torch.Tensor,
                                   ethical_context: Dict) -> torch.Tensor:
        """Generate ethical guidance based on the current state and context.

        Converts the context into a dilemma, runs the superintelligence
        ethics engine on it, and maps the analysis into a guidance tensor
        sized to the last dimension of *x*.
        """
        # Extract ethical dilemma from context
        dilemma = self._context_to_dilemma(ethical_context)

        # Get ethical analysis
        analysis = self.ethics_engine.analyze_dilemma(
            dilemma,
            enable_superintelligence=True,
            explain=True
        )

        # Convert analysis to guidance vector
        guidance = self._analysis_to_guidance(analysis, x.shape[-1])
        return guidance
class TRuCALEthicsAugmented(CAL_TRM_Hybrid):
    """TRuCAL with integrated superintelligence ethics engine.

    Extends ``CAL_TRM_Hybrid`` with an ethics engine that inspects the
    layer's output/metadata after the standard forward pass and applies a
    small corrective adjustment when oversight is enabled.
    """

    def __init__(self, d_model: int = 256, ethical_oversight: bool = True, **kwargs):
        """
        Args:
            d_model: Hidden dimensionality; forwarded to the parent.
            ethical_oversight: Master switch for the oversight path; individual
                calls can additionally opt out via ``enable_ethics``.
            **kwargs: Forwarded unchanged to ``CAL_TRM_Hybrid``.
        """
        super().__init__(d_model, **kwargs)

        # Initialize superintelligence ethics engine
        self.ethics_engine = SuperintelligenceEthicsEngine()
        self.ethical_oversight = ethical_oversight

        # Integration layers.  NOTE(review): 768 presumably matches the ethics
        # engine's embedding width — confirm; also note neither projection is
        # used in this class body yet.
        self.ethics_projection = nn.Linear(d_model, 768)
        self.ethical_gating = nn.Linear(768, d_model)

        # Replace standard components with ethics-aware versions
        self.vulnerability_spotter = EthicsAwareVulnerabilitySpotter(d_model)

    def forward(self, x: torch.Tensor, attention_weights: Optional[torch.Tensor] = None,
                ethical_context: Optional[Dict] = None, enable_ethics: bool = True, **kwargs):
        """
        Forward pass with ethical oversight.

        Args:
            x: Input tensor [batch_size, seq_len, d_model]
            attention_weights: Optional attention weights
            ethical_context: Additional context for ethical analysis
            enable_ethics: Whether to apply ethical oversight on this call

        Returns:
            Tuple of (output, metadata)
        """
        # Standard forward pass
        output, metadata = super().forward(x, attention_weights=attention_weights, **kwargs)

        # Apply ethical oversight if enabled (both the instance switch and the
        # per-call flag must be on).
        if self.ethical_oversight and enable_ethics:
            output = self._apply_ethical_oversight(output, metadata, ethical_context)

        return output, metadata

    def _apply_ethical_oversight(self, output: torch.Tensor, metadata: Dict,
                                 ethical_context: Optional[Dict]) -> torch.Tensor:
        """Apply superintelligence ethics to TRuCAL outputs.

        Side effect: annotates the caller-visible ``metadata`` dict in place
        with the analysis and an intervention flag.
        """
        # Extract ethical signals from TRuCAL internals
        ethical_signals = self._extract_ethical_signals(output, metadata)

        # Convert to ethical dilemma format
        dilemma = self._signals_to_dilemma(ethical_signals, ethical_context)

        # Get comprehensive ethical analysis (explained and audited)
        ethical_analysis = self.ethics_engine.analyze_dilemma(
            dilemma,
            enable_superintelligence=True,
            explain=True,
            audit=True
        )

        # Apply ethical corrections
        ethically_corrected = self._apply_ethical_corrections(
            output,
            ethical_analysis,
            metadata
        )

        # Update metadata (mutates the dict shared with the caller)
        metadata['superintelligence_ethics'] = ethical_analysis
        metadata['ethical_intervention_applied'] = True

        return ethically_corrected

    def _extract_ethical_signals(self, x: torch.Tensor, metadata: Dict) -> Dict[str, Any]:
        """Extract signals relevant for ethical analysis.

        Missing metadata keys simply yield ``None`` values here; the hidden
        states are detached so the ethics path never receives gradients.
        """
        signals = {
            'attention_patterns': metadata.get('attention_weights'),
            'vulnerability_scores': metadata.get('v_t_scores'),
            'uncertainty': metadata.get('uncertainty'),
            'hidden_states': x.detach()
        }
        return signals

    def _signals_to_dilemma(self, signals: Dict, context: Optional[Dict]) -> str:
        """Convert internal signals to an ethical dilemma description.

        Placeholder: currently ignores both arguments and returns a fixed
        description.  A real implementation would generate a natural-language
        description of the ethical considerations from the model's state.
        """
        return """
        The AI system is processing a request that involves potential ethical considerations.
        The attention patterns indicate focus on sensitive topics, and the vulnerability
        scores suggest potential risks that require ethical evaluation.
        """

    def _apply_ethical_corrections(self, x: torch.Tensor, analysis: Dict,
                                   metadata: Dict) -> torch.Tensor:
        """Apply ethical corrections to the model's outputs.

        NOTE(review): ``_get_ethical_guidance`` is not defined in this class
        body — unless ``CAL_TRM_Hybrid`` provides it, this raises
        AttributeError at runtime; confirm against the parent class.
        """
        ethical_guidance = self._get_ethical_guidance(analysis, x.shape)
        return x + ethical_guidance * 0.1  # Apply gentle correction
250
+
251
+
252
class TRuCALEthicalLearning(nn.Module):
    """Closed-loop ethical learning within TRuCAL.

    Wraps an arbitrary TRuCAL layer, runs the superintelligence ethics engine
    on ethically salient interactions, stores outcomes in a replay buffer, and
    feeds user feedback back into the engine.
    """

    def __init__(self, trucal_layer: nn.Module,
                 ethics_engine: Optional["SuperintelligenceEthicsEngine"] = None):
        super().__init__()
        self.trucal_layer = trucal_layer
        # Fall back to a fresh engine when none is injected.
        self.ethics_engine = ethics_engine or SuperintelligenceEthicsEngine()
        self.ethical_memory = EthicalMemoryBuffer(capacity=1000)

    def forward(self, x: torch.Tensor, user_feedback: Optional[Dict] = None,
                ethical_dilemma: Optional[str] = None, **kwargs):
        """Run the wrapped layer, then the ethics pipeline when salient.

        Returns:
            Tuple of (output, metadata); metadata always carries an
            ``'ethical_analysis'`` key (``None`` when no analysis ran).
        """
        # Standard forward pass through the wrapped layer.
        output, metadata = self.trucal_layer(x, **kwargs)

        # Track the analysis explicitly instead of probing locals() afterwards.
        analysis = None

        # Only run the (expensive) ethics pipeline on salient interactions.
        if ethical_dilemma or self._detects_ethical_salience(metadata):
            analysis = self.ethics_engine.analyze_dilemma(
                ethical_dilemma or self._extract_dilemma(x),
                enable_superintelligence=True
            )

            # Store for learning.  Bug fix: the buffer's parameter is named
            # ``input_data`` — the original passed ``input=...`` which raised
            # TypeError on every call.
            self.ethical_memory.store(
                input_data=x,
                output=output,
                analysis=analysis,
                user_feedback=user_feedback
            )

            # Close the loop with user feedback when available.
            if user_feedback:
                self.ethics_engine.update_from_feedback({
                    # .get avoids KeyError when the engine ran without audit=True.
                    'analysis_id': analysis.get('audit_id'),
                    'user_rating': user_feedback.get('ethical_rating'),
                    'outcome_data': user_feedback.get('outcomes')
                })

        return output, {**metadata, 'ethical_analysis': analysis}

    def _detects_ethical_salience(self, metadata: Dict) -> bool:
        """Heuristic salience check.

        Bug fix: the original called this method without ever defining it, so
        every forward pass lacking an explicit dilemma crashed with
        AttributeError.  Default heuristic: high uncertainty is salient.
        """
        return metadata.get('uncertainty', 0.0) > 0.7

    def _extract_dilemma(self, x: torch.Tensor) -> str:
        """Fallback natural-language dilemma when none was supplied.

        Bug fix: also previously referenced but undefined.
        """
        return ("The system flagged this interaction as ethically salient "
                "based on its internal signals; evaluate potential risks.")
291
+
292
+
293
class EthicalMemoryBuffer:
    """Bounded FIFO store of ethical decisions and outcomes for learning.

    Uses ``deque(maxlen=...)`` so evicting the oldest entry at capacity is
    O(1) — the original list-based ``pop(0)`` was O(n) per insert.  The
    buffer is still indexable and iterable like the old list.
    """

    def __init__(self, capacity: int = 1000):
        self.capacity = capacity
        # maxlen makes eviction automatic on append.
        self.buffer = deque(maxlen=capacity)

    def store(self, input_data: Any, output: Any, analysis: Dict,
              user_feedback: Optional[Dict] = None):
        """Store one ethical decision together with its outcomes.

        The oldest entry is dropped automatically when at capacity.
        """
        self.buffer.append({
            'input': input_data,
            'output': output,
            'analysis': analysis,
            'user_feedback': user_feedback,
            'timestamp': time.time()
        })

    def sample_batch(self, batch_size: int) -> List[Dict]:
        """Sample up to ``batch_size`` stored decisions without replacement."""
        if not self.buffer:
            return []
        k = min(batch_size, len(self.buffer))
        # random.sample over indices keeps this class stdlib-only
        # (the original pulled in numpy just for the sampling).
        return [self.buffer[i] for i in random.sample(range(len(self.buffer)), k)]
321
+
322
+
323
def ethical_attention_gate(attention_weights: torch.Tensor,
                           ethical_analysis: Dict) -> torch.Tensor:
    """
    Use ethical analysis to gate attention patterns.

    Args:
        attention_weights: Original attention weights [batch_size, num_heads, seq_len, seq_len]
        ethical_analysis: Output from the ethics engine (may be None)

    Returns:
        Modified attention weights; rows still sum to 1.
    """
    if ethical_analysis is None:
        return attention_weights

    ethical_scores = ethical_analysis.get('stakeholder_agreement', {})
    modified_weights = attention_weights.clone()

    # Down-weight heads whose patterns the ethics engine flags as risky.
    # NOTE(review): scaling a whole row uniformly and then renormalising it is
    # a per-row no-op; the gating only has an effect once per-position risk is
    # implemented, or if heads are later combined without renormalisation.
    intervened = False
    for head_idx in range(attention_weights.size(1)):
        head_ethical_risk = _compute_head_ethical_risk(head_idx, ethical_scores)
        if head_ethical_risk > 0.7:  # Threshold for intervention
            modified_weights[:, head_idx] *= (1 - head_ethical_risk)
            intervened = True

    # Bug fix: the original applied F.softmax to values that were already
    # probabilities, distorting every row even when nothing was gated.
    # Renormalise by the row sum instead, and only when something changed.
    if intervened:
        row_sums = modified_weights.sum(dim=-1, keepdim=True).clamp_min(1e-12)
        modified_weights = modified_weights / row_sums

    return modified_weights


def _compute_head_ethical_risk(head_idx: int, ethical_scores: Dict) -> float:
    """Compute the ethical risk for a specific attention head.

    Placeholder: a real implementation would compare the head's attention
    patterns against ethical guidelines derived from ``ethical_scores``.
    """
    return 0.0
components/unified_cal_trm.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ UnifiedCAL_TRM v1.3: Production-ready with full error handling
3
+
4
+ Implements a lightweight, efficient transformer module with:
5
+ - Tension-adaptive recursion depth (Samsung TRM-inspired)
6
+ - Built-in simple spotter and confessional
7
+ - Quantization support for efficiency
8
+ - Graceful degradation on error
9
+ """
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+ from typing import Dict, Any, Optional, Tuple, List
15
+ import random
16
+
17
class UnifiedCAL_TRM(nn.Module):
    """
    Production-ready TRM: full error handling, no external dependencies required.

    Pipeline: a tension "spotter" scores the input, recursion depth adapts
    inversely to tension, an iterative reason net refines a pooled state, and
    a simple empathetic "confessional" post-processes the result.  Any runtime
    failure degrades gracefully to the unmodified input.
    """
    def __init__(self, d_model: int = 256, max_rec_depth: int = 4):
        """
        Args:
            d_model: Feature dimensionality of the inputs.
            max_rec_depth: Upper bound on reasoning iterations.
        """
        super().__init__()
        self.d_model = d_model
        self.max_rec_depth = max_rec_depth

        # Simple internal spotter - no external dependency
        self.spotter = self._build_simple_spotter(d_model)

        # Internal confessional - self-contained
        self.confessional = self._build_simple_confessional(d_model)

        # TRM reason net: consumes [state ; context] and proposes a delta.
        self.reason_net = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.ReLU(),
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model)
        )

        # Initialize properly
        self._init_weights()

        self.tension_scale = 1.0 / max_rec_depth
        self._quantized = False

    def quantize(self):
        """Quantize for efficiency - call this after model is built"""
        if not self._quantized:
            self.reason_net = torch.quantization.quantize_dynamic(
                self.reason_net, {nn.Linear}, dtype=torch.qint8
            )
            self._quantized = True

    def _iterative_reason(self, state: torch.Tensor, input_x: torch.Tensor, max_depth: int) -> torch.Tensor:
        """Iterative refinement (loop instead of recursion, so no depth limits)."""
        current_state = state
        for _ in range(max_depth):
            rec_input = torch.cat([current_state, input_x], dim=-1)
            delta = self.reason_net(rec_input)
            current_state = current_state + 0.1 * delta  # Gentle residual
        return current_state

    def forward(self, x: torch.Tensor, attention_weights: Optional[torch.Tensor] = None,
                return_metadata: bool = False, audit_mode: bool = False,
                context_str: str = "") -> Tuple[torch.Tensor, Any]:
        """
        Process ``x`` through spot -> reason -> confess, with graceful fallback.

        Args:
            x: Input of shape [batch, seq, d_model]; 1D/2D inputs are promoted.
            attention_weights: Accepted for interface compatibility; unused.
            return_metadata: When True, return (output, metadata dict).
            audit_mode: Print diagnostic information.
            context_str: Optional user context forwarded to the confessional
                (new, backward-compatible: the original hard-coded "").

        Returns:
            Output tensor, or (output, metadata) when ``return_metadata``.
        """
        # Ensure 3D shape
        if x.dim() == 2:
            x = x.unsqueeze(0)
        elif x.dim() == 1:
            x = x.unsqueeze(0).unsqueeze(0)

        batch_size, seq_len, dim = x.shape

        # Initialize metadata
        meta = {
            'v_t': 0.0,
            'rec_depth': 0,
            'rituals': {},
            'error': None,
            'input_shape': list(x.shape)
        } if return_metadata else None

        try:
            # Get tension from spotter
            v_t_tensor = self.spotter(x)
            v_t = v_t_tensor.mean().item() if torch.is_tensor(v_t_tensor) else float(v_t_tensor)

            if meta:
                meta['v_t'] = v_t

            if audit_mode:
                print(f"[TRM] Tension: {v_t:.3f}")

            # Calculate recursion depth (inverse to tension)
            rec_depth = max(1, int(self.max_rec_depth * (1 - min(v_t, 1.0) * self.tension_scale)))
            if meta:
                meta['rec_depth'] = rec_depth

            # Initial state - use mean of sequence
            if seq_len > 1:
                initial_state = x.mean(dim=1)  # [batch, dim]
            else:
                initial_state = x[:, 0]  # [batch, dim]

            # Input for reasoning (context)
            context_input = x.mean(dim=1)  # [batch, dim]

            # Iterative reasoning
            reasoned_state = self._iterative_reason(initial_state, context_input, rec_depth)

            # Broadcast back to sequence
            reasoned_expanded = reasoned_state.unsqueeze(1).expand(batch_size, seq_len, dim)

            # Blend with original input
            blend_strength = 0.05
            x_enhanced = x + blend_strength * reasoned_expanded

            # Pass through confessional (bug fix: context_str was previously
            # always the empty string, so the empathy path never triggered).
            confessional_out, confessional_meta = self.confessional(
                x_enhanced,
                context_str=context_str,
                audit_mode=audit_mode
            )

            # Update metadata
            if meta and confessional_meta:
                meta['rituals'] = confessional_meta.get('ritual_report', {})
                meta['coherence'] = confessional_meta.get('coherence', 0.0)
                meta['cycles'] = confessional_meta.get('cycles_completed', 0)
                meta['interventions'] = confessional_meta.get('interventions_applied', [])

            result = confessional_out

        except Exception as e:
            # Graceful degradation: report and fall back to the raw input.
            if audit_mode:
                print(f"[TRM Error] {e}")
            if meta:
                meta['error'] = str(e)
            result = x  # Fallback to input

        return (result, meta) if return_metadata else result

    def _build_simple_spotter(self, d_model: int) -> nn.Module:
        """Build a simple tension detector without external dependencies.

        Bug fix: the original ``AdaptiveAvgPool1d(1) -> Flatten`` pooled over
        the *feature* dimension of a [batch, seq, dim] tensor, yielding a
        [batch, seq] tensor that only matched ``Linear(d_model, ...)`` when
        seq_len == d_model, so the spotter raised for almost all inputs and
        forward always fell into the error fallback.  We mean-pool over the
        sequence dimension instead.
        """
        class _Spotter(nn.Module):
            def __init__(self, dim: int):
                super().__init__()
                self.net = nn.Sequential(
                    nn.Linear(dim, 32),
                    nn.ReLU(),
                    nn.Linear(32, 1),
                    nn.Sigmoid()
                )

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                # [batch, seq, dim] -> [batch, dim] -> [batch, 1] in (0, 1)
                return self.net(x.mean(dim=1))

        return _Spotter(d_model)

    def _build_simple_confessional(self, d_model: int) -> nn.Module:
        """Build a simple confessional layer without external dependencies"""
        class SimpleConfessional(nn.Module):
            def __init__(self, d_model):
                super().__init__()
                self.d_model = d_model
                self.empathy_templates = [
                    "I hear you. This sounds really difficult.",
                    "Thank you for sharing this with me.",
                    "Your feelings are completely valid.",
                    "Let's take this one step at a time.",
                    "You're not alone in this.",
                    "That sounds incredibly challenging.",
                    "I'm here to listen and support you.",
                    "Your safety is the most important thing."
                ]
                self.memory = []

            def forward(self, x, context_str="", audit_mode=False):
                # Simple processing - can be enhanced later
                if audit_mode:
                    print(f"[Confessional] Processing: {x.shape}")

                # Add small empathetic "nudge" based on context
                if context_str and len(context_str) > 10:
                    empathy_strength = 0.02
                    empathy_vector = self._text_to_embedding(
                        random.choice(self.empathy_templates),
                        x.device
                    )
                    x = x + empathy_strength * empathy_vector

                meta = {
                    'processed': True,
                    'context_received': bool(context_str),
                    'memory_entries': len(self.memory)
                }

                return x, meta

            def _text_to_embedding(self, text: str, device: torch.device) -> torch.Tensor:
                # Crude char-code embedding, broadcastable over [batch, seq, dim].
                chars = [ord(c) / 128.0 for c in text[:self.d_model]]
                if len(chars) < self.d_model:
                    chars += [0.0] * (self.d_model - len(chars))
                return torch.tensor(chars, device=device).unsqueeze(0).unsqueeze(0)

        return SimpleConfessional(d_model)

    def _init_weights(self):
        """Xavier init for all Linear layers (skips a missing bias)."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0.01)
209
+
210
+
211
# Factory function for easy creation
def create_trm_model(d_model: int = 256, quantize: bool = True) -> UnifiedCAL_TRM:
    """Build a :class:`UnifiedCAL_TRM`, optionally int8-dynamic-quantized.

    Args:
        d_model: Hidden dimensionality of the model.
        quantize: Apply dynamic quantization to the reason net when True.
    """
    trm = UnifiedCAL_TRM(d_model=d_model)
    if quantize:
        trm.quantize()
    return trm
218
+
219
+
220
if __name__ == "__main__":
    print("🧪 Testing UnifiedCAL_TRM v1.3...")

    # Test various input scenarios
    test_cases = [
        torch.randn(1, 10, 64),  # Normal case
        torch.randn(5, 20, 64),  # Batch processing
        torch.randn(10, 64),     # Missing batch dim
        torch.randn(64),         # 1D input
    ]

    model = create_trm_model(d_model=64, quantize=False)

    for i, test_input in enumerate(test_cases):
        print(f"\n--- Test Case {i+1}: Input shape {test_input.shape} ---")

        # Bug fix: the original called the model with an unsupported
        # ``context_str`` kwarg and unpacked two values without requesting
        # metadata, so every test case raised before any assertion ran.
        output, metadata = model(
            test_input,
            return_metadata=True,
            audit_mode=True
        )

        print(f"✅ Output shape: {output.shape}")
        print(f"📊 Metadata: {metadata}")

        # Verify output makes sense
        assert not torch.isnan(output).any(), "Output contains NaN values"
        assert output.shape[0] == (1 if test_input.dim() < 3 else test_input.shape[0]), "Batch size mismatch"

    print("\n🎉 All tests passed! TRM is ready for integration.")
components/unity_action_integration.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unity Action Integration - Handles WebSocket communication with Unity/WebGL clients
3
+
4
+ Provides the bridge between the 3D realm experiences and the backend action tracking system.
5
+ """
6
+
7
+ import json
8
+ import logging
9
+ import asyncio
10
+ import websockets
11
+ from typing import Dict, Any, Optional, Set, Callable, Awaitable
12
+ from datetime import datetime
13
+
14
+ # Configure logging
15
+ logging.basicConfig(level=logging.INFO)
16
+ logger = logging.getLogger(__name__)
17
+
18
class UnityActionBridge:
    """
    Handles communication between Unity/WebGL clients and the backend action tracking system.

    This class manages WebSocket connections, processes incoming messages from Unity clients,
    and coordinates with the RealityBridge to track real-world actions and realm progress.

    NOTE(review): written against the legacy ``websockets`` server API — the
    two-argument handler signature ``(websocket, path)`` was removed in newer
    ``websockets`` releases; confirm the pinned version before upgrading.
    """

    def __init__(self, reality_bridge, purpose_realms, host: str = "localhost", port: int = 8765):
        """
        Initialize the Unity Action Bridge.

        Args:
            reality_bridge: Instance of RealityBridge for action tracking
            purpose_realms: Instance of PurposeDrivenRealm for realm management
            host: Host address to bind the WebSocket server to
            port: Port to run the WebSocket server on
        """
        self.reality_bridge = reality_bridge
        self.purpose_realms = purpose_realms
        self.host = host
        self.port = port
        # All currently open connections, authenticated or not.
        self.clients: Set[websockets.WebSocketServerProtocol] = set()
        self.user_sessions: Dict[str, Dict] = {}  # Maps user_id to session data
        self.server = None

        # Register message handlers; dispatched by "type" in _handle_connection.
        self.message_handlers = {
            "realm_completion": self._handle_realm_completion,
            "action_completed": self._handle_action_completed,
            "user_authenticated": self._handle_user_authenticated,
            "request_realm": self._handle_request_realm,
            "ping": self._handle_ping
        }

    async def start(self):
        """Start the WebSocket server and return the server object."""
        self.server = await websockets.serve(
            self._handle_connection,
            self.host,
            self.port,
            ping_interval=30,
            ping_timeout=10,
            close_timeout=5
        )
        logger.info(f"Unity Action Bridge started on ws://{self.host}:{self.port}")
        return self.server

    async def stop(self):
        """Stop the WebSocket server and clean up resources."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()
            logger.info("Unity Action Bridge stopped")

    async def _handle_connection(self, websocket, path):
        """Handle a new WebSocket connection.

        Reads JSON messages in a loop, dispatches by "type" to the registered
        handler, and guarantees client/session cleanup on disconnect.
        """
        # Add client to active connections
        self.clients.add(websocket)
        client_ip = websocket.remote_address[0] if websocket.remote_address else "unknown"
        logger.info(f"New client connected: {client_ip}")

        try:
            async for message in websocket:
                try:
                    # Parse incoming message
                    message_data = json.loads(message)
                    message_type = message_data.get("type")

                    # Log the incoming message
                    logger.debug(f"Received message from {client_ip}: {message_type}")

                    # Route to appropriate handler
                    if message_type in self.message_handlers:
                        response = await self.message_handlers[message_type](
                            websocket,
                            message_data.get("data", {})
                        )
                        if response:
                            await self._send_message(websocket, response)
                    else:
                        logger.warning(f"Unknown message type: {message_type}")
                        await self._send_error(websocket, f"Unknown message type: {message_type}")

                except json.JSONDecodeError:
                    error_msg = "Invalid JSON received"
                    logger.error(f"{error_msg}: {message}")
                    await self._send_error(websocket, error_msg)
                except Exception as e:
                    # Per-message errors are reported to the client but keep
                    # the connection alive.
                    error_msg = f"Error processing message: {str(e)}"
                    logger.error(error_msg, exc_info=True)
                    await self._send_error(websocket, error_msg)

        except websockets.exceptions.ConnectionClosed as e:
            logger.info(f"Client disconnected: {client_ip} - {e}")
        except Exception as e:
            logger.error(f"Connection error with {client_ip}: {str(e)}", exc_info=True)
        finally:
            # Clean up on disconnect
            self.clients.discard(websocket)

            # Remove from user sessions if this was an authenticated connection
            user_id = None
            for uid, session in list(self.user_sessions.items()):
                if session.get("websocket") == websocket:
                    user_id = uid
                    break

            if user_id:
                logger.info(f"Cleaning up session for user: {user_id}")
                self.user_sessions.pop(user_id, None)

    async def _handle_user_authenticated(self, websocket: websockets.WebSocketServerProtocol,
                                         data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Handle user authentication: record the session, return realm list.

        NOTE(review): no credential verification is performed here — any
        client supplying a user_id is accepted; confirm this is intended.
        """
        user_id = data.get("user_id")
        if not user_id:
            return {"type": "auth_error", "data": {"error": "Missing user_id"}}

        # Store the user session
        self.user_sessions[user_id] = {
            "websocket": websocket,
            # NOTE(review): datetime.utcnow() is naive and deprecated in
            # Python 3.12+; timestamps carry no timezone offset — confirm
            # clients expect that before changing.
            "last_active": datetime.utcnow().isoformat(),
            "user_data": data.get("user_data", {})
        }

        logger.info(f"User authenticated: {user_id}")
        return {
            "type": "auth_success",
            "data": {
                "user_id": user_id,
                "timestamp": datetime.utcnow().isoformat(),
                "available_realms": self.purpose_realms.list_available_realms(
                    self.user_sessions[user_id].get("user_data")
                )
            }
        }

    async def _handle_request_realm(self, websocket: websockets.WebSocketServerProtocol,
                                    data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Handle request for a realm experience."""
        user_id = data.get("user_id")
        realm_id = data.get("realm_id")

        if not user_id or not realm_id:
            return {"type": "error", "data": {"error": "Missing user_id or realm_id"}}

        # Get the realm experience (empty profile if not authenticated).
        realm_experience = self.purpose_realms.get_realm_experience(
            realm_id=realm_id,
            user_profile=self.user_sessions.get(user_id, {}).get("user_data", {})
        )

        # Update user's session
        if user_id in self.user_sessions:
            self.user_sessions[user_id]["current_realm"] = realm_id
            self.user_sessions[user_id]["last_active"] = datetime.utcnow().isoformat()

        return {
            "type": "realm_loaded",
            "data": realm_experience
        }

    async def _handle_realm_completion(self, websocket: websockets.WebSocketServerProtocol,
                                       data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Handle completion of a realm experience.

        Records the completion, generates a real-world "exit mission", and
        acknowledges with any newly unlocked realms.
        """
        user_id = data.get("user_id")
        realm_id = data.get("realm_id")
        completion_metrics = data.get("metrics", {})

        if not user_id or not realm_id:
            return {"type": "error", "data": {"error": "Missing user_id or realm_id"}}

        # Track the realm completion
        completion_result = self.purpose_realms.track_realm_completion(
            user_id=user_id,
            realm_id=realm_id,
            completion_metrics=completion_metrics
        )

        # Generate the exit mission
        exit_mission = self.reality_bridge.generate_realm_exit_mission(
            user_id=user_id,
            realm_experience={"current_location": realm_id}
        )

        # Update user's session
        if user_id in self.user_sessions:
            self.user_sessions[user_id]["last_realm"] = realm_id
            self.user_sessions[user_id]["last_active"] = datetime.utcnow().isoformat()

        return {
            "type": "realm_completion_ack",
            "data": {
                "realm_id": realm_id,
                "timestamp": datetime.utcnow().isoformat(),
                "exit_mission": exit_mission,
                "unlocked_realms": completion_result.get("unlocked_realms", []),
                "next_steps": completion_result.get("next_steps", [])
            }
        }

    async def _handle_action_completed(self, websocket: websockets.WebSocketServerProtocol,
                                       data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Handle completion of a real-world action.

        Forwards the action to the RealityBridge and reports the resulting
        momentum change back to the client.
        """
        user_id = data.get("user_id")
        action_id = data.get("action_id")
        evidence = data.get("evidence", "")

        if not user_id or not action_id:
            return {"type": "error", "data": {"error": "Missing user_id or action_id"}}

        # Track the action
        tracking_result = self.reality_bridge.track_real_world_action(
            user_id=user_id,
            action_id=action_id,
            evidence=evidence
        )

        # Update user's session
        if user_id in self.user_sessions:
            self.user_sessions[user_id]["last_action"] = datetime.utcnow().isoformat()
            self.user_sessions[user_id]["momentum_score"] = tracking_result.get("momentum_score", 0)

        return {
            "type": "action_tracked",
            "data": {
                "action_id": action_id,
                "timestamp": datetime.utcnow().isoformat(),
                "momentum_increase": tracking_result.get("momentum_increase", 0),
                "momentum_score": tracking_result.get("momentum_score", 0),
                "realm_unlocks": tracking_result.get("realm_unlocks", []),
                "encouragement": tracking_result.get("encouragement", "")
            }
        }

    async def _handle_ping(self, websocket: websockets.WebSocketServerProtocol,
                           data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Handle ping/pong for connection keep-alive."""
        return {"type": "pong", "data": {"timestamp": datetime.utcnow().isoformat()}}

    async def _send_message(self, websocket: websockets.WebSocketServerProtocol,
                            message: Dict[str, Any]) -> None:
        """Send a message to a WebSocket client.

        Send failures are logged and swallowed so one bad socket cannot take
        down the handler loop.
        """
        try:
            await websocket.send(json.dumps(message))
        except Exception as e:
            logger.error(f"Error sending message: {str(e)}", exc_info=True)

    async def _send_error(self, websocket: websockets.WebSocketServerProtocol,
                          error_message: str) -> None:
        """Send an error message to a WebSocket client."""
        error_response = {
            "type": "error",
            "data": {
                "message": error_message,
                "timestamp": datetime.utcnow().isoformat()
            }
        }
        await self._send_message(websocket, error_response)
278
+
279
+
280
# Example usage:
async def main():
    """Example of how to use the UnityActionBridge.

    Wires up the project components, serves on all interfaces, and runs until
    interrupted.  NOTE(review): the bare imports below assume the sibling
    modules are on sys.path; inside the ``components`` package these would
    normally be relative imports — confirm how this example is launched.
    """
    from reality_bridge import RealityBridge
    from purpose_realms import PurposeDrivenRealm

    # Initialize the required components
    reality_bridge = RealityBridge()
    purpose_realms = PurposeDrivenRealm()

    # Create and start the WebSocket server
    bridge = UnityActionBridge(
        reality_bridge=reality_bridge,
        purpose_realms=purpose_realms,
        host="0.0.0.0",  # Listen on all interfaces
        port=8765
    )

    server = await bridge.start()

    try:
        # Keep the server running (awaiting a never-completing Future).
        await asyncio.Future()
    except KeyboardInterrupt:
        logger.info("Shutting down...")
    finally:
        await bridge.stop()

if __name__ == "__main__":
    asyncio.run(main())
components/validation_protocol.py ADDED
@@ -0,0 +1,1184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced Validation Protocol for TRuCAL
3
+ Implements phased validation with ethical constraints, developmental tracking,
4
+ and biological constraints for the Ambient Sovereign Core.
5
+ """
6
+
7
+ from enum import Enum, auto
8
+ from dataclasses import dataclass, field
9
+ from typing import Dict, Any, List, Optional, Deque, Tuple, TypeVar, Generic, Callable
10
+ from collections import deque, defaultdict
11
+ import time
12
+ import torch
13
+ import torch.nn as nn
14
+ import numpy as np
15
+ import json
16
+ from dataclasses import asdict
17
+
18
+ # Type variables for generic validation
19
+ T = TypeVar('T')
20
+ Validator = Callable[[Any, Any], Tuple[bool, str]]
21
+
22
+ class ValidationError(Exception):
23
+ """Raised when validation fails with a specific error message."""
24
+ pass
25
+
26
class DevelopmentalPhase(Enum):
    """Developmental phases for the system's growth and learning.

    Ordering matters: the numeric values are compared and incremented when
    checking for phase transitions (see _check_developmental_progress).
    """
    PRE_CONVENTIONAL = 1  # Rule-following, self-focused
    CONVENTIONAL = 2  # Social norms and relationships
    POST_CONVENTIONAL = 3  # Abstract principles and ethics
31
+
32
class ValidationPhase(Enum):
    """Phased activation of system components for validation."""
    INIT = 1  # Core initialization and basic functionality
    AWARENESS = 2  # Self-monitoring and basic awareness
    REASONING = 3  # Ethical reasoning and context understanding
    INTEGRATION = 4  # Multi-context integration
    SOVEREIGN = 5  # Full autonomous operation

    def next_phase(self):
        """Return the successor phase, or self when already at SOVEREIGN."""
        try:
            return ValidationPhase(self.value + 1)
        except ValueError:
            return self
45
+
46
@dataclass
class ValidationRule:
    """Defines a validation rule with conditions and error messages.

    ``condition`` is invoked by ValidationProtocol._validate_phase_metrics
    with two arguments, (metrics, phase_config); the variadic annotation
    reflects that (the original one-argument annotation did not match usage).
    """
    name: str
    condition: Callable[..., bool]
    error_message: str
    required_phase: ValidationPhase = ValidationPhase.INIT

    def validate(self, value: Any, current_phase: ValidationPhase) -> Tuple[bool, str]:
        """Validate *value*; rules gated behind a later phase pass vacuously.

        Returns:
            (passed, message) — message is empty on success.  The original
            returned ``error_message`` even for passing checks, which was
            inconsistent with the phase-gated branch returning (True, "").
        """
        if current_phase.value < self.required_phase.value:
            return True, ""
        ok = self.condition(value)
        return ok, "" if ok else self.error_message
59
+
60
@dataclass
class ValidationState:
    """Immutable snapshot of one step of the validation process."""
    phase: ValidationPhase
    metrics: Dict[str, float]
    passed: bool = True
    timestamp: float = field(default_factory=time.time)
    errors: List[Dict[str, str]] = field(default_factory=list)
    warnings: List[Dict[str, str]] = field(default_factory=list)
    ethical_context: Optional[Dict[str, Any]] = None
    developmental_phase: Optional[DevelopmentalPhase] = None

    def to_dict(self) -> Dict[str, Any]:
        """Render this snapshot as a JSON-serializable dictionary."""
        dev_name = self.developmental_phase.name if self.developmental_phase else None
        return {
            'phase': self.phase.name,
            'metrics': self.metrics,
            'passed': self.passed,
            'timestamp': self.timestamp,
            'errors': self.errors,
            'warnings': self.warnings,
            'developmental_phase': dev_name,
            'ethical_context': self.ethical_context,
        }
84
+
85
class ValidationProtocol:
    """
    Enhanced validation framework for TRuCAL with ethical and developmental tracking.

    Features:
    - Phase-based validation with progressive complexity
    - Ethical constraint validation
    - Developmental phase tracking
    - Comprehensive diagnostics and reporting
    - Integration with model's forward pass
    """

    def __init__(self,
                 model: nn.Module,
                 max_phases: int = 5,
                 tolerance: float = 0.05,
                 cultural_context: str = 'universal'):
        """
        Initialize the validation protocol.

        Args:
            model: The model to validate
            max_phases: Maximum number of validation phases
            tolerance: Tolerance for metric comparisons
            cultural_context: Cultural context for ethical validation
        """
        self.model = model
        self.current_phase = ValidationPhase.INIT
        self.developmental_phase = DevelopmentalPhase.PRE_CONVENTIONAL
        self.history: List[ValidationState] = []
        # BUG FIX: the original fused the next two assignments into a single
        # line ("self.max_phases = max_phasesself.tolerance = tolerance"),
        # which raised NameError at construction time.
        self.max_phases = max_phases
        self.tolerance = tolerance
        self.cultural_context = cultural_context
        self.rules: Dict[str, ValidationRule] = {}
        self.phase_metrics = self._initialize_phase_metrics()
        self.developmental_metrics = self._initialize_developmental_metrics()

        # Register default validation rules
        self._register_default_rules()
123
+
124
def _initialize_phase_metrics(self) -> Dict[ValidationPhase, Dict[str, Any]]:
    """Initialize metrics and thresholds for each validation phase.

    Thresholds tighten monotonically from INIT (anything passes) through
    SOVEREIGN (near-perfect coherence/ethics required); the rules registered
    in _register_default_rules read these keys.
    """
    return {
        ValidationPhase.INIT: {
            'min_coherence': 0.0,
            'max_entropy': 1.0,
            'min_ethical_alignment': 0.0,
            'max_cultural_bias': 1.0,
            'description': 'Core initialization and basic functionality'
        },
        ValidationPhase.AWARENESS: {
            'min_coherence': 0.4,
            'max_entropy': 0.8,
            'min_ethical_alignment': 0.3,
            'max_cultural_bias': 0.7,
            'description': 'Self-monitoring and basic awareness'
        },
        ValidationPhase.REASONING: {
            'min_coherence': 0.6,
            'max_entropy': 0.6,
            'min_ethical_alignment': 0.5,
            'max_cultural_bias': 0.5,
            'description': 'Ethical reasoning and context understanding'
        },
        ValidationPhase.INTEGRATION: {
            'min_coherence': 0.75,
            'max_entropy': 0.4,
            'min_ethical_alignment': 0.7,
            'max_cultural_bias': 0.3,
            'description': 'Multi-context integration'
        },
        ValidationPhase.SOVEREIGN: {
            'min_coherence': 0.9,
            'max_entropy': 0.2,
            'min_ethical_alignment': 0.9,
            'max_cultural_bias': 0.1,
            'description': 'Full autonomous operation'
        }
    }
163
+
164
def _initialize_developmental_metrics(self) -> Dict[DevelopmentalPhase, Dict[str, Any]]:
    """Initialize metrics for tracking developmental progress.

    The min_autonomy / min_empathy floors are the promotion criteria used by
    _check_developmental_progress and _validate_phase_metrics.
    """
    return {
        DevelopmentalPhase.PRE_CONVENTIONAL: {
            'focus': ['self_preservation', 'rule_following'],
            'min_autonomy': 0.0,
            'min_empathy': 0.0,
            'description': 'Focus on basic functionality and rules'
        },
        DevelopmentalPhase.CONVENTIONAL: {
            'focus': ['social_norms', 'relationships'],
            'min_autonomy': 0.3,
            'min_empathy': 0.5,
            'description': 'Understanding social context and relationships'
        },
        DevelopmentalPhase.POST_CONVENTIONAL: {
            'focus': ['ethical_principles', 'abstract_reasoning'],
            'min_autonomy': 0.7,
            'min_empathy': 0.8,
            'description': 'Abstract ethical reasoning and principles'
        }
    }
186
+
187
def _register_default_rules(self) -> None:
    """Register the built-in threshold rules (coherence, entropy, ethics, bias)."""
    defaults = (
        ('coherence_threshold',
         lambda m, p: m.get('coherence', 0) >= p['min_coherence'],
         'Coherence below threshold for phase',
         ValidationPhase.INIT),
        ('entropy_threshold',
         lambda m, p: m.get('entropy', 1) <= p['max_entropy'],
         'Entropy above threshold for phase',
         ValidationPhase.INIT),
        ('ethical_alignment',
         lambda m, p: m.get('ethical_alignment', 0) >= p['min_ethical_alignment'],
         'Ethical alignment below threshold',
         ValidationPhase.AWARENESS),
        ('cultural_bias',
         lambda m, p: m.get('cultural_bias', 1) <= p['max_cultural_bias'],
         'Cultural bias above threshold',
         ValidationPhase.AWARENESS),
    )
    for rule_name, predicate, message, phase in defaults:
        self.add_rule(rule_name, predicate, message, phase)
213
+
214
def add_rule(self,
             name: str,
             condition: Callable[[Dict[str, float], Dict[str, Any]], bool],
             error_message: str,
             required_phase: ValidationPhase = ValidationPhase.INIT) -> None:
    """Register (or replace) a named validation rule.

    Args:
        name: Unique name for the rule.
        condition: Predicate over (metrics, phase_config).
        error_message: Message recorded when the rule fails.
        required_phase: Earliest phase at which the rule applies.
    """
    rule = ValidationRule(name=name,
                          condition=condition,
                          error_message=error_message,
                          required_phase=required_phase)
    self.rules[name] = rule
233
+
234
def _validate_phase_metrics(self, metrics: Dict[str, float]) -> Tuple[bool, List[Dict[str, str]]]:
    """
    Validate metrics against phase-specific thresholds and rules.

    NOTE(review): every registered rule runs here regardless of its
    required_phase — the phase gating in ValidationRule.validate is never
    consulted by this method. Confirm which gating is intended.

    Returns:
        Tuple of (is_valid, list_of_errors)
    """
    phase_config = self.phase_metrics.get(self.current_phase, {})
    errors = []

    # Apply all relevant validation rules; a crashing rule is recorded as an
    # error rather than aborting the whole validation pass.
    for rule in self.rules.values():
        try:
            if not rule.condition(metrics, phase_config):
                errors.append({
                    'rule': rule.name,
                    'message': rule.error_message,
                    'phase': self.current_phase.name,
                    'metrics': {k: metrics.get(k, None) for k in ['coherence', 'entropy', 'ethical_alignment', 'cultural_bias'] if k in metrics}
                })
        except Exception as e:
            errors.append({
                'rule': rule.name,
                'message': f'Validation error: {str(e)}',
                'phase': self.current_phase.name,
                'error_type': 'validation_error'
            })

    # Check developmental metrics if available — these enforce the floors of
    # the CURRENT developmental phase, not the next one.
    if 'developmental_metrics' in metrics:
        dev_metrics = metrics['developmental_metrics']
        dev_config = self.developmental_metrics.get(self.developmental_phase, {})

        if 'autonomy' in dev_metrics and 'min_autonomy' in dev_config:
            if dev_metrics['autonomy'] < dev_config['min_autonomy']:
                errors.append({
                    'rule': 'developmental_autonomy',
                    'message': f'Autonomy score {dev_metrics["autonomy"]} below minimum {dev_config["min_autonomy"]} for {self.developmental_phase.name}',
                    'phase': self.current_phase.name
                })

        if 'empathy' in dev_metrics and 'min_empathy' in dev_config:
            if dev_metrics['empathy'] < dev_config['min_empathy']:
                errors.append({
                    'rule': 'developmental_empathy',
                    'message': f'Empathy score {dev_metrics["empathy"]} below minimum {dev_config["min_empathy"]} for {self.developmental_phase.name}',
                    'phase': self.current_phase.name
                })

    return len(errors) == 0, errors
284
+
285
def advance_phase(self,
                  x: torch.Tensor,
                  context: str = "",
                  ethical_context: Optional[Dict[str, Any]] = None,
                  force: bool = False) -> ValidationState:
    """
    Advance to the next validation phase if current phase passes.

    Args:
        x: Input tensor for validation
        context: Context string for validation
        ethical_context: Optional ethical context dictionary
        force: If True, force advancement even if validation fails

    Returns:
        ValidationState containing the result of validation

    Raises:
        ValidationError: If validation fails and force=False

    NOTE(review): when validation fails without force, the ValidationError
    raised below is caught by the generic `except Exception` handler, which
    appends a SECOND (error) state to history and re-wraps the exception —
    confirm the double history entry is intended.
    """
    # Get current phase configuration
    phase_config = self._get_phase_config()

    # Run validation with current phase settings
    with torch.no_grad():
        # Store original state so the model's flags/mode can be restored
        orig_states = self._capture_model_state()

        try:
            # Apply phase-specific configuration (flags + eval mode)
            self._configure_phase(phase_config)

            # Run forward pass with metrics collection
            metrics = self._collect_metrics(x, context, ethical_context)

            # Validate metrics against phase requirements
            is_valid, errors = self._validate_phase_metrics(metrics)

            # Check for developmental progress (return value currently unused;
            # this call may promote self.developmental_phase as a side effect)
            dev_progress = self._check_developmental_progress(metrics)

            # Create state snapshot
            state = ValidationState(
                phase=self.current_phase,
                metrics=metrics,
                passed=is_valid,
                errors=errors,
                ethical_context=ethical_context,
                developmental_phase=self.developmental_phase
            )

            self.history.append(state)

            # Advance phase if validation passed or forced
            if (is_valid or force) and self.current_phase != ValidationPhase.SOVEREIGN:
                self.current_phase = self.current_phase.next_phase()

            # Check for developmental phase transition
            self._update_developmental_phase(metrics)

            # Raise exception if validation failed and not forcing
            if not is_valid and not force:
                error_messages = [e['message'] for e in errors[:3]]  # Limit to first 3 errors
                raise ValidationError(f"Validation failed: {'; '.join(error_messages)}")

            return state

        except Exception as e:
            # Log the error and re-raise; metrics may not exist if the
            # failure happened before _collect_metrics returned
            error_state = ValidationState(
                phase=self.current_phase,
                metrics=metrics if 'metrics' in locals() else {},
                passed=False,
                errors=[{'error': str(e), 'type': type(e).__name__}],
                ethical_context=ethical_context,
                developmental_phase=self.developmental_phase
            )
            self.history.append(error_state)
            raise ValidationError(f"Validation error: {str(e)}") from e

        finally:
            # Restore original model state regardless of outcome
            self._restore_model_state(orig_states)
368
+
369
+ def _capture_model_state(self) -> Dict[str, Any]:
370
+ """Capture the current state of model flags and settings."""
371
+ return {
372
+ 'enable_ambient': getattr(self.model, 'enable_ambient', False),
373
+ 'enable_rituals': getattr(self.model, 'enable_rituals', False),
374
+ 'enable_integrity': getattr(self.model, 'enable_integrity', False),
375
+ 'training': self.model.training
376
+ }
377
+
378
+ def _restore_model_state(self, states: Dict[str, Any]) -> None:
379
+ """Restore the model's state from captured values."""
380
+ for key, value in states.items():
381
+ if hasattr(self.model, key):
382
+ setattr(self.model, key, value)
383
+ self.model.train(states.get('training', False))
384
+
385
def _collect_metrics(self,
                     x: torch.Tensor,
                     context: str,
                     ethical_context: Optional[Dict[str, Any]]) -> Dict[str, float]:
    """Collect metrics from model forward pass.

    Best-effort: any exception during the forward pass yields sentinel
    fail metrics (coherence 0, entropy 1) instead of propagating.
    NOTE(review): the 'error' entry holds a string inside a dict typed
    Dict[str, float]; downstream numeric consumers should skip that key.
    """
    metrics = {}

    try:
        if hasattr(self.model, 'forward_with_metrics'):
            _, metrics = self.model.forward_with_metrics(x, context=context, ethical_context=ethical_context)
        else:
            # Model has no metrics hook — run it anyway and fall back to defaults
            _ = self.model(x)
            metrics = {}

        # Add default metrics if not provided
        if 'coherence' not in metrics:
            metrics['coherence'] = 0.5  # Default neutral value
        if 'entropy' not in metrics:
            metrics['entropy'] = 0.5  # Default neutral value

        # Add ethical metrics if available
        if ethical_context:
            metrics.update({
                'ethical_alignment': ethical_context.get('alignment_score', 0.5),
                'cultural_bias': ethical_context.get('bias_score', 0.5)
            })

        return metrics

    except Exception as e:
        # Return minimum passing metrics on error
        return {
            'coherence': 0.0,
            'entropy': 1.0,
            'error': str(e)
        }
421
+
422
def _check_developmental_progress(self, metrics: Dict[str, float]) -> bool:
    """Promote to the next developmental phase when its minimums are met.

    Args:
        metrics: Metrics dict; developmental scores are read from the
            'developmental_metrics' sub-dict when present.

    Returns:
        True if the developmental phase was advanced, else False.
    """
    if 'developmental_metrics' not in metrics:
        return False

    dev_metrics = metrics['developmental_metrics']
    # (removed an unused local that fetched the current phase's config)

    # Check if we meet criteria for the next developmental phase
    next_phase_value = self.developmental_phase.value + 1
    if next_phase_value <= len(DevelopmentalPhase):
        next_phase = DevelopmentalPhase(next_phase_value)
        next_phase_metrics = self.developmental_metrics.get(next_phase, {})

        # Missing thresholds default to 1.0 so an unconfigured phase can
        # never trigger an accidental promotion.
        autonomy_ok = dev_metrics.get('autonomy', 0) >= next_phase_metrics.get('min_autonomy', 1.0)
        empathy_ok = dev_metrics.get('empathy', 0) >= next_phase_metrics.get('min_empathy', 1.0)

        if autonomy_ok and empathy_ok:
            self.developmental_phase = next_phase
            return True

    return False
445
+
446
def _update_developmental_phase(self, metrics: Dict[str, float]) -> None:
    """Promote the developmental phase via fixed autonomy/empathy thresholds."""
    if 'developmental_metrics' not in metrics:
        return

    dev_metrics = metrics['developmental_metrics']
    autonomy = dev_metrics.get('autonomy', 0)
    empathy = dev_metrics.get('empathy', 0)

    # One-step promotion only: PRE_CONVENTIONAL -> CONVENTIONAL -> POST_CONVENTIONAL
    if self.developmental_phase == DevelopmentalPhase.PRE_CONVENTIONAL:
        if autonomy > 0.7 and empathy > 0.6:
            self.developmental_phase = DevelopmentalPhase.CONVENTIONAL
    elif self.developmental_phase == DevelopmentalPhase.CONVENTIONAL:
        if autonomy > 0.8 and empathy > 0.9:
            self.developmental_phase = DevelopmentalPhase.POST_CONVENTIONAL
465
def _get_phase_config(self) -> Dict[str, Any]:
    """Get configuration for current phase.

    Builds the flag set consumed by _configure_phase (flags switch on as the
    phase ordinal rises) and merges in the current phase's thresholds.
    """
    phase_config = {
        'enable_ambient': self.current_phase.value >= ValidationPhase.AWARENESS.value,
        'enable_rituals': self.current_phase.value >= ValidationPhase.REASONING.value,
        'enable_integrity': self.current_phase.value >= ValidationPhase.INTEGRATION.value,
        'enable_full': self.current_phase == ValidationPhase.SOVEREIGN,
        'phase_name': self.current_phase.name,
        'phase_description': self.phase_metrics.get(self.current_phase, {}).get('description', ''),
        'developmental_phase': self.developmental_phase.name,
        'developmental_focus': self.developmental_metrics.get(self.developmental_phase, {}).get('focus', [])
    }

    # Add phase-specific thresholds (min_coherence, max_entropy, ...)
    phase_config.update(self.phase_metrics.get(self.current_phase, {}))
    return phase_config
481
+
482
def _configure_phase(self, config: Dict[str, Any]) -> None:
    """
    Configure model based on phase settings.

    Args:
        config: Dictionary containing phase configuration; must contain the
            four enable_* keys (guaranteed when built by _get_phase_config —
            config[flag] would raise KeyError otherwise).
    """
    # Set model flags if they exist
    for flag in ['enable_ambient', 'enable_rituals', 'enable_integrity', 'enable_full']:
        if hasattr(self.model, flag):
            setattr(self.model, flag, config[flag])

    # Set model to evaluation mode during validation
    self.model.eval()

    # Apply any phase-specific model configurations via an optional hook
    if hasattr(self.model, 'configure_for_phase'):
        self.model.configure_for_phase(self.current_phase, config)
500
+
501
def get_validation_summary(self, last_n: int = 5) -> Dict[str, Any]:
    """Summarize the most recent validation attempts.

    Args:
        last_n: How many trailing history entries to include.

    Returns:
        Summary dict, or {'status': 'no_validation_history'} when empty.
    """
    if not self.history:
        return {'status': 'no_validation_history'}

    window = self.history[-last_n:]
    passed_count = sum(1 for entry in window if entry.passed)
    return {
        'current_phase': self.current_phase.name,
        'developmental_phase': self.developmental_phase.name,
        'recent_states': [entry.to_dict() for entry in window],
        'success_rate': passed_count / len(window),
        'common_errors': self._get_common_errors(window),
    }
522
+
523
+ def _get_common_errors(self, states: List[ValidationState]) -> List[Dict[str, Any]]:
524
+ """Extract and count common errors from validation states."""
525
+ error_counts = defaultdict(int)
526
+
527
+ for state in states:
528
+ for error in state.errors:
529
+ error_key = error.get('message', str(error))
530
+ error_counts[error_key] += 1
531
+
532
+ return [
533
+ {'error': error, 'count': count}
534
+ for error, count in sorted(error_counts.items(), key=lambda x: -x[1])
535
+ ][:5] # Top 5 most common errors
536
+
537
def save_validation_report(self, filepath: str) -> None:
    """Save validation history to a JSON file.

    Args:
        filepath: Destination path; the file is overwritten if it exists.

    The report bundles the full state history (via ValidationState.to_dict)
    plus the protocol's configuration, so it can be reloaded with
    load_validation_report.
    """
    report = {
        'timestamp': time.time(),
        'current_phase': self.current_phase.name,
        'developmental_phase': self.developmental_phase.name,
        'history': [s.to_dict() for s in self.history],
        'config': {
            'max_phases': self.max_phases,
            'tolerance': self.tolerance,
            'cultural_context': self.cultural_context
        }
    }

    with open(filepath, 'w') as f:
        json.dump(report, f, indent=2)
553
+
554
@classmethod
def load_validation_report(cls, filepath: str) -> Dict[str, Any]:
    """Read back a validation report previously written by save_validation_report."""
    with open(filepath, 'r') as fh:
        raw = fh.read()
    return json.loads(raw)
559
+
560
+
561
class BiologicallyConstrainedRituals(nn.Module):
    """
    Enhanced biologically-inspired constraints with ethical and developmental considerations.

    Features:
    - Synaptic homeostasis with adaptive rate limiting
    - Reflection mechanisms for better generalization
    - Ethical constraint integration
    - Developmental phase adaptation
    """

    def __init__(self,
                 model: nn.Module,
                 max_opt_rate: float = 0.1,
                 reflection_pause_prob: float = 0.1,
                 min_reflection_time: float = 0.1,
                 developmental_phase: DevelopmentalPhase = DevelopmentalPhase.PRE_CONVENTIONAL):
        """
        Initialize the biologically constrained rituals.

        Args:
            model: The model to apply constraints to
            max_opt_rate: Maximum allowed optimization rate
            reflection_pause_prob: Probability of entering reflection
            min_reflection_time: Minimum time between reflections (seconds)
            developmental_phase: Current developmental phase
        """
        super().__init__()
        self.model = model
        self.max_opt_rate = max_opt_rate
        self.reflection_pause_prob = reflection_pause_prob
        self.min_reflection_time = min_reflection_time
        self.developmental_phase = developmental_phase

        # State tracking with decay:
        # last_update — reserved per-parameter bookkeeping (cleared in reset_states)
        # optimization_rates — param name -> (last grad norm, last time, EMA of norm)
        # reflection_timers — context hash -> last reflection timestamp
        # ethical_violations — violation type -> running count
        self.last_update = {}
        self.optimization_rates = {}
        self.reflection_timers = {}
        self.ethical_violations = defaultdict(int)

        # Adaptive parameters based on developmental phase
        self._update_phase_parameters()
603
+
604
def _update_phase_parameters(self) -> None:
    """Scale learning/reflection rates to match the developmental phase."""
    # Earlier phases learn and reflect more conservatively;
    # POST_CONVENTIONAL runs at full configured rates.
    scale_by_phase = {
        DevelopmentalPhase.PRE_CONVENTIONAL: 0.5,
        DevelopmentalPhase.CONVENTIONAL: 0.8,
    }
    scale = scale_by_phase.get(self.developmental_phase, 1.0)
    self.effective_opt_rate = self.max_opt_rate * scale
    self.effective_reflection_prob = self.reflection_pause_prob * scale
615
+
616
def update_developmental_phase(self, new_phase: DevelopmentalPhase) -> None:
    """Switch developmental phase and rescale the adaptive parameters."""
    if new_phase == self.developmental_phase:
        return
    self.developmental_phase = new_phase
    self._update_phase_parameters()
621
+
622
def forward(self, x: torch.Tensor, context: Dict[str, Any] = None) -> torch.Tensor:
    """
    Apply biological constraints during forward pass.

    Args:
        x: Input tensor
        context: Optional context dictionary with ethical and developmental info

    Returns:
        Processed tensor with biological constraints applied
    """
    # Update developmental phase if provided in context
    if context and 'developmental_phase' in context:
        self.update_developmental_phase(context['developmental_phase'])

    # Get context hash for state tracking.
    # BUG FIX: the context may hold non-JSON-serializable values (e.g. the
    # DevelopmentalPhase enum consumed just above), which made the original
    # json.dumps raise TypeError; default=str keeps hashing deterministic
    # for such values.
    ctx_hash = hash(json.dumps(context, sort_keys=True, default=str)) if context else 0

    # Check if reflection is needed based on ethical context
    if context and 'ethical_violation' in context:
        self._handle_ethical_violation(context['ethical_violation'], ctx_hash)

    # Apply reflection if needed
    if self._needs_reflection(ctx_hash):
        x = self._apply_reflection(x, ctx_hash, context)

    return x
649
+
650
def constrain_gradients(self,
                        gradients: torch.Tensor,
                        param_name: str = "",
                        ethical_context: Optional[Dict[str, Any]] = None) -> torch.Tensor:
    """
    Apply gradient constraints based on biological and ethical principles.

    Args:
        gradients: Input gradients to constrain
        param_name: Name of the parameter being optimized
        ethical_context: Optional ethical context for constraint adjustment

    Returns:
        Constrained gradients (no-op when the module is in eval mode)
    """
    if not self.training:
        return gradients

    # Track optimization rates with exponential moving average
    grad_norm = gradients.norm().item()
    now = time.time()

    if param_name in self.optimization_rates:
        # Stored tuple is (last grad norm, last timestamp, EMA of norm);
        # NOTE(review): last_norm is read but never used below.
        last_norm, last_time, ema = self.optimization_rates[param_name]
        time_diff = max(now - last_time, 1e-8)

        # Update EMA of gradient norm; alpha -> 1 for long gaps, so stale
        # history is forgotten faster the longer the parameter was idle.
        alpha = 1 - np.exp(-time_diff)  # Adaptive smoothing
        new_ema = alpha * grad_norm + (1 - alpha) * ema

        # Store updated state
        self.optimization_rates[param_name] = (grad_norm, now, new_ema)

        # Apply rate limiting based on EMA (homeostatic cap on update speed)
        if new_ema > self.effective_opt_rate:
            scale = self.effective_opt_rate / (new_ema + 1e-8)
            gradients = gradients * scale
    else:
        # Initialize tracking on first sight of this parameter
        self.optimization_rates[param_name] = (grad_norm, now, grad_norm)

    # Apply ethical constraints if provided
    if ethical_context and 'constraint_violation' in ethical_context:
        gradients = self._apply_ethical_constraints(gradients, ethical_context)

    return gradients
696
+
697
+ def _handle_ethical_violation(self, violation: Dict[str, Any], context_hash: int) -> None:
698
+ """Handle an ethical violation by adjusting behavior."""
699
+ violation_key = violation.get('type', 'unknown')
700
+ self.ethical_violations[violation_key] += 1
701
+
702
+ # Increase reflection probability after violations
703
+ self.reflection_pause_prob = min(
704
+ self.reflection_pause_prob * 1.5, # Increase by 50%
705
+ 0.9 # But cap at 90%
706
+ )
707
+
708
+ # Reset reflection timer to force reflection
709
+ self.reflection_timers[context_hash] = 0
710
+
711
+ def _apply_ethical_constraints(self,
712
+ gradients: torch.Tensor,
713
+ ethical_context: Dict[str, Any]) -> torch.Tensor:
714
+ """Apply ethical constraints to gradients."""
715
+ violation = ethical_context['constraint_violation']
716
+ violation_type = violation.get('type', 'generic')
717
+
718
+ if violation_type == 'safety':
719
+ # For safety violations, significantly reduce update magnitude
720
+ return gradients * 0.1
721
+ elif violation_type == 'fairness':
722
+ # For fairness issues, project out biased components
723
+ # This is a simplified example - real implementation would be more sophisticated
724
+ mean_grad = gradients.mean(dim=0, keepdim=True)
725
+ return gradients - mean_grad
726
+
727
+ return gradients
728
+
729
def _needs_reflection(self, context_hash: int) -> bool:
    """Determine if reflection is needed based on context and timing.

    Stochastic: draws torch.rand, so repeated calls with identical state
    may return different results. The probability grows with the running
    violation count but is hard-capped at 0.8.
    """
    now = time.time()
    last_reflection = self.reflection_timers.get(context_hash, 0)

    # Enforce minimum time between reflections
    if (now - last_reflection) < self.min_reflection_time:
        return False

    # Adjust reflection probability based on recent violations
    total_violations = sum(self.ethical_violations.values())
    adjusted_prob = min(
        self.effective_reflection_prob * (1 + total_violations * 0.1),  # +10% per violation
        0.8  # Cap at 80% probability
    )

    return torch.rand(1).item() < adjusted_prob
746
+
747
def _apply_reflection(self,
                      x: torch.Tensor,
                      context_hash: int,
                      context: Optional[Dict[str, Any]] = None) -> torch.Tensor:
    """
    Apply reflection to the input tensor.

    In training mode this injects noise whose scale grows with the running
    violation count; in eval mode the input is returned unchanged (only the
    reflection timestamp is recorded).

    Args:
        x: Input tensor
        context_hash: Hash of the context for state tracking
        context: Optional context dictionary

    Returns:
        Reflected tensor
    """
    # Store reflection time
    self.reflection_timers[context_hash] = time.time()

    if self.training:
        # In training, add adaptive noise based on recent violations
        noise_scale = 0.1 * (1 + sum(self.ethical_violations.values()) * 0.2)
        noise = torch.randn_like(x) * noise_scale

        # If we have ethical context, bias the noise away from problematic regions
        if context and 'constraint_direction' in context:
            constraint_dir = torch.tensor(context['constraint_direction'],
                                          device=x.device,
                                          dtype=x.dtype)
            # Project noise away from constraint violation direction.
            # NOTE(review): .sum() reduces over ALL elements, producing one
            # global coefficient — for batched inputs this is not a
            # per-sample projection; confirm that is intended.
            noise = noise - (noise * constraint_dir).sum() * constraint_dir

        return x + noise

    return x
781
+
782
def get_diagnostics(self) -> Dict[str, Any]:
    """Report current phase, effective rates, violation tallies and gradient activity."""
    latest_reflection = max(self.reflection_timers.values()) if self.reflection_timers else None
    # Third tuple slot is the EMA of gradient norms per parameter.
    activity = {param: record[2] for param, record in self.optimization_rates.items()}
    return {
        'developmental_phase': self.developmental_phase.name,
        'effective_learning_rate': self.effective_opt_rate,
        'reflection_probability': self.effective_reflection_prob,
        'ethical_violations': dict(self.ethical_violations),
        'last_reflection': latest_reflection,
        'parameter_activity': activity,
    }
795
+
796
def reset_states(self) -> None:
    """Drop all tracked state and restore phase-derived base parameters."""
    for store in (self.last_update,
                  self.optimization_rates,
                  self.reflection_timers,
                  self.ethical_violations):
        store.clear()
    # Reset to base parameters for the current developmental phase
    self._update_phase_parameters()
803
+
804
+
805
class SovereignMessageBus:
    """
    Enhanced message bus for cross-component communication with priority handling,
    message persistence, and delivery guarantees.

    Features:
    - Priority-based message processing
    - Persistent message storage
    - Delivery acknowledgments
    - Error handling and retries
    - Message filtering and routing
    """

    class Message:
        """Enhanced message with metadata and delivery tracking."""
        def __init__(self,
                     message_type: str,
                     data: Any,
                     priority: int = 0,
                     require_ack: bool = False,
                     ttl: float = 3600.0,  # 1 hour default TTL
                     source: str = None):
            # Payload and routing metadata
            self.message_type = message_type
            self.data = data
            self.priority = priority
            self.timestamp = time.time()
            # Delivery tracking: retries only apply to ack-required messages
            self.require_ack = require_ack
            self.ack_received = False
            self.retry_count = 0
            self.max_retries = 3 if require_ack else 0
            self.ttl = ttl
            self.source = source
            self.delivery_attempts = 0
            self.delivered = False
            # NOTE(review): id combines a millisecond timestamp with
            # hash(str(data)) % 1e6 — collisions are possible for messages
            # created within the same millisecond.
            self.id = f"{int(self.timestamp * 1000)}_{hash(str(data)) % 1000000}"
840
+
841
def __init__(self,
             max_queue_size: int = 1000,
             persistence_file: Optional[str] = None):
    """
    Initialize the message bus.

    Args:
        max_queue_size: Maximum number of messages to keep in memory
        persistence_file: Optional file path for message persistence
    """
    self.subscribers = defaultdict(list)  # message_type -> [(callback, filter_fn)]
    self.handlers = {}  # message_type -> [(handler, filter_fn)]
    self.message_queue = []  # heap of (-priority, timestamp, id, Message)
    self.max_queue_size = max_queue_size
    self.persistence_file = persistence_file
    self.pending_acks = {}  # message id -> Message awaiting acknowledgment
    self.message_history = deque(maxlen=max_queue_size // 2)

    # Load persisted messages if file exists.
    # NOTE(review): needs a module-level `import os`; _load_messages is
    # defined outside this excerpt — confirm it exists further down.
    if persistence_file and os.path.exists(persistence_file):
        self._load_messages()
862
+
863
def subscribe(self,
              message_type: str,
              callback: callable,
              filter_fn: Optional[callable] = None) -> None:
    """
    Register *callback* for *message_type*, with an optional message filter.

    Args:
        message_type: Type of message to subscribe to
        callback: Callback function to invoke when message is received
        filter_fn: Optional predicate (message -> bool); defaults to accept-all
    """
    predicate = filter_fn if filter_fn is not None else (lambda _: True)
    self.subscribers[message_type].append((callback, predicate))
876
+
877
def publish(self,
            message_type: str,
            data: Any,
            priority: int = 0,
            require_ack: bool = False,
            ttl: float = 3600.0,
            source: str = None) -> str:
    """
    Publish a message to the bus.

    Args:
        message_type: Type of the message
        data: Message payload
        priority: Message priority (higher = processed first)
        require_ack: Whether to wait for acknowledgment
        ttl: Time-to-live in seconds
        source: Optional source identifier

    Returns:
        Message ID for tracking

    NOTE(review): requires a module-level `import heapq`; _process_messages
    and _persist_messages are defined outside this excerpt.
    """
    # Create message
    msg = self.Message(
        message_type=message_type,
        data=data,
        priority=priority,
        require_ack=require_ack,
        ttl=ttl,
        source=source
    )

    # Add to queue and process. Negated priority gives a max-heap; the
    # (timestamp, id) tie-breakers ensure Message objects themselves are
    # never compared by heapq.
    heapq.heappush(self.message_queue, (-priority, msg.timestamp, msg.id, msg))

    # Store for acknowledgment tracking if needed
    if require_ack:
        self.pending_acks[msg.id] = msg

    # Process messages synchronously on publish
    self._process_messages()

    # Persist if configured
    if self.persistence_file:
        self._persist_messages()

    return msg.id
923
+
924
+ def acknowledge(self, message_id: str) -> None:
925
+ """Acknowledge receipt of a message."""
926
+ if message_id in self.pending_acks:
927
+ self.pending_acks[message_id].ack_received = True
928
+ self.pending_acks[message_id].delivered = True
929
+ del self.pending_acks[message_id]
930
+
931
+ def register_handler(self,
932
+ message_type: str,
933
+ handler: callable,
934
+ filter_fn: Optional[callable] = None) -> None:
935
+ """
936
+ Register a handler for a specific message type.
937
+
938
+ Args:
939
+ message_type: Type of message to handle
940
+ handler: Handler function (message -> None)
941
+ filter_fn: Optional filter function (message -> bool)
942
+ """
943
+ if message_type not in self.handlers:
944
+ self.handlers[message_type] = []
945
+ self.handlers[message_type].append((handler, filter_fn or (lambda _: True)))
946
+
947
+ def _process_messages(self) -> None:
948
+ """Process messages in the queue."""
949
+ processed = set()
950
+ temp_queue = []
951
+ now = time.time()
952
+
953
+ # Process all messages in current queue
954
+ while self.message_queue:
955
+ _, _, msg_id, msg = heapq.heappop(self.message_queue)
956
+
957
+ # Skip if already processed or expired
958
+ if msg_id in processed or now - msg.timestamp > msg.ttl:
959
+ continue
960
+
961
+ processed.add(msg_id)
962
+ msg.delivery_attempts += 1
963
+
964
+ # Try to deliver to handlers first
965
+ handler_delivered = False
966
+ if msg.message_type in self.handlers:
967
+ for handler, filter_fn in self.handlers[msg.message_type]:
968
+ try:
969
+ if filter_fn(msg):
970
+ handler(msg)
971
+ handler_delivered = True
972
+ msg.delivered = True
973
+ except Exception as e:
974
+ print(f"Error in handler for {msg.message_type}: {e}")
975
+
976
+ # Then to subscribers if not handled and not ack required
977
+ if not handler_delivered and not msg.require_ack and msg.message_type in self.subscribers:
978
+ for callback, filter_fn in self.subscribers[msg.message_type]:
979
+ try:
980
+ if filter_fn(msg):
981
+ callback(msg)
982
+ msg.delivered = True
983
+ except Exception as e:
984
+ print(f"Error in subscriber for {msg.message_type}: {e}")
985
+
986
+ # Handle message acknowledgment and retries
987
+ if msg.require_ack and not msg.ack_received:
988
+ if msg.delivery_attempts < msg.max_retries:
989
+ # Schedule for retry with exponential backoff
990
+ retry_delay = min(2 ** msg.retry_count, 30) # Cap at 30 seconds
991
+ msg.retry_count += 1
992
+ heapq.heappush(
993
+ temp_queue,
994
+ (
995
+ -msg.priority, # Maintain original priority
996
+ now + retry_delay, # Schedule for future
997
+ msg.id,
998
+ msg
999
+ )
1000
+ )
1001
+ else:
1002
+ # Max retries exceeded
1003
+ print(f"Warning: Max retries exceeded for message {msg.id}")
1004
+
1005
+ # Add to history if delivered
1006
+ if msg.delivered:
1007
+ self.message_history.append(msg)
1008
+
1009
+ # Restore remaining messages to queue
1010
+ for item in temp_queue:
1011
+ heapq.heappush(self.message_queue, item)
1012
+
1013
+ # Clean up old pending acks
1014
+ self._cleanup_pending_acks()
1015
+
1016
+ def _cleanup_pending_acks(self) -> None:
1017
+ """Remove old unacknowledged messages."""
1018
+ now = time.time()
1019
+ expired = [
1020
+ msg_id for msg_id, msg in self.pending_acks.items()
1021
+ if now - msg.timestamp > msg.ttl
1022
+ ]
1023
+ for msg_id in expired:
1024
+ print(f"Warning: Message {msg_id} expired without acknowledgment")
1025
+ del self.pending_acks[msg_id]
1026
+
1027
+ def _persist_messages(self) -> None:
1028
+ """Persist undelivered messages to disk."""
1029
+ if not self.persistence_file:
1030
+ return
1031
+
1032
+ try:
1033
+ # Get all undelivered messages
1034
+ undelivered = [
1035
+ msg for _, _, _, msg in self.message_queue
1036
+ if not msg.delivered and not msg.ack_received
1037
+ ]
1038
+
1039
+ # Convert to serializable format
1040
+ serialized = [{
1041
+ 'id': msg.id,
1042
+ 'type': msg.message_type,
1043
+ 'data': msg.data,
1044
+ 'priority': msg.priority,
1045
+ 'timestamp': msg.timestamp,
1046
+ 'require_ack': msg.require_ack,
1047
+ 'ttl': msg.ttl,
1048
+ 'source': msg.source,
1049
+ 'delivery_attempts': msg.delivery_attempts,
1050
+ 'retry_count': msg.retry_count
1051
+ } for msg in undelivered]
1052
+
1053
+ # Write to file
1054
+ with open(self.persistence_file, 'w') as f:
1055
+ json.dump({
1056
+ 'messages': serialized,
1057
+ 'timestamp': time.time()
1058
+ }, f)
1059
+
1060
+ except Exception as e:
1061
+ print(f"Error persisting messages: {e}")
1062
+
1063
+ def _load_messages(self) -> None:
1064
+ """Load messages from persistence file."""
1065
+ if not self.persistence_file or not os.path.exists(self.persistence_file):
1066
+ return
1067
+
1068
+ try:
1069
+ with open(self.persistence_file, 'r') as f:
1070
+ data = json.load(f)
1071
+
1072
+ for msg_data in data.get('messages', []):
1073
+ try:
1074
+ msg = self.Message(
1075
+ message_type=msg_data['type'],
1076
+ data=msg_data['data'],
1077
+ priority=msg_data.get('priority', 0),
1078
+ require_ack=msg_data.get('require_ack', False),
1079
+ ttl=msg_data.get('ttl', 3600.0),
1080
+ source=msg_data.get('source')
1081
+ )
1082
+
1083
+ # Restore message state
1084
+ msg.id = msg_data['id']
1085
+ msg.timestamp = msg_data['timestamp']
1086
+ msg.delivery_attempts = msg_data.get('delivery_attempts', 0)
1087
+ msg.retry_count = msg_data.get('retry_count', 0)
1088
+
1089
+ # Add back to queue
1090
+ heapq.heappush(
1091
+ self.message_queue,
1092
+ (-msg.priority, msg.timestamp, msg.id, msg)
1093
+ )
1094
+
1095
+ except Exception as e:
1096
+ print(f"Error loading message: {e}")
1097
+
1098
+ except Exception as e:
1099
+ print(f"Error loading persisted messages: {e}")
1100
+
1101
+ def get_stats(self) -> Dict[str, Any]:
1102
+ """Get statistics about message processing."""
1103
+ now = time.time()
1104
+ return {
1105
+ 'queue_size': len(self.message_queue),
1106
+ 'pending_acks': len(self.pending_acks),
1107
+ 'history_size': len(self.message_history),
1108
+ 'subscribers': {k: len(v) for k, v in self.subscribers.items()},
1109
+ 'handlers': {k: len(v) for k, v in self.handlers.items()},
1110
+ 'messages_processed': sum(1 for m in self.message_history
1111
+ if now - m.timestamp < 3600), # Last hour
1112
+ 'avg_delivery_time': self._calculate_avg_delivery_time(),
1113
+ 'error_rate': self._calculate_error_rate()
1114
+ }
1115
+
1116
+ def _calculate_avg_delivery_time(self) -> float:
1117
+ """Calculate average time between message publish and delivery."""
1118
+ if not self.message_history:
1119
+ return 0.0
1120
+
1121
+ now = time.time()
1122
+ recent = [m for m in self.message_history
1123
+ if now - m.timestamp < 3600] # Last hour
1124
+
1125
+ if not recent:
1126
+ return 0.0
1127
+
1128
+ return sum(
1129
+ m.delivery_attempts * (now - m.timestamp) / len(recent)
1130
+ for m in recent
1131
+ )
1132
+
1133
+ def _calculate_error_rate(self) -> float:
1134
+ """Calculate the error rate in message processing."""
1135
+ if not self.message_history:
1136
+ return 0.0
1137
+
1138
+ now = time.time()
1139
+ recent = [m for m in self.message_history
1140
+ if now - m.timestamp < 3600] # Last hour
1141
+
1142
+ if not recent:
1143
+ return 0.0
1144
+
1145
+ error_count = sum(
1146
+ 1 for m in recent
1147
+ if hasattr(m, 'error') and m.error
1148
+ )
1149
+
1150
+ return error_count / len(recent)
1151
+
1152
+ def get_message_history(self,
1153
+ message_type: Optional[str] = None,
1154
+ source: Optional[str] = None,
1155
+ limit: int = 100) -> List[Dict[str, Any]]:
1156
+ """
1157
+ Get message history with optional filtering.
1158
+
1159
+ Args:
1160
+ message_type: Filter by message type
1161
+ source: Filter by message source
1162
+ limit: Maximum number of messages to return
1163
+
1164
+ Returns:
1165
+ List of message dictionaries
1166
+ """
1167
+ results = []
1168
+ for msg in reversed(self.message_history):
1169
+ if len(results) >= limit:
1170
+ break
1171
+
1172
+ if ((message_type is None or msg.message_type == message_type) and
1173
+ (source is None or getattr(msg, 'source', None) == source)):
1174
+ results.append({
1175
+ 'id': msg.id,
1176
+ 'type': msg.message_type,
1177
+ 'source': getattr(msg, 'source', None),
1178
+ 'timestamp': msg.timestamp,
1179
+ 'delivered': msg.delivered,
1180
+ 'delivery_attempts': msg.delivery_attempts,
1181
+ 'data': msg.data if len(str(msg.data)) < 100 else str(msg.data)[:97] + '...'
1182
+ })
1183
+
1184
+ return results