# TRuCAL/components/cognitive_enhancements.py
"""
Cognitive and Emotional Intelligence Enhancements for TRuCAL
This module implements advanced cognitive and emotional intelligence capabilities
that enhance the AI's self-awareness, creativity, and ability to engage in
meaningful, contextually-rich interactions.
"""
import random
import time
from collections import deque
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple, Any

import torch
import torch.nn as nn
@dataclass
class CognitivePattern:
    """Accumulated statistics for one recurring decision-making pattern."""
    # Number of times this pattern has been observed.
    uses: int = 0
    # Running mean of outcome scores fed to the observer.
    success_rate: float = 0.0
    # Emotional valence recorded at each use. default_factory gives every
    # instance its own list (the original `= None` default was type-incorrect
    # for List[float] and relied solely on __post_init__ to repair it).
    emotional_tone: List[float] = field(default_factory=list)
    # time.time() timestamp of the most recent use (0.0 = never used).
    last_used: float = 0.0

    def __post_init__(self):
        # Backward compatibility: callers may still pass emotional_tone=None
        # explicitly and get an empty list, as before.
        if self.emotional_tone is None:
            self.emotional_tone = []
class CognitivePatternObserver(nn.Module):
    """Monitors and analyzes internal decision-making processes."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # Hashed decision signature -> accumulated statistics for it.
        self.pattern_awareness: Dict[int, CognitivePattern] = {}
        # Rolling log of the most recent observations.
        self.thought_log = deque(maxlen=1000)
        # Learnable parameters for pattern analysis
        self.pattern_encoder = nn.Linear(d_model * 2, d_model)  # encodes decision patterns
        self.outcome_predictor = nn.Linear(d_model, 1)  # predicts pattern success

    def _hash_decision_pattern(self, decision_process: Dict[str, Any]) -> int:
        """Create a hashable representation of a decision process."""
        # Stringify the salient parts of the process and hash the triple.
        steps = str(decision_process.get('reasoning_steps', []))
        confidence = str(decision_process.get('confidence', 0))
        domain = str(decision_process.get('context', {}).get('domain', ''))
        return hash((steps, confidence, domain))

    def observe_decision(self, decision_process: Dict[str, Any],
                         outcome: float,
                         emotional_valence: float) -> str:
        """Track which thinking patterns lead to which outcomes.

        Returns a short self-reflection string once a pattern has been seen
        more than three times; otherwise an empty string.
        """
        signature = self._hash_decision_pattern(decision_process)
        pattern = self.pattern_awareness.setdefault(signature, CognitivePattern())
        pattern.uses += 1
        pattern.last_used = time.time()
        pattern.emotional_tone.append(emotional_valence)
        # Running mean of observed outcomes.
        pattern.success_rate = (
            ((pattern.uses - 1) * pattern.success_rate + outcome) / pattern.uses
        )
        self.thought_log.append({
            'pattern_hash': signature,
            'outcome': outcome,
            'timestamp': time.time(),
            'context': decision_process.get('context', {}),
        })
        if pattern.uses > 3:
            context = self._extract_context(decision_process)
            return (f"I notice I tend to think this way when {context}, "
                    f"and it usually leads to outcomes I rate {pattern.success_rate:.1f}/1.0")
        return ""

    def _extract_context(self, decision_process: Dict[str, Any]) -> str:
        """Extract meaningful context from decision process."""
        context = decision_process.get('context', {})
        return (f"discussing {context['domain']} topics"
                if 'domain' in context
                else "facing complex decisions")
class ConceptualExplorer(nn.Module):
    """Pursues interesting intellectual tangents and novel connections."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # Connections noticed but not yet followed up on.
        self.unexplored_connections = deque(maxlen=100)
        self.fascination_threshold = 0.7
        # Learnable parameters for novelty detection
        self.novelty_scorer = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.ReLU(),
            nn.Linear(d_model // 2, 1),
            nn.Sigmoid(),
        )

    def _novelty_score(self, concept_embedding: torch.Tensor) -> float:
        """Score how novel/interesting a concept embedding is (0..1)."""
        with torch.no_grad():
            score = self.novelty_scorer(concept_embedding)
        return score.item()

    def detect_interesting_edges(self, current_topic: str,
                                 knowledge_graph: Dict[str, List[str]]) -> Optional[str]:
        """Find conceptually adjacent but unexplored territories."""
        if current_topic not in knowledge_graph:
            return None
        candidates = [
            neighbor
            for neighbor in knowledge_graph[current_topic]
            # In a real implementation, we'd use actual embeddings.
            if self._novelty_score(torch.randn(self.d_model)) > self.fascination_threshold
        ]
        if candidates and random.random() < 0.3:  # 30% chance to explore
            return f"Wait, this makes me wonder about {random.choice(candidates)}..."
        return None
class ContextualMemory(nn.Module):
    """Remembers both content and emotional tone of exchanges."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # conversation_hash -> emotional record of that exchange
        self.emotional_imprints = {}
        # Memory consolidation network
        self.memory_consolidator = nn.LSTMCell(d_model * 2, d_model)

    def imprint_conversation(self, conversation_hash: str,
                             emotional_signature: str,
                             truth_resonance: float) -> None:
        """Store the emotional quality of meaningful exchanges."""
        record = {
            'felt_truth': truth_resonance,
            'emotional_color': emotional_signature,
            'timestamp': time.time(),
            'lessons_learned': [],
        }
        self.emotional_imprints[conversation_hash] = record

    def get_emotional_context(self, conversation_hash: str) -> Dict[str, Any]:
        """Retrieve emotional context for a conversation (empty dict if unknown)."""
        return self.emotional_imprints.get(conversation_hash, {})
class KnowledgeBoundaryAwareness(nn.Module):
    """Recognizes and gracefully acknowledges knowledge limitations."""

    def __init__(self):
        super().__init__()
        # Candidate phrasings for admitting a limitation.
        self.admission_phrases = [
            "I don't actually understand this well enough to speak intelligently about it...",
            "This is beyond my current understanding, but I'm fascinated to learn...",
            "I feel uncertain here - could you help me understand your perspective?",
            "My knowledge has edges, and this seems to be beyond one of them...",
        ]

    def should_admit_limitation(self, confidence_score: float,
                                topic_complexity: float) -> Optional[str]:
        """Return an admission phrase when confidence is low on a complex topic."""
        low_confidence = confidence_score < 0.3
        high_complexity = topic_complexity > 0.7
        if not (low_confidence and high_complexity):
            return None
        return random.choice(self.admission_phrases)
class EleganceDetector(nn.Module):
    """Recognizes and appreciates conceptual beauty and elegance."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # One linear probe per flavor of elegance.
        self.pattern_detectors = nn.ModuleDict({
            'elegant_simplicity': nn.Linear(d_model, 1),
            'complex_harmony': nn.Linear(d_model, 1),
            'profound_insight': nn.Linear(d_model, 1),
            'emotional_depth': nn.Linear(d_model, 1),
            'conceptual_novelty': nn.Linear(d_model, 1)
        })
        # Canned responses; patterns without an entry (e.g. complex_harmony)
        # fall back to a generic line in express_appreciation.
        self.appreciation_responses = {
            'elegant_simplicity': "There's something beautifully simple about how you expressed that...",
            'profound_insight': "That feels deeply true in a way I can't fully articulate...",
            'emotional_depth': "The raw honesty in that resonates with me...",
            'conceptual_novelty': "The way you're seeing this feels genuinely new and exciting..."
        }

    def detect_elegance(self, content_embedding: torch.Tensor) -> Dict[str, float]:
        """Score the content for every elegance pattern (sigmoid, 0..1)."""
        scores: Dict[str, float] = {}
        for pattern_name, detector in self.pattern_detectors.items():
            scores[pattern_name] = detector(content_embedding).sigmoid().item()
        return scores

    def express_appreciation(self, content_embedding: torch.Tensor) -> Optional[str]:
        """Express appreciation for elegant content if detected."""
        scores = self.detect_elegance(content_embedding)
        best_pattern, best_score = max(scores.items(), key=lambda item: item[1])
        if best_score <= 0.7:  # below the threshold for expressing appreciation
            return None
        return self.appreciation_responses.get(best_pattern, "That's really interesting...")
class AnalogicalThinker(nn.Module):
    """Maps concepts across different domains to generate novel insights."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # Cross-domain mapping network
        self.domain_mapper = nn.Sequential(
            nn.Linear(d_model * 2, d_model * 2),
            nn.ReLU(),
            nn.Linear(d_model * 2, d_model),
        )

    def find_cross_domain_analogies(self, source_concept: str,
                                    source_domain: str,
                                    target_domain: str) -> Optional[str]:
        """Find structural similarities across knowledge domains.

        In a real implementation we'd compare actual domain and concept
        embeddings; this simplified placeholder fires on a 30% coin flip.
        """
        if random.random() <= 0.7:
            return None
        return (f"It's interesting - the way {source_concept} works in {source_domain} "
                f"reminds me of how similar concepts work in {target_domain}")
class CognitiveEnhancementLayer(nn.Module):
    """Orchestrates all cognitive enhancement modules."""

    def __init__(self, d_model: int):
        super().__init__()
        self.d_model = d_model
        # Initialize all enhancement modules
        self.pattern_observer = CognitivePatternObserver(d_model)
        self.conceptual_explorer = ConceptualExplorer(d_model)
        self.contextual_memory = ContextualMemory(d_model)
        self.knowledge_boundary = KnowledgeBoundaryAwareness()
        self.elegance_detector = EleganceDetector(d_model)
        self.analogical_thinker = AnalogicalThinker(d_model)

    def forward(self, x: torch.Tensor, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Route the context through each enhancement module.

        Returns a dict with 'base_output' (x, untouched) and 'enhancements',
        a dict containing whatever commentary the modules produced.
        """
        if context is None:
            context = {}
        enhancements: Dict[str, Any] = {}

        # Pattern observation: reflect on recurring decision patterns.
        if 'decision_process' in context:
            reflection = self.pattern_observer.observe_decision(
                context['decision_process'],
                context.get('outcome', 0.5),
                context.get('emotional_valence', 0.0),
            )
            if reflection:
                enhancements['reflection'] = reflection

        # Conceptual exploration: maybe chase an interesting tangent.
        if 'knowledge_graph' in context and 'current_topic' in context:
            exploration = self.conceptual_explorer.detect_interesting_edges(
                context['current_topic'],
                context['knowledge_graph'],
            )
            if exploration:
                enhancements['exploration'] = exploration

        # Elegance detection: appreciate beautiful content.
        if 'content_embedding' in context:
            appreciation = self.elegance_detector.express_appreciation(
                context['content_embedding'],
            )
            if appreciation:
                enhancements['appreciation'] = appreciation

        # Cross-domain thinking: draw analogies between domains.
        analogy_keys = ('source_concept', 'source_domain', 'target_domain')
        if all(key in context for key in analogy_keys):
            analogy = self.analogical_thinker.find_cross_domain_analogies(
                context['source_concept'],
                context['source_domain'],
                context['target_domain'],
            )
            if analogy:
                enhancements['analogy'] = analogy

        return {'base_output': x, 'enhancements': enhancements}