# fractal_identity — identity analysis via fractal and recursive patterns.
import json
import logging
from typing import List, Dict, Any
from datetime import datetime
logger = logging.getLogger(__name__)
# Optional scientific dependencies. The module degrades gracefully when
# numpy / scikit-learn are not installed: each import is guarded and the
# missing name is bound to None, which the code checks before use.
try:
    import numpy as np
except Exception:
    np = None
try:
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
except Exception:
    StandardScaler = None
    PCA = None
try:
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
except Exception:
    # Fallback lightweight sentiment analyzer used when vaderSentiment is
    # not installed. Only mimics the single method / key this module reads.
    class SentimentIntensityAnalyzer:
        def polarity_scores(self, text: str):
            """Return a VADER-style dict with a coarse 'compound' score.

            Very simple heuristic: +0.5 if a positive keyword appears,
            -0.5 if a negative keyword appears (a negative match overwrites
            a positive one), else 0.0. None is treated as empty text.
            """
            t = (text or "").lower()
            score = 0.0
            if any(w in t for w in ("good", "happy", "joy", "wonder", "love")):
                score = 0.5
            if any(w in t for w in ("bad", "sad", "angry", "fear", "hate")):
                score = -0.5
            return {"compound": score}
def dimensionality_reduction(data: List[Dict[str, Any]], n_components: int = 2) -> "np.ndarray":
    """Reduce identity-state dictionaries to ``n_components`` numeric columns.

    Each dict is flattened to a numeric feature vector (numbers pass through,
    bools become 1.0/0.0, containers and other values are measured by the
    length of their string form), padded to equal length, standardized when
    scikit-learn is available, and projected with PCA when available.

    Args:
        data: List of identity-state dictionaries; values may be any type.
        n_components: Target dimensionality (capped by the feature count).

    Returns:
        A ``(len(data), n_components)``-shaped numpy array, or plain nested
        lists when numpy is unavailable. On failure a zero matrix of the
        same shape is returned.
    """
    # NOTE: the return annotation is a string on purpose — evaluating
    # np.ndarray at def time crashes module import when np is None.
    try:
        if not data:
            # Preserve the documented column count even for empty input.
            zeros = [[0.0] * n_components]
            return np.array(zeros) if np is not None else zeros
        # Flatten every state dict into a numeric feature vector.
        features = []
        for item in data:
            row = []
            for value in item.values():
                if isinstance(value, bool):
                    # bool checked first: bool is an int subclass (the
                    # original's bool branch was unreachable, though the
                    # int branch happened to produce the same 1.0/0.0).
                    row.append(1.0 if value else 0.0)
                elif isinstance(value, (int, float)):
                    row.append(float(value))
                elif isinstance(value, (list, dict)):
                    row.append(float(len(str(value))))
                else:
                    row.append(float(len(str(value)) if value else 0))
            features.append(row)
        if np is None:
            # Without numpy we cannot pad/scale/project; degrade gracefully.
            return [[0.0] * n_components for _ in features]
        # Rows may have different lengths; right-pad with zeros.
        max_len = max(len(f) for f in features)
        padded_features = np.zeros((len(features), max_len))
        for i, row in enumerate(features):
            padded_features[i, :len(row)] = row
        # Standardize features if scaler available.
        if StandardScaler is not None:
            scaled_features = StandardScaler().fit_transform(padded_features)
        else:
            scaled_features = padded_features
        # Apply PCA if available.
        if PCA is not None:
            pca = PCA(n_components=min(n_components, scaled_features.shape[1]))
            return pca.fit_transform(scaled_features)
        # Fallback without sklearn: take the first n_components columns,
        # zero-padding if fewer exist (was hard-coded to 2 columns).
        out = np.zeros((scaled_features.shape[0], n_components))
        keep = min(n_components, scaled_features.shape[1])
        out[:, :keep] = scaled_features[:, :keep]
        return out
    except Exception as e:
        logger.error(f"Dimensionality reduction failed: {e}")
        # np may be None on this path too; never raise from the fallback.
        fallback = [[0.0] * n_components for _ in range(len(data))]
        return np.array(fallback) if np is not None else fallback
# Initialize global variables for module-level test mode control.
# Test mode configuration with numerical data.
# NOTE(review): these module-level defaults appear to exist so tests can
# import ready-made inputs for analyze_identity() — verify against callers.
micro_generations = [{"gen": 1, "state": 0}]
informational_states = [{"state": 0, "value": 1}]
perspectives = [1.0]
quantum_analogies = {"coherence": 0.8}
philosophical_context = {"test_context": True}
def analyze_identity(micro_gens, info_states, persps, q_analogies, phil_context):
    """Test-mode stand-in for identity analysis.

    Ignores all arguments and returns a canned payload shaped like the real
    FractalIdentity.analyze_identity result, giving tests a deterministic
    fixture to assert against.
    """
    canned_metrics = {
        "stability": 0.75,
        "evolution_rate": 0.8,
        "coherence": 0.85,
        "identity_strength": 0.8,
    }
    return {
        "fractal_dimension": 1.0,
        "recursive_patterns": {"depth": 1, "patterns": []},
        "perspective_coherence": {"coherence": 0.8},
        "identity_metrics": canned_metrics,
        "analysis_id": "test_analysis_1",
    }
class FractalIdentity:
    """Identity analysis through fractal patterns and recursive processes.

    Flattens identity "states" into numeric features, estimates a fractal
    dimension, extracts recursive patterns modulated by quantum analogies,
    scores perspective coherence, and aggregates everything into identity
    metrics. Every stage is defensive: failures degrade to neutral defaults
    instead of raising.
    """

    def __init__(self):
        # StandardScaler may be None when scikit-learn is unavailable (see
        # the guarded imports at the top of the module); instantiating it
        # unconditionally crashed in that case, so guard it here.
        self.scaler = StandardScaler() if StandardScaler is not None else None
        self.sentiment_analyzer = SentimentIntensityAnalyzer()
        # Maps "analysis_YYYYMMDDHHMM" keys to cached analysis summaries.
        self.identity_cache = {}

    def _preprocess_states(self, states: List[Dict[str, Any]]) -> List[Dict[str, float]]:
        """Coerce every state value to a float feature.

        Numbers (and bools, an int subclass) pass through as floats;
        containers and other objects are measured by the length of their
        string form; None becomes 0.0.
        """
        processed = []
        for state in states:
            converted: Dict[str, float] = {}
            for key, value in state.items():
                if isinstance(value, (int, float)):
                    # Covers bool too: float(True) == 1.0 (the original's
                    # separate bool branch was unreachable and redundant).
                    converted[key] = float(value)
                elif isinstance(value, (list, dict)):
                    # Numerical proxy: length of the string representation.
                    converted[key] = float(len(str(value)))
                else:
                    # Strings / other types: string length; None -> 0.0.
                    converted[key] = float(len(str(value))) if value is not None else 0.0
            processed.append(converted)
        return processed

    def analyze_identity(
        self,
        micro_generations: List[Dict[str, Any]],
        informational_states: List[Dict[str, Any]],
        perspectives: List[Any],
        quantum_analogies: Dict[str, Any],
        philosophical_context: Dict[str, bool]
    ) -> Dict[str, Any]:
        """Analyze identity as a fractal and recursive process.

        Args:
            micro_generations: Evolution snapshots; only the count is used.
            informational_states: State dicts flattened to numeric features.
            perspectives: Hashable markers; diversity = unique / total.
            quantum_analogies: Reads the "coherence" key (default 0.5).
            philosophical_context: Flag map; the fraction of True values
                biases perspective coherence.

        Returns:
            Dict with fractal_dimension, recursive_patterns,
            perspective_coherence, identity_metrics and analysis_id, or
            {"error": ..., "status": "failed"} if analysis failed outright.
        """
        try:
            processed_states = self._preprocess_states(informational_states)
            fractal_dim = self._calculate_fractal_dimension(processed_states)
            recursive_patterns = self._recursive_analysis(
                processed_states,
                quantum_analogies,
                depth=0
            )
            # Perspective and metric stages have their own fallbacks so a
            # failure there does not abort the whole analysis.
            try:
                perspective_coherence = self._analyze_perspectives(
                    perspectives,
                    philosophical_context
                )
            except Exception as e:
                logger.warning(f"Perspective analysis failed: {e}")
                perspective_coherence = {"coherence": 0.5, "integration": 0.5}
            try:
                identity_metrics = self._calculate_identity_metrics(
                    micro_generations,
                    recursive_patterns,
                    perspective_coherence
                )
            except Exception as e:
                logger.warning(f"Identity metrics calculation failed: {e}")
                identity_metrics = {
                    "stability": 0.5,
                    "coherence": 0.5,
                    "complexity": 0.5
                }
            # Cache keyed by minute; repeated runs in the same minute overwrite.
            cache_key = f"analysis_{datetime.now().strftime('%Y%m%d%H%M')}"
            self.identity_cache[cache_key] = {
                "fractal_dimension": fractal_dim,
                "metrics": identity_metrics,
                "timestamp": datetime.now()
            }
            return {
                "fractal_dimension": fractal_dim,
                "recursive_patterns": recursive_patterns,
                "perspective_coherence": perspective_coherence,
                "identity_metrics": identity_metrics,
                "analysis_id": cache_key
            }
        except Exception as e:
            logger.error(f"Identity analysis failed: {e}")
            return {
                "error": str(e),
                "status": "failed"
            }

    def _calculate_fractal_dimension(self, states: List[Dict[str, float]]) -> float:
        """Estimate a box-counting-style fractal dimension of the states.

        Each state contributes a 3-feature point (sentiment compound score,
        text length, attribute count); the dimension is
        log(N) / |log(avg feature range)|. Returns 0.0 for no states and
        1.0 for degenerate inputs or on any failure.
        """
        # (Annotation fixed: states arrive from _preprocess_states with
        # float values, not Dict[str, str].)
        try:
            if not states:
                return 0.0
            if np is None:
                # Numpy unavailable: same neutral value as the failure path.
                return 1.0
            features = []
            for state in states:
                # Sentiment over the concatenated string form of all values.
                text_content = " ".join(str(v) for v in state.values())
                sentiment_scores = self.sentiment_analyzer.polarity_scores(text_content)
                features.append([
                    float(sentiment_scores["compound"]),  # sentiment score
                    float(len(text_content)),             # text length
                    float(len(state.keys()))              # number of attributes
                ])
            features_array = np.array(features, dtype=np.float64)
            if len(features_array) <= 1:
                return 1.0
            # Per-feature ranges; average only the non-degenerate ones.
            ranges = np.ptp(features_array, axis=0)
            avg_range = np.mean(ranges[ranges > 0]) if any(ranges > 0) else 1.0
            # |log| keeps the dimension positive (the original
            # log(1/avg_range) denominator went negative for avg_range > 1
            # and hit zero — an inf result — at avg_range == 1).
            denom = abs(np.log(avg_range))
            if denom == 0:
                return 1.0
            return float(np.log(len(features_array)) / denom)
        except Exception as e:
            logger.warning(f"Fractal dimension calculation failed: {e}")
            return 1.0

    def _recursive_analysis(
        self,
        states: List[Dict[str, float]],
        quantum_analogies: Dict[str, Any],
        depth: int = 0,
        max_depth: int = 3
    ) -> Dict[str, Any]:
        """Recursively analyze identity patterns.

        Level k analyzes states[k:]; each pattern strength is the state's
        normalized sentiment scaled by the quantum "coherence" factor and
        clamped to [0, 1]. Recursion stops at max_depth or when the state
        list is exhausted.
        """
        if depth >= max_depth or not states:
            return {
                "depth": depth,
                "patterns": []
            }
        try:
            level_patterns = []
            # Loop-invariant: same analogies dict for every state at this level.
            quantum_factor = quantum_analogies.get("coherence", 0.5)
            for state in states:
                # (The original built the text via a redundant conditional
                # whose branches were both str(v); a plain join is equivalent.)
                text = " ".join(str(v) for v in state.values())
                sentiment = self.sentiment_analyzer.polarity_scores(text)
                pattern_strength = (sentiment["compound"] + 1) / 2  # [-1,1] -> [0,1]
                # Apply quantum modification around the neutral 0.5 point.
                pattern_strength *= (1 + (quantum_factor - 0.5))
                level_patterns.append({
                    "strength": max(0, min(1, pattern_strength)),
                    "elements": len(state),
                    "quantum_influence": quantum_factor
                })
            sub_patterns = self._recursive_analysis(
                states[1:],  # analyze the tail at the next depth
                quantum_analogies,
                depth + 1,
                max_depth
            )
            return {
                "depth": depth,
                "patterns": level_patterns,
                "sub_patterns": sub_patterns
            }
        except Exception as e:
            logger.warning(f"Recursive analysis failed at depth {depth}: {e}")
            return {
                "depth": depth,
                "error": str(e)
            }

    def _analyze_perspectives(
        self,
        perspectives: List[Any],
        philosophical_context: Dict[str, bool]
    ) -> Dict[str, float]:
        """Score perspective integration and coherence.

        Coherence is the mean of perspective diversity (unique / total) and
        the fraction of True philosophical-context flags (0.5 when the
        context is empty). Returns {"coherence": 0.5} on failure.
        """
        try:
            if not perspectives:
                return {"coherence": 0.0}
            diversity = len(set(perspectives)) / len(perspectives)
            philosophy_factor = (
                sum(1 for v in philosophical_context.values() if v) / len(philosophical_context)
                if philosophical_context else 0.5
            )
            return {
                "coherence": (diversity + philosophy_factor) / 2,
                "diversity": diversity,
                "philosophical_alignment": philosophy_factor
            }
        except Exception as e:
            logger.warning(f"Perspective analysis failed: {e}")
            return {"coherence": 0.5}

    def _calculate_identity_metrics(
        self,
        micro_generations: List[Dict[str, float]],
        recursive_patterns: Dict[str, Any],
        perspective_coherence: Dict[str, float]
    ) -> Dict[str, float]:
        """Aggregate final identity metrics.

        stability      = mean pattern strength across all recursion levels
        evolution_rate = len(micro_generations) / 100, clamped to [0, 1]
        coherence      = perspective coherence (default 0.5)
        identity_strength = mean of the three above
        """
        try:
            pattern_strengths = []
            node = recursive_patterns
            # Walk the recursion chain; a leaf node has no "sub_patterns".
            while "patterns" in node:
                pattern_strengths.extend(p["strength"] for p in node["patterns"])
                node = node.get("sub_patterns", {})
            # Plain-Python mean: works even without numpy (np.mean crashed
            # into the 0.5-defaults fallback when np was None).
            stability = (
                sum(pattern_strengths) / len(pattern_strengths)
                if pattern_strengths else 0.5
            )
            # Clamp so the value actually stays in [0,1] as documented
            # (the original overflowed past 1.0 beyond 100 generations).
            evolution_rate = min(1.0, len(micro_generations) / 100.0)
            coherence_factor = perspective_coherence.get("coherence", 0.5)
            return {
                "stability": stability,
                "evolution_rate": evolution_rate,
                "coherence": coherence_factor,
                "identity_strength": (stability + evolution_rate + coherence_factor) / 3
            }
        except Exception as e:
            logger.warning(f"Metric calculation failed: {e}")
            return {
                "stability": 0.5,
                "evolution_rate": 0.5,
                "coherence": 0.5,
                "identity_strength": 0.5
            }