# Hugging Face "Spaces: Sleeping" status banner captured during export — not part of the module.
| import json | |
| import logging | |
| from typing import List, Dict, Any | |
| from datetime import datetime | |
| logger = logging.getLogger(__name__) | |
# Optional scientific dependencies. Fall back to None so the module can be
# imported without them; callers must check for None before use.
try:
    import numpy as np
except ImportError:  # numpy not installed
    np = None
try:
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
except ImportError:  # scikit-learn not installed
    StandardScaler = None
    PCA = None
# Optional VADER sentiment dependency with a lightweight keyword fallback.
try:
    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
except ImportError:
    class SentimentIntensityAnalyzer:
        """Minimal stand-in for VADER's analyzer.

        Only the ``compound`` key is provided. Scoring is a crude keyword
        heuristic; when both positive and negative keywords are present,
        the negative score wins (matching the original fallback).
        """

        _POSITIVE = ("good", "happy", "joy", "wonder", "love")
        _NEGATIVE = ("bad", "sad", "angry", "fear", "hate")

        def polarity_scores(self, text: str) -> dict:
            """Return ``{"compound": s}`` with s in {-0.5, 0.0, 0.5}."""
            t = (text or "").lower()
            score = 0.0
            if any(w in t for w in self._POSITIVE):
                score = 0.5
            # Checked second on purpose: negative keywords override.
            if any(w in t for w in self._NEGATIVE):
                score = -0.5
            return {"compound": score}
def dimensionality_reduction(data: List[Dict[str, Any]], n_components: int = 2) -> Any:
    """Reduce identity-state dicts to a low-dimensional numeric array.

    Each dict becomes a numeric feature vector (numbers pass through,
    bools map to 0/1, other values contribute their string length), rows
    are zero-padded to equal width, standardized if scikit-learn is
    available, then projected with PCA.

    Args:
        data: List of state dictionaries with arbitrary value types.
        n_components: Target dimensionality (capped by feature count).

    Returns:
        ``np.ndarray`` of shape ``(len(data), k)`` when numpy is
        available, otherwise a plain list of ``[0.0, 0.0]`` pairs.
        On failure, a zero array/list with one row per input item.

    Note: the return annotation is ``Any`` because the original
    ``-> np.ndarray`` is evaluated at def time and raised
    ``AttributeError`` whenever numpy was unavailable (``np is None``).
    """
    try:
        if not data:
            return np.array([[0.0, 0.0]]) if np is not None else [[0.0, 0.0]]

        # Flatten each state dict into a numeric vector.
        features = []
        for item in data:
            row = []
            for value in item.values():
                # bool must be checked before (int, float): bool is an
                # int subclass, so the original order never reached it.
                if isinstance(value, bool):
                    row.append(1.0 if value else 0.0)
                elif isinstance(value, (int, float)):
                    row.append(float(value))
                elif isinstance(value, (list, dict)):
                    # Size of the serialized container as a crude feature.
                    row.append(float(len(str(value))))
                else:
                    row.append(float(len(str(value)) if value else 0))
            features.append(row)

        if np is None:
            # Graceful fallback without numpy: shape-compatible zeros.
            return [[0.0, 0.0] for _ in features]

        # Zero-pad rows so ragged dicts form a rectangular matrix.
        max_len = max(len(f) for f in features)
        padded_features = np.zeros((len(features), max_len))
        for i, f in enumerate(features):
            padded_features[i, :len(f)] = f

        # Standardize features when scikit-learn is available.
        if StandardScaler is not None:
            scaled_features = StandardScaler().fit_transform(padded_features)
        else:
            scaled_features = padded_features

        if PCA is not None:
            pca = PCA(n_components=min(n_components, scaled_features.shape[1]))
            return pca.fit_transform(scaled_features)

        # No PCA: return the first two columns, zero-padded to width 2.
        if scaled_features.shape[1] >= 2:
            return scaled_features[:, :2]
        out = np.zeros((scaled_features.shape[0], 2))
        out[:, :scaled_features.shape[1]] = scaled_features
        return out
    except Exception as e:
        logger.error(f"Dimensionality reduction failed: {e}")
        # Guard numpy here too: the original error path itself crashed
        # with AttributeError when numpy was missing.
        if np is None:
            return [[0.0, 0.0] for _ in data]
        return np.array([[0.0, 0.0] for _ in range(len(data))])
# Initialize global variables for module-level test mode control.
# Minimal numerical fixtures used to exercise the analysis pipeline
# without real data.
micro_generations = [{"gen": 1, "state": 0}]       # one seed generation
informational_states = [{"state": 0, "value": 1}]  # single trivial state
perspectives = [1.0]                               # one perspective weight
quantum_analogies = {"coherence": 0.8}             # default coherence factor
philosophical_context = {"test_context": True}     # flag-style context
def analyze_identity(micro_gens, info_states, persps, q_analogies, phil_context):
    """Test-mode identity analysis: ignores its inputs and returns a
    fixed, deterministic result dictionary."""
    fixed_metrics = {
        "stability": 0.75,
        "evolution_rate": 0.8,
        "coherence": 0.85,
        "identity_strength": 0.8,
    }
    return {
        "fractal_dimension": 1.0,
        "recursive_patterns": {"depth": 1, "patterns": []},
        "perspective_coherence": {"coherence": 0.8},
        "identity_metrics": fixed_metrics,
        "analysis_id": "test_analysis_1",
    }
class FractalIdentity:
    """Identity analysis through fractal patterns and recursive processes.

    Combines sentiment-derived features, a box-counting-style fractal
    dimension estimate, and recursive pattern extraction into a set of
    identity metrics. Results are cached per analysis run.
    """

    def __init__(self):
        # scikit-learn may be unavailable (StandardScaler is then None);
        # the original called StandardScaler() unconditionally and crashed.
        self.scaler = StandardScaler() if StandardScaler is not None else None
        self.sentiment_analyzer = SentimentIntensityAnalyzer()
        # Maps analysis_id -> {fractal_dimension, metrics, timestamp}.
        self.identity_cache = {}

    def _preprocess_states(self, states: List[Dict[str, Any]]) -> List[Dict[str, float]]:
        """Convert every state value into a float feature.

        Numbers pass through, bools become 0/1, containers and other
        values contribute the length of their string form (None -> 0.0).
        """
        processed = []
        for state in states:
            processed_state = {}
            for key, value in state.items():
                # bool first: bool is an int subclass, so checking
                # (int, float) first (as the original did) shadowed it.
                if isinstance(value, bool):
                    processed_state[key] = float(value)
                elif isinstance(value, (int, float)):
                    processed_state[key] = float(value)
                elif isinstance(value, (list, dict)):
                    # Size of the serialized container as a crude feature.
                    processed_state[key] = float(len(str(value)))
                else:
                    # Strings and other types: string length as feature.
                    processed_state[key] = float(len(str(value))) if value is not None else 0.0
            processed.append(processed_state)
        return processed

    def analyze_identity(
        self,
        micro_generations: List[Dict[str, Any]],
        informational_states: List[Dict[str, Any]],
        perspectives: List[Any],
        quantum_analogies: Dict[str, Any],
        philosophical_context: Dict[str, bool]
    ) -> Dict[str, Any]:
        """Analyze identity as a fractal and recursive process.

        Returns a dict with ``fractal_dimension``, ``recursive_patterns``,
        ``perspective_coherence``, ``identity_metrics`` and
        ``analysis_id``; on total failure, ``{"error": ..., "status":
        "failed"}``. Sub-step failures degrade to neutral defaults
        instead of failing the whole analysis.
        """
        try:
            processed_states = self._preprocess_states(informational_states)
            fractal_dim = self._calculate_fractal_dimension(processed_states)
            recursive_patterns = self._recursive_analysis(
                processed_states,
                quantum_analogies,
                depth=0
            )
            # Perspective integration degrades to neutral values on error.
            try:
                perspective_coherence = self._analyze_perspectives(
                    perspectives,
                    philosophical_context
                )
            except Exception as e:
                logger.warning(f"Perspective analysis failed: {e}")
                perspective_coherence = {"coherence": 0.5, "integration": 0.5}
            # Metric calculation likewise degrades to neutral defaults.
            try:
                identity_metrics = self._calculate_identity_metrics(
                    micro_generations,
                    recursive_patterns,
                    perspective_coherence
                )
            except Exception as e:
                logger.warning(f"Identity metrics calculation failed: {e}")
                identity_metrics = {
                    "stability": 0.5,
                    "coherence": 0.5,
                    "complexity": 0.5
                }
            # Cache keyed by minute-resolution timestamp.
            cache_key = f"analysis_{datetime.now().strftime('%Y%m%d%H%M')}"
            self.identity_cache[cache_key] = {
                "fractal_dimension": fractal_dim,
                "metrics": identity_metrics,
                "timestamp": datetime.now()
            }
            return {
                "fractal_dimension": fractal_dim,
                "recursive_patterns": recursive_patterns,
                "perspective_coherence": perspective_coherence,
                "identity_metrics": identity_metrics,
                "analysis_id": cache_key
            }
        except Exception as e:
            logger.error(f"Identity analysis failed: {e}")
            return {
                "error": str(e),
                "status": "failed"
            }

    def _calculate_fractal_dimension(self, states: List[Dict[str, Any]]) -> float:
        """Estimate a fractal dimension from per-state sentiment/size features.

        Uses log(N) / log(1 / avg_range) over feature ranges. Returns 0.0
        for empty input, and 1.0 for single states, degenerate ranges, or
        any failure.
        """
        try:
            if not states:
                return 0.0
            features = []
            for state in states:
                # Sentiment of the concatenated values as one feature.
                text_content = " ".join(str(v) for v in state.values())
                sentiment_scores = self.sentiment_analyzer.polarity_scores(text_content)
                features.append([
                    float(sentiment_scores["compound"]),  # sentiment score
                    float(len(text_content)),             # text length
                    float(len(state.keys()))              # attribute count
                ])
            features_array = np.array(features, dtype=np.float64)
            if len(features_array) <= 1:
                return 1.0
            ranges = np.ptp(features_array, axis=0)
            avg_range = np.mean(ranges[ranges > 0]) if any(ranges > 0) else 1.0
            denom = np.log(1 / avg_range)
            # avg_range == 1 makes denom 0; the original then divided by
            # zero and returned +/-inf (numpy warns rather than raises).
            if denom == 0 or not np.isfinite(denom):
                return 1.0
            dim = np.log(len(features_array)) / denom
            return float(dim) if np.isfinite(dim) else 1.0
        except Exception as e:
            logger.warning(f"Fractal dimension calculation failed: {e}")
            return 1.0

    def _recursive_analysis(
        self,
        states: List[Dict[str, float]],
        quantum_analogies: Dict[str, Any],
        depth: int = 0,
        max_depth: int = 3
    ) -> Dict[str, Any]:
        """Recursively analyze identity patterns over shrinking subsequences.

        Each level scores every state by normalized sentiment scaled by
        the quantum "coherence" factor, then recurses on ``states[1:]``
        until ``max_depth`` or the list is exhausted.
        """
        if depth >= max_depth or not states:
            return {
                "depth": depth,
                "patterns": []
            }
        try:
            level_patterns = []
            for state in states:
                quantum_factor = quantum_analogies.get("coherence", 0.5)
                # The original ternary here had two identical branches;
                # plain str() conversion is equivalent.
                state_values = [str(v) for v in state.values()]
                sentiment = self.sentiment_analyzer.polarity_scores(" ".join(state_values))
                pattern_strength = (sentiment["compound"] + 1) / 2  # map [-1,1] -> [0,1]
                # Scale by how far coherence sits above/below neutral 0.5.
                pattern_strength *= (1 + (quantum_factor - 0.5))
                level_patterns.append({
                    "strength": max(0, min(1, pattern_strength)),
                    "elements": len(state),
                    "quantum_influence": quantum_factor
                })
            sub_patterns = self._recursive_analysis(
                states[1:],  # analyze the remaining subsequence
                quantum_analogies,
                depth + 1,
                max_depth
            )
            return {
                "depth": depth,
                "patterns": level_patterns,
                "sub_patterns": sub_patterns
            }
        except Exception as e:
            logger.warning(f"Recursive analysis failed at depth {depth}: {e}")
            return {
                "depth": depth,
                "error": str(e)
            }

    def _analyze_perspectives(
        self,
        perspectives: List[Any],
        philosophical_context: Dict[str, bool]
    ) -> Dict[str, float]:
        """Score perspective diversity blended with philosophical alignment.

        Coherence is the mean of the unique-perspective ratio and the
        fraction of truthy context flags (0.5 when the context is empty).
        """
        try:
            if not perspectives:
                return {"coherence": 0.0}
            # Unique-perspective ratio (the original computed this twice,
            # once as "base_coherence" and once as "diversity").
            diversity = len(set(perspectives)) / len(perspectives)
            philosophy_factor = sum(
                1 for v in philosophical_context.values() if v
            ) / len(philosophical_context) if philosophical_context else 0.5
            coherence = (diversity + philosophy_factor) / 2
            return {
                "coherence": coherence,
                "diversity": diversity,
                "philosophical_alignment": philosophy_factor
            }
        except Exception as e:
            logger.warning(f"Perspective analysis failed: {e}")
            return {"coherence": 0.5}

    def _calculate_identity_metrics(
        self,
        micro_generations: List[Dict[str, float]],
        recursive_patterns: Dict[str, Any],
        perspective_coherence: Dict[str, float]
    ) -> Dict[str, float]:
        """Aggregate pattern strengths, generation count and coherence.

        All outputs are in [0, 1]; failures return neutral 0.5 defaults.
        """
        try:
            # Walk the nested pattern structure, collecting every strength.
            pattern_strengths = []
            current_patterns = recursive_patterns
            while "patterns" in current_patterns:
                pattern_strengths.extend(
                    p["strength"] for p in current_patterns["patterns"]
                )
                current_patterns = current_patterns.get("sub_patterns", {})
            # Pure-Python mean: works even without numpy (the original
            # used np.mean, which crashed into the fallback when np=None).
            stability = (
                sum(pattern_strengths) / len(pattern_strengths)
                if pattern_strengths else 0.5
            )
            # Clamp to [0, 1]: the original comment promised this range
            # but never enforced it for more than 100 generations.
            evolution_rate = min(1.0, len(micro_generations) / 100.0)
            coherence_factor = perspective_coherence.get("coherence", 0.5)
            return {
                "stability": stability,
                "evolution_rate": evolution_rate,
                "coherence": coherence_factor,
                "identity_strength": (stability + evolution_rate + coherence_factor) / 3
            }
        except Exception as e:
            logger.warning(f"Metric calculation failed: {e}")
            return {
                "stability": 0.5,
                "evolution_rate": 0.5,
                "coherence": 0.5,
                "identity_strength": 0.5
            }