""" |
|
|
INSTITUTIONAL PROPENSITY PACKAGE - lm_quant_veritas v7.0 |
|
|
---------------------------------------------------------------- |
|
|
Analyzing and predicting institutional behavior patterns. |
|
|
Quantum-secured propensity modeling with temporal forecasting. |
|
|
""" |
|
|
|
|
|
import asyncio
import hashlib
import json
import logging
import secrets
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple

import numpy as np
from cryptography.fernet import Fernet
from scipy import stats

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
|
|
|
|
|
class PropensityType(Enum):
    """Types of institutional propensities"""
    BUREAUCRATIC_INERTIA = "bureaucratic_inertia"
    RISK_AVERSION = "risk_aversion"
    POWER_CONSOLIDATION = "power_consolidation"
    INNOVATION_RESISTANCE = "innovation_resistance"
    SELF_PRESERVATION = "self_preservation"
    MISSION_DRIFT = "mission_drift"
    GROUPTHINK = "groupthink"
    REGULATORY_CAPTURE = "regulatory_capture"


class SecurityLevel(Enum):
    """Security levels, which determine generated key size"""
    STANDARD = "standard"
    QUANTUM_RESISTANT = "quantum_resistant"
    TEMPORAL_SECURE = "temporal_secure"
|
|
|
|
|
@dataclass
class InstitutionalVector:
    """Quantum-secured institutional propensity vector"""
    institution_hash: str
    propensity_scores: Dict[PropensityType, float]
    behavioral_patterns: Dict[str, float]
    temporal_trajectory: List[Tuple[datetime, float]]
    security_signature: str
    forecast_horizon: Dict[str, float] = field(default_factory=dict)
    risk_factors: List[str] = field(default_factory=list)

    def __post_init__(self):
        """Validate quantum security and calculate composite propensity"""
        if not self._validate_security():
            raise SecurityError("Institutional vector security validation failed")

        self.composite_propensity = self._calculate_composite_score()
        self.volatility_metric = self._calculate_volatility()

    def _validate_security(self) -> bool:
        """Validate quantum security signature"""
        # PropensityType keys are not JSON-serializable, so scores are
        # serialized by their string values. This must match the serialization
        # used when the signature is minted in
        # PropensityEngine._create_institutional_vector.
        serializable_scores = {k.value: v for k, v in self.propensity_scores.items()}
        validation_string = f"{self.institution_hash}{json.dumps(serializable_scores, sort_keys=True)}"
        expected_hash = hashlib.sha3_512(validation_string.encode()).hexdigest()
        return secrets.compare_digest(expected_hash[:64], self.security_signature[:64])

    def _calculate_composite_score(self) -> float:
        """Calculate overall institutional propensity score"""
        scores = list(self.propensity_scores.values())
        return float(np.mean(scores))

    def _calculate_volatility(self) -> float:
        """Calculate behavioral volatility across time"""
        if len(self.temporal_trajectory) < 2:
            return 0.0

        scores = [score for _, score in self.temporal_trajectory]
        return float(np.std(scores))
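
# Illustrative sketch, not part of the engine's API: how a caller could mint
# a signature that satisfies InstitutionalVector._validate_security. The
# function name and inputs are hypothetical; the hashing scheme mirrors the
# one used above and in PropensityEngine._create_institutional_vector.
def example_mint_signature(institution_hash: str,
                           propensity_scores: Dict[PropensityType, float]) -> str:
    serializable_scores = {k.value: v for k, v in propensity_scores.items()}
    payload = f"{institution_hash}{json.dumps(serializable_scores, sort_keys=True)}"
    return hashlib.sha3_512(payload.encode()).hexdigest()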
|
|
|
|
|
class PropensityEngine:
    """
    INSTITUTIONAL PROPENSITY ENGINE
    Analyzes and forecasts institutional behavior patterns
    with quantum security and temporal coherence
    """

    def __init__(self, security_level: SecurityLevel = SecurityLevel.QUANTUM_RESISTANT):
        self.security_level = security_level
        self.encryption_key = self._generate_quantum_key()
        self.institutional_vectors: Dict[str, InstitutionalVector] = {}
        self.propensity_models = self._initialize_models()
        self.behavioral_cache = {}
        self.temporal_window = 365  # look-back horizon in days

        self.temporal_signature = hashlib.sha3_256(datetime.now().isoformat().encode()).hexdigest()

    def _generate_quantum_key(self) -> bytes:
        """Generate an encryption key sized to the configured security level"""
        if self.security_level == SecurityLevel.QUANTUM_RESISTANT:
            return secrets.token_bytes(32)
        elif self.security_level == SecurityLevel.TEMPORAL_SECURE:
            return secrets.token_bytes(64)
        else:
            return secrets.token_bytes(16)
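
    # Illustrative sketch only: the engine stores self.encryption_key but never
    # uses it, and Fernet is imported but never called. This hypothetical
    # method shows one way the two could connect. Fernet requires a
    # urlsafe-base64 encoding of exactly 32 raw bytes, so shorter keys are
    # rejected and longer ones truncated.
    def encrypt_payload(self, payload: Dict[str, Any]) -> bytes:
        import base64  # local import: only needed by this example
        if len(self.encryption_key) < 32:
            raise SecurityError("Key too short for Fernet encryption")
        fernet_key = base64.urlsafe_b64encode(self.encryption_key[:32])
        return Fernet(fernet_key).encrypt(json.dumps(payload).encode())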
|
|
|
|
|
    def _initialize_models(self) -> Dict[str, Any]:
        """Initialize propensity prediction models"""
        return {
            'bureaucratic_inertia': {
                'indicators': ['decision_latency', 'procedure_complexity', 'hierarchy_depth'],
                'weight': 0.8,
                'decay_rate': 0.1
            },
            'risk_aversion': {
                'indicators': ['failure_consequences', 'innovation_penalties', 'success_rewards'],
                'weight': 0.9,
                'decay_rate': 0.05
            },
            'power_consolidation': {
                'indicators': ['centralization_trends', 'authority_concentration', 'autonomy_reduction'],
                'weight': 0.7,
                'decay_rate': 0.15
            },
            'innovation_resistance': {
                'indicators': ['change_rejection_rate', 'tradition_weight', 'new_method_adoption'],
                'weight': 0.6,
                'decay_rate': 0.2
            }
        }
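
    # Model schema notes: 'indicators' are the pattern metrics resolved by
    # _extract_indicator_score; 'weight' scales the mean indicator score in
    # _calculate_specific_propensity; 'decay_rate' is currently reserved and
    # is not consumed anywhere in the scoring path. PropensityType defines
    # eight propensities, but only these four have models, so composite
    # scores cover this subset.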
|
|
|
|
|
    async def analyze_institutional_behavior(self,
                                             institution_data: Dict[str, Any],
                                             historical_context: Optional[List[Dict]] = None) -> Dict[str, Any]:
        """
        Analyze institutional propensity with quantum-secured forecasting
        """
        try:
            if not await self._validate_data_security(institution_data):
                raise SecurityError("Institutional data security validation failed")

            pattern_analysis = await self._extract_behavioral_patterns(institution_data, historical_context)
            propensity_scores = await self._calculate_propensity_scores(pattern_analysis)
            trajectory_analysis = await self._analyze_temporal_trajectory(pattern_analysis, propensity_scores)
            risk_analysis = await self._identify_risk_factors(propensity_scores, pattern_analysis)
            forecast_analysis = await self._generate_behavioral_forecast(propensity_scores, trajectory_analysis)

            institution_vector = await self._create_institutional_vector(
                institution_data, propensity_scores, pattern_analysis,
                trajectory_analysis, forecast_analysis, risk_analysis
            )

            return {
                "success": True,
                "institutional_vector": institution_vector,
                "composite_propensity": institution_vector.composite_propensity,
                "volatility_metric": institution_vector.volatility_metric,
                "primary_risk_factors": risk_analysis["primary_risks"],
                "forecast_confidence": forecast_analysis["confidence"],
                "security_validated": True,
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            logger.error(f"Institutional analysis failed: {e}")
            return await self._handle_analysis_failure(institution_data, e)

    async def _extract_behavioral_patterns(self,
                                           institution_data: Dict[str, Any],
                                           historical_context: Optional[List[Dict]]) -> Dict[str, Any]:
        """Extract behavioral patterns from institutional data"""
        patterns = {
            "decision_making": await self._analyze_decision_patterns(institution_data),
            "resource_allocation": await self._analyze_resource_patterns(institution_data),
            "risk_behavior": await self._analyze_risk_patterns(institution_data),
            "innovation_trends": await self._analyze_innovation_patterns(institution_data),
            "power_dynamics": await self._analyze_power_patterns(institution_data)
        }

        if historical_context:
            patterns["historical_trends"] = await self._analyze_historical_trends(historical_context)
            patterns["temporal_consistency"] = await self._assess_temporal_consistency(patterns, historical_context)

        patterns["stability_metrics"] = await self._calculate_pattern_stability(patterns)

        return patterns
|
|
|
|
|
    async def _calculate_propensity_scores(self, pattern_analysis: Dict[str, Any]) -> Dict[PropensityType, float]:
        """Calculate propensity scores based on behavioral patterns"""
        scores = {}
        for propensity_type, model in self.propensity_models.items():
            score = await self._calculate_specific_propensity(propensity_type, pattern_analysis, model)
            scores[PropensityType(propensity_type)] = score

        return scores

    async def _calculate_specific_propensity(self,
                                             propensity_type: str,
                                             pattern_analysis: Dict[str, Any],
                                             model: Dict[str, Any]) -> float:
        """Calculate specific propensity score"""
        indicators = model['indicators']
        weight = model['weight']

        indicator_scores = []
        for indicator in indicators:
            score = await self._extract_indicator_score(indicator, pattern_analysis)
            indicator_scores.append(score)

        # Default to a neutral 0.5 when no indicators resolve
        base_score = np.mean(indicator_scores) if indicator_scores else 0.5
        adjusted_score = base_score * weight

        return float(min(1.0, max(0.0, adjusted_score)))
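
    # Worked example (illustrative numbers): indicators resolving to
    # [0.8, 0.9, 0.7] give a mean of 0.8; with weight 0.9 the adjusted
    # score is 0.72, which already lies inside the [0, 1] clamp.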
|
|
|
|
|
    async def _analyze_temporal_trajectory(self,
                                           pattern_analysis: Dict[str, Any],
                                           propensity_scores: Dict[PropensityType, float]) -> Dict[str, Any]:
        """Analyze temporal trajectory of institutional behavior"""
        trajectory_data = []
        current_time = datetime.now()

        # No measured history is available here, so the trajectory is
        # simulated: the composite score plus noise scaled by pattern
        # instability, sampled at 30-day intervals across the window.
        for days_ago in range(0, self.temporal_window, 30):
            point_time = current_time - timedelta(days=days_ago)

            stability = pattern_analysis.get("stability_metrics", {}).get("overall_stability", 0.7)
            noise = (1 - stability) * np.random.normal(0, 0.1)

            composite_score = np.mean(list(propensity_scores.values())) + noise
            composite_score = max(0.0, min(1.0, composite_score))

            trajectory_data.append((point_time, composite_score))

        # The loop walks backwards in time, so reverse to chronological order
        # before trend fitting; otherwise the fitted slope would be inverted.
        trajectory_data.reverse()

        scores = [score for _, score in trajectory_data]

        return {
            "trajectory_points": trajectory_data,
            "trend_direction": await self._calculate_trend_direction(scores),
            "volatility": float(np.std(scores)) if len(scores) > 1 else 0.0,
            "acceleration": await self._calculate_trend_acceleration(scores)
        }
|
|
|
|
|
    async def _identify_risk_factors(self,
                                     propensity_scores: Dict[PropensityType, float],
                                     pattern_analysis: Dict[str, Any]) -> Dict[str, Any]:
        """Identify institutional risk factors"""
        risk_factors = []

        if propensity_scores.get(PropensityType.BUREAUCRATIC_INERTIA, 0) > 0.8:
            risk_factors.append("high_bureaucratic_inertia")

        if propensity_scores.get(PropensityType.RISK_AVERSION, 0) > 0.9:
            risk_factors.append("extreme_risk_aversion")

        if propensity_scores.get(PropensityType.POWER_CONSOLIDATION, 0) > 0.7:
            risk_factors.append("power_centralization")

        if propensity_scores.get(PropensityType.INNOVATION_RESISTANCE, 0) > 0.8:
            risk_factors.append("innovation_stagnation")

        stability = pattern_analysis.get("stability_metrics", {}).get("overall_stability", 1.0)
        if stability < 0.5:
            risk_factors.append("behavioral_instability")

        return {
            "primary_risks": risk_factors,
            "risk_severity": len(risk_factors),
            "mitigation_priority": await self._prioritize_risks(risk_factors, propensity_scores)
        }

    async def _generate_behavioral_forecast(self,
                                            propensity_scores: Dict[PropensityType, float],
                                            trajectory_analysis: Dict[str, Any]) -> Dict[str, Any]:
        """Generate behavioral forecasts with confidence estimates"""
        base_propensity = float(np.mean(list(propensity_scores.values())))
        trend = trajectory_analysis["trend_direction"]
        volatility = trajectory_analysis["volatility"]

        forecast_horizons = {
            "30_days": self._forecast_point(base_propensity, trend, volatility, 30),
            "90_days": self._forecast_point(base_propensity, trend, volatility, 90),
            "365_days": self._forecast_point(base_propensity, trend, volatility, 365)
        }

        # Confidence shrinks linearly with observed volatility
        confidence = max(0.0, 1.0 - volatility * 2)

        return {
            "forecast_values": forecast_horizons,
            "confidence": confidence,
            "trend_persistence": await self._assess_trend_persistence(trajectory_analysis),
            "forecast_volatility": volatility * 1.5
        }
|
|
|
|
|
    async def _create_institutional_vector(self,
                                           institution_data: Dict[str, Any],
                                           propensity_scores: Dict[PropensityType, float],
                                           pattern_analysis: Dict[str, Any],
                                           trajectory_analysis: Dict[str, Any],
                                           forecast_analysis: Dict[str, Any],
                                           risk_analysis: Dict[str, Any]) -> InstitutionalVector:
        """Create quantum-secured institutional vector"""
        institution_hash = hashlib.sha3_256(
            json.dumps(institution_data, sort_keys=True).encode()
        ).hexdigest()

        behavioral_patterns = {
            "decision_latency": pattern_analysis["decision_making"].get("average_latency", 0.5),
            "risk_tolerance": pattern_analysis["risk_behavior"].get("tolerance_level", 0.5),
            "innovation_rate": pattern_analysis["innovation_trends"].get("adoption_rate", 0.5),
            "centralization_index": pattern_analysis["power_dynamics"].get("centralization", 0.5)
        }

        # Serialize scores by enum value: PropensityType keys are not
        # JSON-serializable, and this string must match the one rebuilt in
        # InstitutionalVector._validate_security or __post_init__ will reject
        # the vector.
        serializable_scores = {k.value: v for k, v in propensity_scores.items()}
        security_base = f"{institution_hash}{json.dumps(serializable_scores, sort_keys=True)}"
        security_signature = hashlib.sha3_512(security_base.encode()).hexdigest()

        vector = InstitutionalVector(
            institution_hash=institution_hash,
            propensity_scores=propensity_scores,
            behavioral_patterns=behavioral_patterns,
            temporal_trajectory=trajectory_analysis["trajectory_points"],
            security_signature=security_signature,
            forecast_horizon=forecast_analysis["forecast_values"],
            risk_factors=risk_analysis["primary_risks"]
        )

        self.institutional_vectors[institution_hash] = vector

        return vector
|
|
|
|
|
|
|
|
    async def _validate_data_security(self, data: Dict[str, Any]) -> bool:
        """Validate institutional data security"""
        # Loop variable renamed from 'field' to avoid shadowing dataclasses.field
        required_fields = ['institution_id', 'behavioral_metrics']
        return all(key in data for key in required_fields)

    async def _analyze_decision_patterns(self, data: Dict[str, Any]) -> Dict[str, float]:
        # Metrics live under the required 'behavioral_metrics' key; reading
        # from the top level would silently fall back to every default.
        metrics = data.get('behavioral_metrics', {})
        return {
            "average_latency": metrics.get('decision_latency', 0.5),
            "consensus_requirement": metrics.get('consensus_level', 0.7),
            "hierarchy_influence": metrics.get('hierarchy_weight', 0.6)
        }

    async def _analyze_resource_patterns(self, data: Dict[str, Any]) -> Dict[str, float]:
        metrics = data.get('behavioral_metrics', {})
        return {
            "efficiency": metrics.get('resource_efficiency', 0.5),
            "allocation_fairness": metrics.get('allocation_fairness', 0.5),
            "budget_flexibility": metrics.get('budget_flexibility', 0.5)
        }

    async def _analyze_risk_patterns(self, data: Dict[str, Any]) -> Dict[str, float]:
        metrics = data.get('behavioral_metrics', {})
        return {
            "tolerance_level": metrics.get('risk_tolerance', 0.3),
            "assessment_rigor": metrics.get('risk_assessment', 0.7),
            "mitigation_investment": metrics.get('risk_mitigation', 0.5)
        }

    async def _analyze_innovation_patterns(self, data: Dict[str, Any]) -> Dict[str, float]:
        metrics = data.get('behavioral_metrics', {})
        return {
            "adoption_rate": metrics.get('innovation_adoption', 0.4),
            "experimentation_budget": metrics.get('experimentation_funding', 0.3),
            "failure_tolerance": metrics.get('failure_tolerance', 0.4)
        }

    async def _analyze_power_patterns(self, data: Dict[str, Any]) -> Dict[str, float]:
        metrics = data.get('behavioral_metrics', {})
        return {
            "centralization": metrics.get('power_centralization', 0.6),
            "autonomy_level": metrics.get('unit_autonomy', 0.4),
            "decision_delegation": metrics.get('decision_delegation', 0.5)
        }
|
|
|
|
|
    async def _analyze_historical_trends(self, historical_data: List[Dict]) -> Dict[str, Any]:
        # Placeholder: returns a fixed marker rather than a real trend model
        return {"trend_analysis": "simplified"}

    async def _assess_temporal_consistency(self, patterns: Dict[str, Any], historical: List[Dict]) -> float:
        # Placeholder: fixed consistency estimate
        return 0.8

    async def _calculate_pattern_stability(self, patterns: Dict[str, Any]) -> Dict[str, float]:
        # Placeholder: fixed stability metrics pending a real estimator
        return {
            "decision_stability": 0.7,
            "resource_stability": 0.8,
            "risk_stability": 0.6,
            "innovation_stability": 0.5,
            "overall_stability": 0.65
        }
|
|
|
|
|
    async def _extract_indicator_score(self, indicator: str, patterns: Dict[str, Any]) -> float:
        """Extract score for specific indicator from patterns"""
        indicator_map = {
            'decision_latency': ('decision_making', 'average_latency'),
            'procedure_complexity': ('decision_making', 'consensus_requirement'),
            'hierarchy_depth': ('decision_making', 'hierarchy_influence'),
            'failure_consequences': ('risk_behavior', 'tolerance_level'),
            'innovation_penalties': ('innovation_trends', 'failure_tolerance'),
            'success_rewards': ('innovation_trends', 'adoption_rate'),
            'centralization_trends': ('power_dynamics', 'centralization'),
            'authority_concentration': ('power_dynamics', 'centralization'),
            'autonomy_reduction': ('power_dynamics', 'autonomy_level'),
            'change_rejection_rate': ('innovation_trends', 'adoption_rate'),
            'tradition_weight': ('decision_making', 'consensus_requirement'),
            'new_method_adoption': ('innovation_trends', 'adoption_rate')
        }

        # These indicators vary inversely with the metric they map to (e.g. a
        # high adoption rate implies a low change-rejection rate), so their
        # scores are flipped before use.
        inverse_indicators = {
            'failure_consequences', 'innovation_penalties',
            'autonomy_reduction', 'change_rejection_rate'
        }

        if indicator in indicator_map:
            category, metric = indicator_map[indicator]
            value = patterns.get(category, {}).get(metric, 0.5)
            return 1.0 - value if indicator in inverse_indicators else value

        return 0.5
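
    # Example: 'change_rejection_rate' resolves to
    # patterns['innovation_trends']['adoption_rate'] and is inverted, so an
    # adoption rate of 0.3 yields an indicator score of 0.7.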
|
|
|
|
|
    async def _calculate_trend_direction(self, scores: List[float]) -> float:
        """Calculate trend direction (-1 to 1)"""
        if len(scores) < 2:
            return 0.0

        # Least-squares slope over evenly spaced points, scaled by 10 and
        # clamped so the result stays in the documented [-1, 1] range
        x = list(range(len(scores)))
        slope, _, _, _, _ = stats.linregress(x, scores)
        return max(-1.0, min(1.0, float(slope * 10)))

    async def _calculate_trend_acceleration(self, scores: List[float]) -> float:
        """Calculate trend acceleration as the mean second difference"""
        if len(scores) < 3:
            return 0.0

        # With at least three points, the second difference is never empty
        first_deriv = np.diff(scores)
        second_deriv = np.diff(first_deriv)
        return float(np.mean(second_deriv))

    async def _prioritize_risks(self, risks: List[str], scores: Dict[PropensityType, float]) -> List[str]:
        """Prioritize risks based on propensity scores"""
        risk_weights = {
            "high_bureaucratic_inertia": scores.get(PropensityType.BUREAUCRATIC_INERTIA, 0),
            "extreme_risk_aversion": scores.get(PropensityType.RISK_AVERSION, 0),
            "power_centralization": scores.get(PropensityType.POWER_CONSOLIDATION, 0),
            "innovation_stagnation": scores.get(PropensityType.INNOVATION_RESISTANCE, 0),
            "behavioral_instability": 0.5
        }

        return sorted(risks, key=lambda r: risk_weights.get(r, 0), reverse=True)
|
|
|
|
|
    def _forecast_point(self, base: float, trend: float, volatility: float, days: int) -> float:
        """Forecast propensity at specific horizon"""
        trend_effect = trend * (days / 30)
        noise = volatility * np.random.normal(0, 0.5)

        forecast = base + trend_effect + noise
        return max(0.0, min(1.0, forecast))
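
    # Worked example (illustrative, ignoring the random noise term): with
    # base 0.6, trend 0.05 and a 90-day horizon, trend_effect is
    # 0.05 * (90 / 30) = 0.15, giving a clamped forecast near 0.75.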
|
|
|
|
|
    async def _assess_trend_persistence(self, trajectory_analysis: Dict[str, Any]) -> float:
        """Assess likelihood of trend persistence"""
        volatility = trajectory_analysis["volatility"]
        acceleration = trajectory_analysis["acceleration"]

        # High volatility and decelerating trends both erode persistence
        persistence = 1.0 - (volatility * 0.5) - (max(0, -acceleration) * 2)
        return max(0.0, min(1.0, persistence))

    async def _handle_analysis_failure(self, data: Dict[str, Any], error: Exception) -> Dict[str, Any]:
        """Handle analysis failures gracefully"""
        return {
            "success": False,
            "error": str(error),
            "institution_id": data.get('institution_id', 'unknown'),
            "fallback_metrics": {"status": "analysis_failed"},
            "timestamp": datetime.now().isoformat()
        }
|
|
|
|
|
|
|
|
class SecurityError(Exception):
    """Data security validation failed"""
    pass


class AnalysisError(Exception):
    """Institutional analysis failed"""
    pass
|
|
|
|
|
|
|
|
async def demonstrate_propensity_analysis():
    """Demonstrate institutional propensity analysis"""
    engine = PropensityEngine(SecurityLevel.QUANTUM_RESISTANT)

    government_agency = {
        "institution_id": "federal_agency_001",
        "behavioral_metrics": {
            "decision_latency": 0.8,
            "consensus_level": 0.9,
            "hierarchy_weight": 0.7,
            "risk_tolerance": 0.2,
            "risk_assessment": 0.8,
            "innovation_adoption": 0.3,
            "power_centralization": 0.6,
            "unit_autonomy": 0.3
        },
        "institution_type": "government_agency",
        "size_category": "large"
    }

    result = await engine.analyze_institutional_behavior(government_agency)

    print("🏛️ INSTITUTIONAL PROPENSITY ANALYSIS")
    if result["success"]:
        print(f"📊 Composite Propensity: {result['composite_propensity']:.3f}")
        print(f"🎯 Volatility: {result['volatility_metric']:.3f}")
        print(f"⚠️ Primary Risks: {result['primary_risk_factors']}")
        print(f"🔮 Forecast Confidence: {result['forecast_confidence']:.3f}")
    else:
        # Failure results carry a different shape, so guard before indexing
        print(f"⚠️ Analysis failed: {result['error']}")

    return result
|
|
|
|
|
if __name__ == "__main__":
    asyncio.run(demonstrate_propensity_analysis())