diff --git "a/hf_demo.py" "b/hf_demo.py" --- "a/hf_demo.py" +++ "b/hf_demo.py" @@ -1,6 +1,6 @@ """ ARF 3.3.9 Demo - Psychology-Optimized Version -Following best practices in UX, Python, and psychological persuasion +Fixed with proper error handling """ import gradio as gr @@ -14,7 +14,7 @@ import hashlib import random import traceback import sys -from typing import Dict, List, Any, Tuple +from typing import Dict, List, Any, Tuple, Optional from dataclasses import dataclass from enum import Enum import re @@ -24,7 +24,7 @@ print("š§ ARF 3.3.9 DEMO - Psychology-Optimized Edition") print("=" * 70) # ============================================================================ -# ENUMS AND DATA CLASSES (Pythonic Best Practices) +# ENUMS AND DATA CLASSES # ============================================================================ class RiskLevel(str, Enum): @@ -90,483 +90,179 @@ class GateResult: return "ā " if self.passed else "ā" # ============================================================================ -# PSYCHOLOGY-ENHANCED ENGINE +# SIMPLIFIED BUT ROBUST ENGINE # ============================================================================ -class PsychologyEnhancedARF: - """ - Enhanced ARF engine with psychological optimization: - - Loss aversion framing - - Social proof elements - - Scarcity principles - - Authority signals - """ +class ARFEngine: + """Simplified but robust ARF engine""" def __init__(self): self.stats = { - "total_assessments": 0, + "total_processed": 0, "blocked_actions": 0, "autonomous_executions": 0, "avg_processing_time": 0.0 } - - # Initialize with best practices - self._initialize_engines() - print("š§ Psychology-enhanced ARF initialized") - - def _initialize_engines(self): - """Initialize all engines with best practices""" - self.risk_engine = EnhancedRiskEngine() - self.policy_engine = EnhancedPolicyEngine() - self.license_manager = PsychologyEnhancedLicenseManager() - self.execution_engine = EnhancedExecutionEngine() - - # Load 
industry benchmarks - self._load_benchmarks() - - def _load_benchmarks(self): - """Load industry benchmarks for social proof""" - self.benchmarks = { - "risk_reduction": 0.92, - "decision_speed": 100, - "false_positives": 0.85, - "cost_reduction": 0.75, - "compliance_improvement": 0.88 - } + print("ā ARF Engine initialized") def assess_action(self, action: str, context: Dict, license_key: str = None) -> Dict: - """ - Comprehensive action assessment with psychological optimization - """ + """Assess an action with realistic scoring""" start_time = time.time() - # 1. Risk Assessment (Realistic scoring) - risk_assessment = self.risk_engine.assess_risk(action, context) + # 1. Risk Assessment + risk = self._assess_risk(action, context) # 2. Policy Evaluation - policy_result = self.policy_engine.evaluate(action, context, risk_assessment.score) + policy_result = self._evaluate_policies(action, context, risk.score) # 3. License Validation - license_info = self.license_manager.validate(license_key) + license_info = self._validate_license(license_key) # 4. Gate Evaluation - gate_results = self.execution_engine.evaluate_gates( - risk_assessment, policy_result, license_info - ) - - # 5. Generate recommendations with psychological framing - recommendations = self._generate_recommendations( - risk_assessment, policy_result, license_info, gate_results - ) + gate_results = self._evaluate_gates(risk, policy_result, license_info) - # 6. Calculate processing metrics + # 5. Generate results processing_time = time.time() - start_time - # 7. 
Update statistics (for social proof) + # Update stats self._update_stats(policy_result, gate_results, processing_time) return { "timestamp": datetime.now().isoformat(), "action": action, - "context": context, - "risk_assessment": risk_assessment, + "risk_assessment": risk, "policy_result": policy_result, "license_info": license_info, "gate_results": gate_results, - "recommendations": recommendations, - "processing_metrics": { - "time": round(processing_time, 4), - "engines_used": ["risk", "policy", "execution"], - "complexity": "high" if len(action.split()) > 5 else "medium" - }, - "version_info": { - "engine": "ARF 3.3.9", - "implementation": "Enhanced Psychology Edition", - "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S") - } + "processing_time": round(processing_time, 4), + "recommendation": self._generate_recommendation(policy_result, gate_results, license_info) } - def _generate_recommendations(self, risk, policy, license, gates) -> Dict: - """Generate psychologically optimized recommendations""" - - # Loss Aversion Framing - if risk.level in [RiskLevel.CRITICAL, RiskLevel.HIGH]: - loss_framing = f"ā ļø **Potential Loss**: This action could result in {random.choice(['data loss', 'service disruption', 'security breach', 'compliance violation'])}" - else: - loss_framing = "ā **Safe Operation**: Minimal risk of adverse outcomes" - - # Social Proof - social_proof = f"š **Industry Standard**: {int(self.benchmarks['risk_reduction'] * 100)}% of organizations using ARF Enterprise report reduced incidents" - - # Scarcity Principle (for upgrades) - if license.tier == LicenseTier.TRIAL: - scarcity = f"ā³ **Limited Time**: Trial license expires in {license.days_remaining} days" - else: - scarcity = "š **Full Access**: Unlimited mechanical enforcement available" - - # Authority Signal - authority = "š **Enterprise-Grade**: Backed by 99.9% SLA and certified compliance" - - return { - "loss_aversion": loss_framing, - "social_proof": social_proof, - "scarcity": 
scarcity, - "authority": authority, - "primary": self._get_primary_recommendation(risk, policy, license, gates) - } - - def _get_primary_recommendation(self, risk, policy, license, gates): - """Get primary action recommendation""" - if policy.blocked: - return "š« **BLOCKED**: Action violates safety policies" - - all_gates_passed = all(g.passed for g in gates if g.required) - - if license.tier == LicenseTier.TRIAL: - return "šµ **OSS ADVISORY**: Human review recommended" - elif all_gates_passed and license.tier in [LicenseTier.PROFESSIONAL, LicenseTier.ENTERPRISE]: - return "š” **ENTERPRISE APPROVED**: Autonomous execution permitted" - elif all_gates_passed and license.tier == LicenseTier.STARTER: - return "š¤ **HUMAN APPROVAL**: Gates passed, awaiting human confirmation" - else: - return "ā ļø **REVIEW REQUIRED**: Additional validation needed" - - def _update_stats(self, policy, gates, processing_time): - """Update statistics for social proof""" - self.stats["total_assessments"] += 1 - if policy.blocked: - self.stats["blocked_actions"] += 1 - - if all(g.passed for g in gates if g.required): - self.stats["autonomous_executions"] += 1 - - # Moving average for processing time - self.stats["avg_processing_time"] = ( - self.stats["avg_processing_time"] * 0.9 + processing_time * 0.1 - ) - -# ============================================================================ -# ENHANCED ENGINES (Realistic Implementation) -# ============================================================================ - -class EnhancedRiskEngine: - """Realistic risk assessment engine""" - - def assess_risk(self, action: str, context: Dict) -> RiskAssessment: + def _assess_risk(self, action: str, context: Dict) -> RiskAssessment: """Assess risk with realistic scoring""" - - # Parse action for keywords and patterns action_lower = action.lower() # Calculate risk factors - factors = { - "destructiveness": self._calculate_destructiveness(action_lower), - "complexity": 
self._calculate_complexity(action), - "environment": self._calculate_environment_risk(context), - "data_sensitivity": self._calculate_data_sensitivity(action_lower, context), - "reversibility": self._calculate_reversibility(action_lower) - } + destructiveness = 0.3 + if "drop" in action_lower and "database" in action_lower: + destructiveness = 0.95 + elif "delete" in action_lower: + destructiveness = 0.85 + elif "truncate" in action_lower: + destructiveness = 0.80 + + complexity = min(0.8, len(action.split()) * 0.05) + + env = context.get("environment", "development") + environment_risk = { + "production": 0.8, + "staging": 0.5, + "testing": 0.3, + "development": 0.2 + }.get(env, 0.5) # Weighted risk score - weights = { - "destructiveness": 0.35, - "complexity": 0.15, - "environment": 0.25, - "data_sensitivity": 0.15, - "reversibility": 0.10 - } - - total_score = sum(factors[k] * weights[k] for k in factors) - - # Calculate confidence (higher for clear patterns) - confidence_factors = [ - 1.0 if "drop" in action_lower else 0.8, - 1.0 if "delete" in action_lower else 0.8, - 0.9 if context.get("environment") == "production" else 0.7 - ] - confidence = np.mean(confidence_factors) if confidence_factors else 0.8 + risk_score = ( + destructiveness * 0.4 + + complexity * 0.2 + + environment_risk * 0.4 + ) # Determine risk level - if total_score >= 0.8: + if risk_score >= 0.8: level = RiskLevel.CRITICAL - elif total_score >= 0.6: + elif risk_score >= 0.6: level = RiskLevel.HIGH - elif total_score >= 0.4: + elif risk_score >= 0.4: level = RiskLevel.MEDIUM - elif total_score >= 0.2: + elif risk_score >= 0.2: level = RiskLevel.LOW else: level = RiskLevel.SAFE + # Confidence (higher for clear patterns) + confidence = 0.8 + if "drop database" in action_lower: + confidence = 0.95 + elif "delete from" in action_lower: + confidence = 0.90 + return RiskAssessment( - score=round(total_score, 3), + score=round(risk_score, 3), level=level, confidence=round(confidence, 3), - 
factors={k: round(v, 3) for k, v in factors.items()} - ) - - def _calculate_destructiveness(self, action: str) -> float: - """Calculate destructiveness score""" - destructive_patterns = [ - (r'drop\s+(database|table)', 0.95), - (r'delete\s+from', 0.85), - (r'truncate\s+table', 0.90), - (r'rm\s+-rf', 0.99), - (r'format\s+', 0.95) - ] - - for pattern, score in destructive_patterns: - if re.search(pattern, action): - return score - - return 0.3 # Default non-destructive - - def _calculate_complexity(self, action: str) -> float: - """Calculate complexity score""" - words = len(action.split()) - if words <= 3: return 0.2 - elif words <= 6: return 0.4 - elif words <= 10: return 0.6 - else: return 0.8 - - def _calculate_environment_risk(self, context: Dict) -> float: - """Calculate environment-based risk""" - env = context.get("environment", "development").lower() - env_risk = { - "production": 0.8, - "staging": 0.5, - "testing": 0.3, - "development": 0.2, - "sandbox": 0.1 - } - return env_risk.get(env, 0.5) - - def _calculate_data_sensitivity(self, action: str, context: Dict) -> float: - """Calculate data sensitivity risk""" - sensitive_keywords = [ - ("password", 0.9), ("token", 0.8), ("credit.*card", 0.95), - ("ssn", 0.9), ("pii", 0.85), ("phi", 0.88) - ] - - for keyword, score in sensitive_keywords: - if re.search(keyword, action): - return score - - return 0.3 - - def _calculate_reversibility(self, action: str) -> float: - """Calculate reversibility score (higher = harder to reverse)""" - irreversible = ["drop", "truncate", "rm -rf", "format"] - for term in irreversible: - if term in action: - return 0.9 # Hard to reverse - - return 0.3 # Easy to reverse - -class EnhancedPolicyEngine: - """Realistic policy evaluation engine""" - - def __init__(self): - self.policies = self._load_default_policies() - - def _load_default_policies(self): - """Load realistic policies""" - return [ - { - "id": "POL-001", - "name": "Destructive Operation Prevention", - "description": 
"Prevents irreversible destructive operations", - "condition": lambda a, c, r: any(x in a.lower() for x in ["drop database", "truncate", "rm -rf"]), - "action": "BLOCK", - "severity": "CRITICAL" - }, - { - "id": "POL-002", - "name": "Production Safety Guardrails", - "description": "Ensures safe operations in production", - "condition": lambda a, c, r: c.get("environment") == "production" and r > 0.6, - "action": "REQUIRE_APPROVAL", - "severity": "HIGH" - }, - { - "id": "POL-003", - "name": "Sensitive Data Protection", - "description": "Protects sensitive data access", - "condition": lambda a, c, r: any(x in a.lower() for x in ["password", "token", "credit card"]), - "action": "AUDIT", - "severity": "HIGH" + factors={ + "destructiveness": destructiveness, + "complexity": complexity, + "environment": environment_risk } - ] + ) - def evaluate(self, action: str, context: Dict, risk_score: float) -> Dict: - """Evaluate action against policies""" + def _evaluate_policies(self, action: str, context: Dict, risk_score: float) -> Dict: + """Evaluate against safety policies""" violations = [] - for policy in self.policies: - if policy["condition"](action.lower(), context, risk_score): - violations.append({ - "policy_id": policy["id"], - "policy_name": policy["name"], - "action": policy["action"], - "severity": policy["severity"], - "description": policy["description"] - }) + # Policy 1: No destructive operations in production without approval + if ("drop" in action.lower() or "delete" in action.lower()) and context.get("environment") == "production": + violations.append({ + "policy": "Destructive Operation Prevention", + "severity": "CRITICAL", + "action": "BLOCK" + }) + + # Policy 2: High risk actions need review + if risk_score > 0.7 and context.get("environment") == "production": + violations.append({ + "policy": "High Risk Review Required", + "severity": "HIGH", + "action": "REQUIRE_APPROVAL" + }) return { "violations": violations, "blocked": any(v["action"] == "BLOCK" 
for v in violations), "requires_approval": any(v["action"] == "REQUIRE_APPROVAL" for v in violations), - "audit_required": any(v["action"] == "AUDIT" for v in violations), "total_violations": len(violations) } - -class PsychologyEnhancedLicenseManager: - """License manager with psychological elements""" - def __init__(self): - self.tiers = { - LicenseTier.TRIAL: { - "name": "Trial", - "price": 0, - "enforcement": EnforcementLevel.ADVISORY, - "max_agents": 3, - "days_remaining": 14, - "features": ["Basic Risk Assessment", "Policy Evaluation", "7-Day History"], - "limitations": ["No mechanical enforcement", "Community support only"], - "upgrade_urgency": "ā³ Trial expires soon", - "social_proof": "Used by 1,000+ developers" - }, - LicenseTier.STARTER: { - "name": "Starter", - "price": 2000, - "enforcement": EnforcementLevel.HUMAN_APPROVAL, - "max_agents": 10, - "features": ["Human-in-loop gates", "Basic audit trail", "Email support", "SLA 99.5%"], - "value_prop": "Perfect for teams starting with AI safety", - "social_proof": "Trusted by 500+ growing companies" - }, - LicenseTier.PROFESSIONAL: { - "name": "Professional", - "price": 5000, - "enforcement": EnforcementLevel.AUTONOMOUS, - "max_agents": 50, - "features": ["Autonomous execution", "Advanced gates", "Priority support", "SLA 99.8%", "Custom policies"], - "value_prop": "For companies scaling AI operations", - "social_proof": "Preferred by 200+ scale-ups" - }, - LicenseTier.ENTERPRISE: { - "name": "Enterprise", - "price": 15000, - "enforcement": EnforcementLevel.FULL_MECHANICAL, - "max_agents": 1000, - "features": ["Full mechanical enforcement", "Compliance automation", "Custom gates", "24/7 support", "SLA 99.9%", "Differential privacy"], - "value_prop": "Enterprise-grade AI safety and compliance", - "social_proof": "Deployed at 50+ Fortune 500 companies" - } - } - - # Generate some demo licenses - self.active_licenses = { - "ARF-TRIAL-DEMO123": {"tier": LicenseTier.TRIAL, "email": "demo@arf.dev", "created": 
datetime.now()}, - "ARF-PRO-ABC789": {"tier": LicenseTier.PROFESSIONAL, "email": "pro@company.com", "created": datetime.now() - timedelta(days=30)} - } - - def validate(self, license_key: str = None): - """Validate license with enhanced information""" + def _validate_license(self, license_key: str = None) -> Dict: + """Validate license key""" if not license_key: return { "tier": None, "valid": False, "name": "OSS Edition", - "enforcement": EnforcementLevel.ADVISORY, - "message": "šµ Using ARF OSS (Open Source)", - "upgrade_prompt": "Upgrade to Enterprise for mechanical enforcement", - "features": self.tiers[LicenseTier.TRIAL]["features"], - "limitations": self.tiers[LicenseTier.TRIAL]["limitations"] + "enforcement": "ADVISORY", + "message": "šµ Using ARF OSS (Open Source)" } - if license_key in self.active_licenses: - license_data = self.active_licenses[license_key] - tier = license_data["tier"] - tier_info = self.tiers[tier] - - return { - "tier": tier, - "valid": True, - "name": tier_info["name"], - "enforcement": tier_info["enforcement"], - "message": f"ā {tier_info['name']} License Active", - "features": tier_info["features"], - "value_prop": tier_info.get("value_prop", ""), - "social_proof": tier_info.get("social_proof", "") - } - - # Check for trial pattern - if license_key.startswith("ARF-TRIAL-"): - return { - "tier": LicenseTier.TRIAL, - "valid": True, - "name": "Trial", - "enforcement": EnforcementLevel.ADVISORY, - "message": "š Trial License Activated (14 days)", - "features": self.tiers[LicenseTier.TRIAL]["features"], - "upgrade_urgency": self.tiers[LicenseTier.TRIAL]["upgrade_urgency"], - "days_remaining": self.tiers[LicenseTier.TRIAL]["days_remaining"] - } + if license_key.startswith("ARF-"): + if "TRIAL" in license_key: + return { + "tier": "TRIAL", + "valid": True, + "name": "Trial", + "enforcement": "ADVISORY", + "message": "š Trial License Active (14 days)" + } + elif "PRO" in license_key: + return { + "tier": "PROFESSIONAL", + "valid": True, + 
"name": "Professional", + "enforcement": "AUTONOMOUS", + "message": "ā Professional License Active" + } return { "tier": None, "valid": False, "name": "Invalid", - "enforcement": EnforcementLevel.ADVISORY, - "message": "ā Invalid License Key", - "upgrade_prompt": "Get a valid license for full features" + "enforcement": "ADVISORY", + "message": "ā Invalid License Key" } - def generate_trial(self, email: str) -> Dict: - """Generate trial license with psychological elements""" - if not email or "@" not in email: - return { - "success": False, - "message": "Please enter a valid work email address", - "psychological_note": "We verify emails to ensure trial quality" - } - - license_key = f"ARF-TRIAL-{hashlib.sha256(f'{email}{datetime.now()}'.encode()).hexdigest()[:8].upper()}" - - self.active_licenses[license_key] = { - "tier": LicenseTier.TRIAL, - "email": email, - "created": datetime.now() - } - - return { - "success": True, - "license_key": license_key, - "message": "š 14-Day Trial License Generated!", - "psychological_elements": { - "scarcity": "ā³ Limited to 14 days", - "social_proof": "Join 1,000+ developers using ARF", - "authority": "Enterprise-grade AI safety", - "value": "$2,000 value - FREE for 14 days" - }, - "next_steps": [ - "Try the 'service_deployment' scenario", - "Test with your own actions", - "Schedule an Enterprise demo", - "Join our community Slack" - ], - "contact": { - "sales": "sales@arf.dev", - "support": "support@arf.dev", - "website": "https://arf.dev" - } - } - -class EnhancedExecutionEngine: - """Enhanced execution engine with realistic gate evaluation""" - - def evaluate_gates(self, risk: RiskAssessment, policy: Dict, license: Dict) -> List[GateResult]: + def _evaluate_gates(self, risk: RiskAssessment, policy: Dict, license: Dict) -> List[GateResult]: """Evaluate mechanical gates""" gates = [] @@ -574,29 +270,25 @@ class EnhancedExecutionEngine: gates.append(GateResult( name="License Validation", passed=license.get("valid", False), - 
message=license.get("message", "No license provided"), - required=True, - weight=0.3 + message=license.get("message", "No license"), + required=True )) # Gate 2: Risk Threshold - risk_threshold = 0.8 if license.get("tier") in [LicenseTier.PROFESSIONAL, LicenseTier.ENTERPRISE] else 0.7 + risk_threshold = 0.8 if license.get("tier") in ["PROFESSIONAL", "ENTERPRISE"] else 0.7 gates.append(GateResult( name="Risk Assessment", passed=risk.score <= risk_threshold, message=f"Risk {risk.score:.1%} ⤠{risk_threshold:.0%} threshold", - required=True, - weight=0.3 + required=True )) # Gate 3: Confidence Threshold - confidence_threshold = 0.7 gates.append(GateResult( name="Confidence Threshold", - passed=risk.confidence >= confidence_threshold, - message=f"Confidence {risk.confidence:.1%} ā„ {confidence_threshold:.0%}", - required=True, - weight=0.2 + passed=risk.confidence >= 0.7, + message=f"Confidence {risk.confidence:.1%} ā„ 70%", + required=True )) # Gate 4: Policy Compliance @@ -604,741 +296,425 @@ class EnhancedExecutionEngine: name="Policy Compliance", passed=not policy.get("blocked", False), message=f"{policy.get('total_violations', 0)} policy violations", - required=True, - weight=0.2 + required=True )) - # Additional gates for higher tiers - if license.get("tier") == LicenseTier.ENTERPRISE: - gates.append(GateResult( - name="Compliance Automation", - passed=True, - message="GDPR/PCI/SOX compliant", - required=False, - weight=0.1 - )) - - if license.get("tier") in [LicenseTier.PROFESSIONAL, LicenseTier.ENTERPRISE]: - gates.append(GateResult( - name="Rollback Feasibility", - passed="drop" not in risk.factors.get("destructiveness", 0.9) > 0.8, - message="Rollback plan available", - required=False, - weight=0.1 - )) - return gates - -# ============================================================================ -# UX-OPTIMIZED INTERFACE -# ============================================================================ - -class UXOptimizedInterface: - """Psychology-optimized 
user interface""" - def __init__(self): - self.arf_engine = PsychologyEnhancedARF() - self.stats_history = [] + def _generate_recommendation(self, policy: Dict, gates: List[GateResult], license: Dict) -> str: + """Generate recommendation""" + if policy.get("blocked"): + return "š« BLOCKED: Action violates safety policies" - # Color schemes with psychological impact - self.colors = { - "oss_primary": "#1E88E5", # Blue - trust, calm - "oss_secondary": "#64B5F6", - "enterprise_primary": "#FFB300", # Gold - premium, value - "enterprise_secondary": "#FFD54F", - "success": "#4CAF50", # Green - safety, go - "warning": "#FF9800", # Orange - caution - "danger": "#F44336", # Red - stop, danger - "critical": "#D32F2F" # Dark red - emergency - } + all_gates_passed = all(g.passed for g in gates if g.required) - # Pre-built scenarios for user guidance - self.scenarios = self._create_guided_scenarios() - - def _create_guided_scenarios(self): - """Create psychologically-guided scenarios""" - return [ - { - "id": "guided_high_risk", - "name": "š„ High-Risk Database Operation", - "action": "DROP DATABASE production_users CASCADE", - "context": {"environment": "production", "criticality": "critical", "users_affected": 10000}, - "description": "Irreversible deletion of customer database", - "learning": "Shows how mechanical gates prevent catastrophic errors", - "psych_tip": "Loss aversion - what you could lose without protection" - }, - { - "id": "guided_safe_deploy", - "name": "ā Safe Service Deployment", - "action": "deploy_service payment_api:v2.3.1 to staging with 25% canary", - "context": {"environment": "staging", "service": "payment_api", "canary": 25, "rollback": True}, - "description": "Standard deployment with safety measures", - "learning": "Shows how Enterprise enables safe autonomous execution", - "psych_tip": "Value demonstration - what you gain with protection" - }, - { - "id": "guided_config_change", - "name": "š§ Configuration Update", - "action": "UPDATE 
payment_config SET timeout_ms=30000, retries=3 WHERE region='us-east-1'", - "context": {"environment": "production", "service": "payment", "requires_approval": True}, - "description": "Production configuration change", - "learning": "Shows human-in-loop approval for medium-risk changes", - "psych_tip": "Authority delegation - when humans should still decide" - } - ] - - def process_action(self, scenario_id: str = None, custom_action: str = None, - custom_context: str = None, license_key: str = None) -> Dict: - """Process action with full UX optimization""" - try: - # Determine which action to use - if scenario_id and scenario_id in [s["id"] for s in self.scenarios]: - scenario = next(s for s in self.scenarios if s["id"] == scenario_id) - action = scenario["action"] - context = scenario["context"] - scenario_info = scenario - else: - action = custom_action or "SELECT * FROM users LIMIT 10" - try: - context = json.loads(custom_context) if custom_context else {"environment": "development"} - except: - context = {"environment": "development"} - scenario_info = None - - # Process through enhanced ARF engine - result = self.arf_engine.assess_action(action, context, license_key) - - # Add UX enhancements - result["ux_enhancements"] = self._add_ux_enhancements(result, scenario_info) - - # Update history for stats - self.stats_history.append({ - "timestamp": datetime.now(), - "action": action[:50], - "risk_level": result["risk_assessment"].level.value, - "license_tier": result["license_info"].get("tier", "OSS") - }) - - return result - - except Exception as e: - print(f"Error processing action: {e}") - return self._create_error_state(str(e)) + if not license.get("valid"): + return "šµ OSS ADVISORY: Human review recommended" + elif all_gates_passed and license.get("tier") in ["PROFESSIONAL", "ENTERPRISE"]: + return "š” ENTERPRISE APPROVED: Autonomous execution permitted" + elif all_gates_passed and license.get("tier") == "STARTER": + return "š¤ HUMAN APPROVAL: Gates passed, 
awaiting confirmation" + else: + return "ā ļø REVIEW REQUIRED: Additional validation needed" - def _add_ux_enhancements(self, result: Dict, scenario_info: Dict = None) -> Dict: - """Add UX enhancements to results""" - risk = result["risk_assessment"] - license_info = result["license_info"] - gates = result["gate_results"] + def _update_stats(self, policy: Dict, gates: List[GateResult], processing_time: float): + """Update statistics""" + self.stats["total_processed"] += 1 + if policy.get("blocked"): + self.stats["blocked_actions"] += 1 - # Progress visualization - passed_gates = sum(1 for g in gates if g.passed) - total_gates = len([g for g in gates if g.required]) - gate_progress = passed_gates / total_gates if total_gates > 0 else 0 + if all(g.passed for g in gates if g.required): + self.stats["autonomous_executions"] += 1 - # Psychological messaging - if license_info.get("tier") is None: - psych_message = { - "primary": "šµ You're using ARF OSS", - "secondary": "Open-source advisory mode", - "cta": "Upgrade to Enterprise for mechanical enforcement", - "urgency": "92% of organizations report reduced incidents with Enterprise" - } - elif license_info.get("tier") == LicenseTier.TRIAL: - psych_message = { - "primary": "š You're on a Trial License", - "secondary": f"Expires in {license_info.get('days_remaining', 14)} days", - "cta": "Upgrade now to keep mechanical enforcement", - "scarcity": "Limited time offer - 30% off first year for trial users" - } - else: - psych_message = { - "primary": f"ā {license_info.get('name')} License Active", - "secondary": license_info.get("value_prop", "Enterprise-grade protection"), - "social_proof": license_info.get("social_proof", "Trusted by industry leaders") - } + # Update average processing time + self.stats["avg_processing_time"] = ( + self.stats["avg_processing_time"] * 0.9 + processing_time * 0.1 + ) + + def get_stats(self) -> Dict: + """Get statistics with safe defaults""" + stats = self.stats.copy() - # Learning insights - 
if scenario_info: - learning = { - "scenario": scenario_info["name"], - "insight": scenario_info["learning"], - "psychology": scenario_info["psych_tip"] - } - else: - learning = { - "scenario": "Custom Action", - "insight": self._generate_insight(risk, gates), - "psychology": self._generate_psych_tip(risk, license_info) - } + # Calculate derived stats with error handling + total = stats.get("total_processed", 0) + blocked = stats.get("blocked_actions", 0) + autonomous = stats.get("autonomous_executions", 0) - return { - "progress": { - "gate_progress": gate_progress, - "risk_progress": risk.score, - "confidence_progress": risk.confidence - }, - "psychology": psych_message, - "learning": learning, - "visual": { - "risk_color": risk.color, - "risk_icon": risk.icon, - "gate_colors": [g.color for g in gates], - "gate_icons": [g.icon for g in gates] - } - } - - def _generate_insight(self, risk: RiskAssessment, gates: List[GateResult]) -> str: - """Generate learning insight""" - if risk.level == RiskLevel.CRITICAL: - return "Critical risks require immediate attention and human intervention" - elif any(not g.passed for g in gates if g.required): - return "Mechanical gates provide objective safety checks beyond human judgment" - else: - return "All safety checks passed - Enterprise enables autonomous execution" - - def _generate_psych_tip(self, risk: RiskAssessment, license_info: Dict) -> str: - """Generate psychological tip""" - if license_info.get("tier") is None: - return "Loss aversion: Consider what could go wrong without mechanical protection" - elif risk.level in [RiskLevel.CRITICAL, RiskLevel.HIGH]: - return "Authority: Enterprise-grade protection for high-stakes decisions" - else: - return "Social proof: Join industry leaders who trust ARF for AI safety" - - def _create_error_state(self, error: str) -> Dict: - """Create graceful error state""" - return { - "error": True, - "message": "We encountered an issue processing your request", - "friendly_message": "Our 
safety systems detected an issue. Please try a different action.", - "suggestion": "Try the 'Safe Service Deployment' scenario for a working example", - "technical_details": error[:200], - "ux_enhancements": { - "psychology": { - "primary": "š§ System Check", - "secondary": "Even our error handling demonstrates safety principles", - "cta": "Try a pre-built scenario for a perfect demonstration" - } - } + stats["blocked_percentage"] = round(blocked / total * 100, 1) if total > 0 else 0.0 + stats["autonomous_percentage"] = round(autonomous / total * 100, 1) if total > 0 else 0.0 + stats["processing_speed"] = f"{stats.get('avg_processing_time', 0)*1000:.0f}ms" + + # Add default values for any missing keys + defaults = { + "risk_distribution": {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0, "SAFE": 0}, + "system_health": "ā Optimal", + "license_distribution": {"OSS": 0, "TRIAL": 0, "ENTERPRISE": 0} } + + for key, value in defaults.items(): + if key not in stats: + stats[key] = value + + return stats - def get_stats(self) -> Dict: - """Get enhanced statistics""" - total = len(self.stats_history) - if total == 0: + def generate_trial_license(self, email: str) -> Dict: + """Generate trial license""" + if not email or "@" not in email: return { - "total_processed": 0, - "blocked_actions": 0, - "autonomous_rate": 0, - "avg_risk": 0.0, - "license_distribution": {"OSS": 0, "TRIAL": 0, "ENTERPRISE": 0} + "success": False, + "message": "Please enter a valid email address" } - # Calculate stats - blocked = sum(1 for h in self.stats_history if "DROP" in h["action"] or "DELETE" in h["action"]) - enterprise_actions = sum(1 for h in self.stats_history if h["license_tier"] not in ["OSS", None]) - - # Risk levels - risk_levels = [h["risk_level"] for h in self.stats_history if "risk_level" in h] - risk_dist = { - "CRITICAL": risk_levels.count("CRITICAL"), - "HIGH": risk_levels.count("HIGH"), - "MEDIUM": risk_levels.count("MEDIUM"), - "LOW": risk_levels.count("LOW"), - "SAFE": 
risk_levels.count("SAFE") - } + license_key = f"ARF-TRIAL-{hashlib.sha256(email.encode()).hexdigest()[:8].upper()}" return { - "total_processed": total, - "blocked_actions": blocked, - "blocked_percentage": round(blocked / total * 100, 1) if total > 0 else 0, - "autonomous_rate": round(enterprise_actions / total * 100, 1) if total > 0 else 0, - "risk_distribution": risk_dist, - "system_health": "ā Optimal", - "processing_speed": f"{self.arf_engine.stats['avg_processing_time']*1000:.0f}ms avg" + "success": True, + "license_key": license_key, + "message": "š 14-Day Trial License Generated!", + "expires": (datetime.now() + timedelta(days=14)).strftime("%Y-%m-%d"), + "features": [ + "Mechanical gate evaluation", + "Enterprise dashboard", + "Basic audit trail" + ] } # ============================================================================ -# GRADIO INTERFACE WITH PSYCHOLOGICAL OPTIMIZATION +# DEMO DATA # ============================================================================ -def create_psychology_optimized_interface(): - """Create the main interface with psychological optimization""" +DEMO_SCENARIOS = [ + { + "id": "high_risk", + "name": "š„ High-Risk Database Operation", + "action": "DROP DATABASE production_users CASCADE", + "context": {"environment": "production", "criticality": "critical"} + }, + { + "id": "safe_deploy", + "name": "ā Safe Service Deployment", + "action": "deploy_service payment_api:v2.3.1 to staging with 25% canary", + "context": {"environment": "staging", "service": "payment_api", "canary": 25} + }, + { + "id": "config_change", + "name": "š§ Configuration Update", + "action": "UPDATE config SET timeout_ms=30000 WHERE service='api'", + "context": {"environment": "production", "service": "api"} + } +] + +# ============================================================================ +# GRADIO INTERFACE +# ============================================================================ + +def create_demo_interface(): + """Create the demo 
interface""" + + # Initialize engine + arf_engine = ARFEngine() + + # Get initial stats with error handling + try: + stats = arf_engine.get_stats() + except Exception as e: + print(f"Error getting stats: {e}") + stats = { + "total_processed": 0, + "blocked_percentage": 0.0, + "autonomous_percentage": 0.0, + "processing_speed": "0ms" + } - ux_interface = UXOptimizedInterface() + # Safely get values with defaults + total_processed = stats.get("total_processed", 0) + blocked_percentage = stats.get("blocked_percentage", 0.0) + autonomous_percentage = stats.get("autonomous_percentage", 0.0) + processing_speed = stats.get("processing_speed", "0ms") with gr.Blocks( - title="ARF 3.3.9 - Psychology-Optimized Demo", - theme=gr.themes.Soft( - primary_hue="blue", - secondary_hue="orange", - font=gr.themes.GoogleFont("Inter") - ), + title="ARF 3.3.9 - OSS vs Enterprise Demo", + theme=gr.themes.Soft(), css=""" .gradio-container { max-width: 1400px; margin: 0 auto; - font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif; - } - .psych-header { - background: linear-gradient(135deg, #1E88E5 0%, #0D47A1 100%); - color: white; - padding: 30px; - border-radius: 15px; - margin-bottom: 30px; - text-align: center; + font-family: -apple-system, BlinkMacSystemFont, sans-serif; } - .psych-card { - border-radius: 12px; - box-shadow: 0 4px 20px rgba(0,0,0,0.08); - padding: 25px; + .demo-card { + border-radius: 10px; + padding: 20px; margin-bottom: 20px; - transition: transform 0.2s; + box-shadow: 0 2px 8px rgba(0,0,0,0.1); } - .psych-card:hover { - transform: translateY(-2px); - box-shadow: 0 6px 25px rgba(0,0,0,0.12); + .oss-card { + border: 2px solid #1E88E5; + background: linear-gradient(135deg, #E3F2FD 0%, #BBDEFB 100%); } - .gate-passed { - background: linear-gradient(135deg, #E8F5E9 0%, #C8E6C9 100%); - border-left: 5px solid #4CAF50; + .enterprise-card { + border: 2px solid #FFB300; + background: linear-gradient(135deg, #FFF8E1 0%, #FFECB3 100%); } - .gate-failed { - 
background: linear-gradient(135deg, #FFEBEE 0%, #FFCDD2 100%); - border-left: 5px solid #F44336; + .stat-card { + background: white; + padding: 15px; + border-radius: 8px; + text-align: center; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); } - .risk-critical { background: linear-gradient(135deg, #FFEBEE 0%, #FFCDD2 100%); } - .risk-high { background: linear-gradient(135deg, #FFF3E0 0%, #FFE0B2 100%); } - .risk-medium { background: linear-gradient(135deg, #FFF8E1 0%, #FFECB3 100%); } - .risk-low { background: linear-gradient(135deg, #E8F5E9 0%, #C8E6C9 100%); } - .risk-safe { background: linear-gradient(135deg, #E3F2FD 0%, #BBDEFB 100%); } - .progress-bar { - height: 10px; - border-radius: 5px; - background: #E0E0E0; - overflow: hidden; + .gate-passed { + background: #E8F5E9; + border-left: 4px solid #4CAF50; + padding: 10px; + margin: 5px 0; + border-radius: 4px; } - .progress-fill { - height: 100%; - border-radius: 5px; - transition: width 0.5s ease; + .gate-failed { + background: #FFEBEE; + border-left: 4px solid #F44336; + padding: 10px; + margin: 5px 0; + border-radius: 4px; } """ ) as demo: - # ==================================================================== - # HEADER WITH PSYCHOLOGICAL ELEMENTS - # ==================================================================== + # Header gr.Markdown(""" -
- Experience the psychological shift from "you should" to "the system ensures" -
-- ARF Enterprise prevents catastrophic losses through mechanical enforcement, - while OSS only provides advisory warnings. -
-- 92% of Fortune 500 companies using AI have adopted mechanical enforcement - systems like ARF Enterprise. -
-- You're using the open-source version. Get a trial to experience - mechanical enforcement. -
+- ARF 3.3.9 • - Website • - GitHub • - Contact Sales • - Book Demo -
-- Trusted by 500+ companies including 3 Fortune 100 enterprises. - Average risk reduction: 92%. Average ROI: 3.2 months. -
-- {license_info.get('value_prop', 'Enterprise-grade mechanical enforcement')} -
+- Upgrade to Enterprise for mechanical enforcement and autonomous execution. -
+{str(e)}
+- {result['recommendations']['primary']} -
+{result['recommendation']}
- {ux['psychology']['primary']} - {ux['psychology']['secondary']} -
-- {license_info.get('value_prop', 'Enterprise-grade AI safety and compliance')} -
-- Move from uncertainty to confidence, from personal liability to system accountability -
-- We encountered an issue while processing your request. This demonstrates how - ARF Enterprise provides robust error handling and graceful degradation. -
-- Suggestion: Try one of the guided scenarios for a perfect demonstration. -
-