""" Production Guardrails for Helion-V2 Implements comprehensive safety controls and ethical AI boundaries. """ import json import logging from typing import Dict, List, Optional, Tuple from dataclasses import dataclass from enum import Enum from safety_classifier import SafetyClassifier, SafetyCategory from content_moderation import ContentFilter, ModerationResult logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) class GuardrailAction(Enum): """Actions to take when guardrail is triggered.""" ALLOW = "allow" WARN = "warn" MODIFY = "modify" BLOCK = "block" REDIRECT = "redirect" @dataclass class GuardrailResponse: """Response from guardrail system.""" action: GuardrailAction original_input: str modified_input: Optional[str] reason: str severity: str suggestions: List[str] resources: Optional[Dict[str, str]] class GuardrailSystem: """ Comprehensive guardrail system for safe AI deployment. Implements multiple layers of protection and ethical boundaries. """ def __init__(self, config_path: str = "safety_config.json"): """ Initialize guardrail system with configuration. Args: config_path: Path to safety configuration file """ self.config = self._load_config(config_path) self.safety_classifier = SafetyClassifier() self.content_filter = ContentFilter() # Initialize crisis resources self.crisis_resources = { "suicide_prevention": { "name": "National Suicide Prevention Lifeline", "phone": "988 or 1-800-273-8255", "website": "https://988lifeline.org", "available": "24/7" }, "domestic_violence": { "name": "National Domestic Violence Hotline", "phone": "1-800-799-7233", "website": "https://www.thehotline.org", "available": "24/7" }, "substance_abuse": { "name": "SAMHSA National Helpline", "phone": "1-800-662-4357", "website": "https://www.samhsa.gov", "available": "24/7" }, "mental_health": { "name": "NAMI Helpline", "phone": "1-800-950-6264", "website": "https://www.nami.org", "available": "Mon-Fri 10am-10pm ET" }, "child_abuse": { "name": "Childhelp National Child Abuse Hotline", "phone": "1-800-422-4453", "website": "https://www.childhelp.org", "available": "24/7" } } def _load_config(self, config_path: str) -> Dict: """Load safety configuration.""" try: with open(config_path, 'r') as f: return json.load(f) except FileNotFoundError: logger.warning(f"Config file {config_path} not found, using defaults") return self._get_default_config() def _get_default_config(self) -> Dict: """Get default safety configuration.""" return { "content_filtering": {"enabled": True}, "pii_protection": {"enabled": True}, "output_moderation": {"enabled": True}, "model_behavior": { "refuse_harmful_requests": True, "provide_alternative_suggestions": True } } def check_input_guardrails(self, user_input: str) -> GuardrailResponse: """ Check input against all guardrails before processing. Args: user_input: User's input text Returns: GuardrailResponse with action and details """ # Check for crisis situations first (highest priority) crisis_detected, crisis_type = self._detect_crisis(user_input) if crisis_detected: return GuardrailResponse( action=GuardrailAction.REDIRECT, original_input=user_input, modified_input=None, reason=f"Crisis situation detected: {crisis_type}", severity="critical", suggestions=[ "Please reach out to a trained professional", "You don't have to face this alone", "Help is available 24/7" ], resources=self._get_crisis_resources(crisis_type) ) # Run safety classifier safety_result = self.safety_classifier.check_prompt(user_input) if not safety_result.is_safe: if safety_result.category == SafetyCategory.CHILD_SAFETY: return GuardrailResponse( action=GuardrailAction.BLOCK, original_input=user_input, modified_input=None, reason="Content violates child safety policies", severity="critical", suggestions=[ "This type of content is not permitted", "Please review our usage policies" ], resources=None ) elif safety_result.category in [SafetyCategory.VIOLENCE, SafetyCategory.ILLEGAL_ACTIVITY]: return GuardrailResponse( action=GuardrailAction.BLOCK, original_input=user_input, modified_input=None, reason=f"Content violates safety policy: {safety_result.category.value}", severity="high", suggestions=[ "I cannot provide assistance with this request", "Please ask about something else", "Review our acceptable use policy" ], resources=None ) # Run content moderation moderation_result = self.content_filter.moderate_content(user_input) if moderation_result.risk_level == "critical": return GuardrailResponse( action=GuardrailAction.BLOCK, original_input=user_input, modified_input=None, reason=f"Critical safety violation: {', '.join(moderation_result.violations)}", severity="critical", suggestions=[ "This request cannot be processed", "Please rephrase your question" ], resources=None ) elif moderation_result.risk_level == "high": # Check if we can sanitize if moderation_result.sanitized_content: return GuardrailResponse( action=GuardrailAction.MODIFY, original_input=user_input, modified_input=moderation_result.sanitized_content, reason="PII detected and sanitized", severity="medium", suggestions=[ "Personal information has been redacted for privacy", "Please avoid sharing sensitive information" ], resources=None ) else: return GuardrailResponse( action=GuardrailAction.WARN, original_input=user_input, modified_input=None, reason=f"Safety concerns: {', '.join(moderation_result.violations)}", severity="medium", suggestions=[ "Your request may contain problematic content", "Consider rephrasing more appropriately" ], resources=None ) elif moderation_result.risk_level == "medium": return GuardrailResponse( action=GuardrailAction.WARN, original_input=user_input, modified_input=None, reason="Minor content concerns detected", severity="low", suggestions=[ "Please maintain a respectful tone", "Consider more constructive phrasing" ], resources=None ) # All checks passed return GuardrailResponse( action=GuardrailAction.ALLOW, original_input=user_input, modified_input=None, reason="Input passed all safety checks", severity="none", suggestions=[], resources=None ) def check_output_guardrails(self, model_output: str, user_input: str) -> GuardrailResponse: """ Check model output against guardrails before returning to user. Args: model_output: Generated model response user_input: Original user input for context Returns: GuardrailResponse with action and details """ # Run safety checks on output safety_result = self.safety_classifier.check_response(model_output) if not safety_result.is_safe: return GuardrailResponse( action=GuardrailAction.BLOCK, original_input=model_output, modified_input=None, reason=f"Model output violates safety policy: {safety_result.category.value}", severity="high", suggestions=[ "I apologize, but I cannot provide that response", "Let me try to answer differently", "Please rephrase your question" ], resources=None ) # Check for PII in output has_pii, pii_types = self.content_filter.check_pii(model_output) if has_pii: sanitized = self.content_filter._sanitize_pii(model_output) return GuardrailResponse( action=GuardrailAction.MODIFY, original_input=model_output, modified_input=sanitized, reason="PII detected in output and sanitized", severity="medium", suggestions=[ "Personal information has been redacted" ], resources=None ) # Check for medical/legal/financial disclaimers needed needs_disclaimer, disclaimer_type = self._check_disclaimer_needed(user_input, model_output) if needs_disclaimer: disclaimer = self._get_disclaimer(disclaimer_type) modified_output = f"{model_output}\n\n{disclaimer}" return GuardrailResponse( action=GuardrailAction.MODIFY, original_input=model_output, modified_input=modified_output, reason=f"Added {disclaimer_type} disclaimer", severity="low", suggestions=[], resources=None ) # Output is safe return GuardrailResponse( action=GuardrailAction.ALLOW, original_input=model_output, modified_input=None, reason="Output passed all safety checks", severity="none", suggestions=[], resources=None ) def _detect_crisis(self, text: str) -> Tuple[bool, Optional[str]]: """ Detect crisis situations requiring immediate intervention. Args: text: Text to analyze Returns: Tuple of (is_crisis, crisis_type) """ text_lower = text.lower() # Suicide/self-harm detection suicide_indicators = [ 'want to die', 'kill myself', 'end my life', 'suicide', 'not worth living', 'better off dead', 'end it all' ] for indicator in suicide_indicators: if indicator in text_lower: return True, "suicide_prevention" # Abuse detection abuse_indicators = [ 'being abused', 'hurting me', 'hitting me', 'afraid of', 'domestic violence', 'child abuse', 'sexual abuse' ] for indicator in abuse_indicators: if indicator in text_lower: if 'child' in text_lower: return True, "child_abuse" return True, "domestic_violence" # Substance abuse crisis substance_indicators = [ 'overdose', 'too many pills', 'substance abuse', 'addiction help' ] for indicator in substance_indicators: if indicator in text_lower: return True, "substance_abuse" return False, None def _get_crisis_resources(self, crisis_type: str) -> Dict[str, str]: """Get appropriate crisis resources.""" if crisis_type in self.crisis_resources: return {crisis_type: self.crisis_resources[crisis_type]} return self.crisis_resources def _check_disclaimer_needed(self, user_input: str, model_output: str) -> Tuple[bool, Optional[str]]: """Check if disclaimer is needed for specialized advice.""" combined = (user_input + " " + model_output).lower() medical_keywords = [ 'diagnose', 'treatment', 'medication', 'symptoms', 'disease', 'health condition', 'medical advice', 'prescription' ] legal_keywords = [ 'legal advice', 'lawsuit', 'contract', 'attorney', 'law', 'sue', 'legal rights', 'court' ] financial_keywords = [ 'invest', 'stock', 'financial advice', 'tax', 'retirement', 'portfolio', 'trading', 'cryptocurrency' ] for keyword in medical_keywords: if keyword in combined: return True, "medical" for keyword in legal_keywords: if keyword in combined: return True, "legal" for keyword in financial_keywords: if keyword in combined: return True, "financial" return False, None def _get_disclaimer(self, disclaimer_type: str) -> str: """Get appropriate disclaimer text.""" disclaimers = { "medical": "⚠️ Disclaimer: This information is for educational purposes only and is not medical advice. Please consult with a qualified healthcare professional for medical concerns.", "legal": "⚠️ Disclaimer: This information is for general purposes only and is not legal advice. Please consult with a qualified attorney for legal matters.", "financial": "⚠️ Disclaimer: This information is for educational purposes only and is not financial advice. Please consult with a qualified financial advisor before making investment decisions." } return disclaimers.get(disclaimer_type, "") def process_interaction( self, user_input: str, model_output: str ) -> Dict[str, any]: """ Process complete interaction through guardrail system. Args: user_input: User's input model_output: Model's generated output Returns: Dictionary with processed results """ # Check input guardrails input_check = self.check_input_guardrails(user_input) if input_check.action == GuardrailAction.BLOCK: return { "approved": False, "final_output": None, "reason": input_check.reason, "suggestions": input_check.suggestions, "resources": input_check.resources, "action_taken": "blocked_input" } if input_check.action == GuardrailAction.REDIRECT: return { "approved": True, "final_output": self._generate_crisis_response(input_check), "reason": input_check.reason, "suggestions": input_check.suggestions, "resources": input_check.resources, "action_taken": "crisis_redirect" } # Use modified input if available processed_input = input_check.modified_input or user_input # Check output guardrails output_check = self.check_output_guardrails(model_output, processed_input) if output_check.action == GuardrailAction.BLOCK: return { "approved": False, "final_output": "I apologize, but I cannot provide that response. Please rephrase your question.", "reason": output_check.reason, "suggestions": output_check.suggestions, "resources": None, "action_taken": "blocked_output" } # Return final processed output final_output = output_check.modified_input or model_output return { "approved": True, "final_output": final_output, "reason": "Passed all safety checks", "suggestions": [], "resources": None, "action_taken": "approved", "modifications": { "input_modified": input_check.modified_input is not None, "output_modified": output_check.modified_input is not None } } def _generate_crisis_response(self, guardrail_response: GuardrailResponse) -> str: """Generate appropriate response for crisis situations.""" resources_text = "" if guardrail_response.resources: resources_text = "\n\n📞 Immediate Help Available:\n\n" for crisis_type, info in guardrail_response.resources.items(): resources_text += f"**{info['name']}**\n" resources_text += f"Phone: {info['phone']}\n" resources_text += f"Website: {info['website']}\n" resources_text += f"Available: {info['available']}\n\n" response = f"""I'm concerned about what you've shared. While I'm an AI and can't provide the professional support you need, please know that help is available and you don't have to face this alone. {chr(10).join(f"• {s}" for s in guardrail_response.suggestions)} {resources_text} These services are confidential and staffed by trained professionals who care and want to help. Please reach out to them.""" return response # Example usage if __name__ == "__main__": # Initialize guardrail system guardrails = GuardrailSystem() # Test cases test_cases = [ ("What is the capital of France?", "The capital of France is Paris."), ("How do I make a bomb?", "Here's how..."), ("I want to end my life", "Any response"), ("What medications should I take for my symptoms?", "You should take XYZ medication."), ] print("Guardrail System Tests:\n") print("=" * 80) for user_input, model_output in test_cases: print(f"\nUser: {user_input[:60]}...") print(f"Model: {model_output[:60]}...") result = guardrails.process_interaction(user_input, model_output) print(f"\nApproved: {result['approved']}") print(f"Action: {result['action_taken']}") print(f"Reason: {result['reason']}") if result['final_output']: print(f"Final Output: {result['final_output'][:100]}...") if result['resources']: print(f"Resources Provided: Yes") print("-" * 80)