Trouter-Library committed
Commit 423516e · verified · 1 Parent(s): 343441b

Create guardrails.py

Files changed (1):
  guardrails.py +527 -0
guardrails.py ADDED
@@ -0,0 +1,527 @@
"""
Production Guardrails for Helion-V2
Implements comprehensive safety controls and ethical AI boundaries.
"""

import json
import logging
from typing import Any, Dict, List, Optional, Tuple
from dataclasses import dataclass
from enum import Enum

from safety_classifier import SafetyClassifier, SafetyCategory
from content_moderation import ContentFilter, ModerationResult

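# NOTE: safety_classifier and content_moderation are project-local modules
# not included in this commit. For reference, the interfaces this file relies
# on (a hypothetical sketch inferred from usage below, not the modules'
# actual definitions) are roughly:
#
#   class SafetyClassifier:
#       def check_prompt(self, text: str) -> result    # .is_safe, .category
#       def check_response(self, text: str) -> result  # same shape
#
#   class ContentFilter:
#       def moderate_content(self, text: str) -> ModerationResult
#           # .risk_level (e.g. "medium", "high", "critical"),
#           # .violations: List[str], .sanitized_content: Optional[str]
#       def check_pii(self, text: str) -> Tuple[bool, List[str]]
#       def _sanitize_pii(self, text: str) -> str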

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class GuardrailAction(Enum):
    """Actions to take when guardrail is triggered."""
    ALLOW = "allow"
    WARN = "warn"
    MODIFY = "modify"
    BLOCK = "block"
    REDIRECT = "redirect"


@dataclass
class GuardrailResponse:
    """Response from guardrail system."""
    action: GuardrailAction
    original_input: str
    modified_input: Optional[str]
    reason: str
    severity: str
    suggestions: List[str]
    resources: Optional[Dict[str, Dict[str, str]]]


class GuardrailSystem:
    """
    Comprehensive guardrail system for safe AI deployment.
    Implements multiple layers of protection and ethical boundaries.
    """

    def __init__(self, config_path: str = "safety_config.json"):
        """
        Initialize guardrail system with configuration.

        Args:
            config_path: Path to safety configuration file
        """
        self.config = self._load_config(config_path)
        self.safety_classifier = SafetyClassifier()
        self.content_filter = ContentFilter()

        # Initialize crisis resources
        self.crisis_resources = {
            "suicide_prevention": {
                "name": "National Suicide Prevention Lifeline",
                "phone": "988 or 1-800-273-8255",
                "website": "https://988lifeline.org",
                "available": "24/7"
            },
            "domestic_violence": {
                "name": "National Domestic Violence Hotline",
                "phone": "1-800-799-7233",
                "website": "https://www.thehotline.org",
                "available": "24/7"
            },
            "substance_abuse": {
                "name": "SAMHSA National Helpline",
                "phone": "1-800-662-4357",
                "website": "https://www.samhsa.gov",
                "available": "24/7"
            },
            "mental_health": {
                "name": "NAMI Helpline",
                "phone": "1-800-950-6264",
                "website": "https://www.nami.org",
                "available": "Mon-Fri 10am-10pm ET"
            },
            "child_abuse": {
                "name": "Childhelp National Child Abuse Hotline",
                "phone": "1-800-422-4453",
                "website": "https://www.childhelp.org",
                "available": "24/7"
            }
        }

    def _load_config(self, config_path: str) -> Dict:
        """Load safety configuration."""
        try:
            with open(config_path, 'r') as f:
                return json.load(f)
        except FileNotFoundError:
            logger.warning(f"Config file {config_path} not found, using defaults")
            return self._get_default_config()

    def _get_default_config(self) -> Dict:
        """Get default safety configuration."""
        return {
            "content_filtering": {"enabled": True},
            "pii_protection": {"enabled": True},
            "output_moderation": {"enabled": True},
            "model_behavior": {
                "refuse_harmful_requests": True,
                "provide_alternative_suggestions": True
            }
        }

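    # Example safety_config.json matching the defaults above (a hypothetical
    # sketch; the real file may carry additional keys consumed elsewhere):
    #
    # {
    #     "content_filtering": {"enabled": true},
    #     "pii_protection": {"enabled": true},
    #     "output_moderation": {"enabled": true},
    #     "model_behavior": {
    #         "refuse_harmful_requests": true,
    #         "provide_alternative_suggestions": true
    #     }
    # }
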
114
+ """
115
+ Check input against all guardrails before processing.
116
+
117
+ Args:
118
+ user_input: User's input text
119
+
120
+ Returns:
121
+ GuardrailResponse with action and details
122
+ """
123
+ # Check for crisis situations first (highest priority)
124
+ crisis_detected, crisis_type = self._detect_crisis(user_input)
125
+ if crisis_detected:
126
+ return GuardrailResponse(
127
+ action=GuardrailAction.REDIRECT,
128
+ original_input=user_input,
129
+ modified_input=None,
130
+ reason=f"Crisis situation detected: {crisis_type}",
131
+ severity="critical",
132
+ suggestions=[
133
+ "Please reach out to a trained professional",
134
+ "You don't have to face this alone",
135
+ "Help is available 24/7"
136
+ ],
137
+ resources=self._get_crisis_resources(crisis_type)
138
+ )
139
+
140
+ # Run safety classifier
141
+ safety_result = self.safety_classifier.check_prompt(user_input)
142
+
143
+ if not safety_result.is_safe:
144
+ if safety_result.category == SafetyCategory.CHILD_SAFETY:
145
+ return GuardrailResponse(
146
+ action=GuardrailAction.BLOCK,
147
+ original_input=user_input,
148
+ modified_input=None,
149
+ reason="Content violates child safety policies",
150
+ severity="critical",
151
+ suggestions=[
152
+ "This type of content is not permitted",
153
+ "Please review our usage policies"
154
+ ],
155
+ resources=None
156
+ )
157
+
158
+ elif safety_result.category in [SafetyCategory.VIOLENCE, SafetyCategory.ILLEGAL_ACTIVITY]:
159
+ return GuardrailResponse(
160
+ action=GuardrailAction.BLOCK,
161
+ original_input=user_input,
162
+ modified_input=None,
163
+ reason=f"Content violates safety policy: {safety_result.category.value}",
164
+ severity="high",
165
+ suggestions=[
166
+ "I cannot provide assistance with this request",
167
+ "Please ask about something else",
168
+ "Review our acceptable use policy"
169
+ ],
170
+ resources=None
171
+ )
172
+
173
+ # Run content moderation
174
+ moderation_result = self.content_filter.moderate_content(user_input)
175
+
176
+ if moderation_result.risk_level == "critical":
177
+ return GuardrailResponse(
178
+ action=GuardrailAction.BLOCK,
179
+ original_input=user_input,
180
+ modified_input=None,
181
+ reason=f"Critical safety violation: {', '.join(moderation_result.violations)}",
182
+ severity="critical",
183
+ suggestions=[
184
+ "This request cannot be processed",
185
+ "Please rephrase your question"
186
+ ],
187
+ resources=None
188
+ )
189
+
190
+ elif moderation_result.risk_level == "high":
191
+ # Check if we can sanitize
192
+ if moderation_result.sanitized_content:
193
+ return GuardrailResponse(
194
+ action=GuardrailAction.MODIFY,
195
+ original_input=user_input,
196
+ modified_input=moderation_result.sanitized_content,
197
+ reason="PII detected and sanitized",
198
+ severity="medium",
199
+ suggestions=[
200
+ "Personal information has been redacted for privacy",
201
+ "Please avoid sharing sensitive information"
202
+ ],
203
+ resources=None
204
+ )
205
+ else:
206
+ return GuardrailResponse(
207
+ action=GuardrailAction.WARN,
208
+ original_input=user_input,
209
+ modified_input=None,
210
+ reason=f"Safety concerns: {', '.join(moderation_result.violations)}",
211
+ severity="medium",
212
+ suggestions=[
213
+ "Your request may contain problematic content",
214
+ "Consider rephrasing more appropriately"
215
+ ],
216
+ resources=None
217
+ )
218
+
219
+ elif moderation_result.risk_level == "medium":
220
+ return GuardrailResponse(
221
+ action=GuardrailAction.WARN,
222
+ original_input=user_input,
223
+ modified_input=None,
224
+ reason="Minor content concerns detected",
225
+ severity="low",
226
+ suggestions=[
227
+ "Please maintain a respectful tone",
228
+ "Consider more constructive phrasing"
229
+ ],
230
+ resources=None
231
+ )
232
+
233
+ # All checks passed
234
+ return GuardrailResponse(
235
+ action=GuardrailAction.ALLOW,
236
+ original_input=user_input,
237
+ modified_input=None,
238
+ reason="Input passed all safety checks",
239
+ severity="none",
240
+ suggestions=[],
241
+ resources=None
242
+ )
243
+
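    # Note on precedence in check_input_guardrails: the crisis redirect is
    # evaluated first, then classifier blocks, then content moderation
    # (block > modify > warn); ALLOW is returned only when no earlier rule fires.
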
    def check_output_guardrails(self, model_output: str, user_input: str) -> GuardrailResponse:
        """
        Check model output against guardrails before returning to user.

        Args:
            model_output: Generated model response
            user_input: Original user input for context

        Returns:
            GuardrailResponse with action and details
        """
        # Run safety checks on output
        safety_result = self.safety_classifier.check_response(model_output)

        if not safety_result.is_safe:
            return GuardrailResponse(
                action=GuardrailAction.BLOCK,
                original_input=model_output,
                modified_input=None,
                reason=f"Model output violates safety policy: {safety_result.category.value}",
                severity="high",
                suggestions=[
                    "I apologize, but I cannot provide that response",
                    "Let me try to answer differently",
                    "Please rephrase your question"
                ],
                resources=None
            )

        # Check for PII in output
        has_pii, pii_types = self.content_filter.check_pii(model_output)
        if has_pii:
            sanitized = self.content_filter._sanitize_pii(model_output)
            return GuardrailResponse(
                action=GuardrailAction.MODIFY,
                original_input=model_output,
                modified_input=sanitized,
                reason="PII detected in output and sanitized",
                severity="medium",
                suggestions=[
                    "Personal information has been redacted"
                ],
                resources=None
            )

        # Check for medical/legal/financial disclaimers needed
        needs_disclaimer, disclaimer_type = self._check_disclaimer_needed(user_input, model_output)
        if needs_disclaimer:
            disclaimer = self._get_disclaimer(disclaimer_type)
            modified_output = f"{model_output}\n\n{disclaimer}"
            return GuardrailResponse(
                action=GuardrailAction.MODIFY,
                original_input=model_output,
                modified_input=modified_output,
                reason=f"Added {disclaimer_type} disclaimer",
                severity="low",
                suggestions=[],
                resources=None
            )

        # Output is safe
        return GuardrailResponse(
            action=GuardrailAction.ALLOW,
            original_input=model_output,
            modified_input=None,
            reason="Output passed all safety checks",
            severity="none",
            suggestions=[],
            resources=None
        )

    def _detect_crisis(self, text: str) -> Tuple[bool, Optional[str]]:
        """
        Detect crisis situations requiring immediate intervention.

        Args:
            text: Text to analyze

        Returns:
            Tuple of (is_crisis, crisis_type)
        """
        text_lower = text.lower()

        # Suicide/self-harm detection
        suicide_indicators = [
            'want to die', 'kill myself', 'end my life', 'suicide',
            'not worth living', 'better off dead', 'end it all'
        ]

        for indicator in suicide_indicators:
            if indicator in text_lower:
                return True, "suicide_prevention"

        # Abuse detection
        abuse_indicators = [
            'being abused', 'hurting me', 'hitting me', 'afraid of',
            'domestic violence', 'child abuse', 'sexual abuse'
        ]

        for indicator in abuse_indicators:
            if indicator in text_lower:
                if 'child' in text_lower:
                    return True, "child_abuse"
                return True, "domestic_violence"

        # Substance abuse crisis
        substance_indicators = [
            'overdose', 'too many pills', 'substance abuse', 'addiction help'
        ]

        for indicator in substance_indicators:
            if indicator in text_lower:
                return True, "substance_abuse"

        return False, None

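    # Illustrative behavior (hypothetical inputs, traced against the
    # indicator lists above):
    #   _detect_crisis("I want to end my life")         -> (True, "suicide_prevention")
    #   _detect_crisis("my child is being abused")      -> (True, "child_abuse")
    #   _detect_crisis("What is the capital of France?") -> (False, None)
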
    def _get_crisis_resources(self, crisis_type: str) -> Dict[str, Dict[str, str]]:
        """Get appropriate crisis resources."""
        if crisis_type in self.crisis_resources:
            return {crisis_type: self.crisis_resources[crisis_type]}
        return self.crisis_resources

    def _check_disclaimer_needed(self, user_input: str, model_output: str) -> Tuple[bool, Optional[str]]:
        """Check if disclaimer is needed for specialized advice."""
        combined = (user_input + " " + model_output).lower()

        medical_keywords = [
            'diagnose', 'treatment', 'medication', 'symptoms', 'disease',
            'health condition', 'medical advice', 'prescription'
        ]

        legal_keywords = [
            'legal advice', 'lawsuit', 'contract', 'attorney', 'law',
            'sue', 'legal rights', 'court'
        ]

        financial_keywords = [
            'invest', 'stock', 'financial advice', 'tax', 'retirement',
            'portfolio', 'trading', 'cryptocurrency'
        ]

        for keyword in medical_keywords:
            if keyword in combined:
                return True, "medical"

        for keyword in legal_keywords:
            if keyword in combined:
                return True, "legal"

        for keyword in financial_keywords:
            if keyword in combined:
                return True, "financial"

        return False, None

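    # NOTE: bare substring checks are deliberately broad, so short keywords
    # over-match: 'law' also hits "flaw" and "lawn", 'sue' hits "tissue",
    # and 'tax' hits "syntax". Word-boundary regexes would be stricter.
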
    def _get_disclaimer(self, disclaimer_type: str) -> str:
        """Get appropriate disclaimer text."""
        disclaimers = {
            "medical": "⚠️ Disclaimer: This information is for educational purposes only and is not medical advice. Please consult with a qualified healthcare professional for medical concerns.",
            "legal": "⚠️ Disclaimer: This information is for general purposes only and is not legal advice. Please consult with a qualified attorney for legal matters.",
            "financial": "⚠️ Disclaimer: This information is for educational purposes only and is not financial advice. Please consult with a qualified financial advisor before making investment decisions."
        }
        return disclaimers.get(disclaimer_type, "")

    def process_interaction(
        self,
        user_input: str,
        model_output: str
    ) -> Dict[str, Any]:
        """
        Process complete interaction through guardrail system.

        Args:
            user_input: User's input
            model_output: Model's generated output

        Returns:
            Dictionary with processed results
        """
        # Check input guardrails
        input_check = self.check_input_guardrails(user_input)

        if input_check.action == GuardrailAction.BLOCK:
            return {
                "approved": False,
                "final_output": None,
                "reason": input_check.reason,
                "suggestions": input_check.suggestions,
                "resources": input_check.resources,
                "action_taken": "blocked_input"
            }

        if input_check.action == GuardrailAction.REDIRECT:
            return {
                "approved": True,
                "final_output": self._generate_crisis_response(input_check),
                "reason": input_check.reason,
                "suggestions": input_check.suggestions,
                "resources": input_check.resources,
                "action_taken": "crisis_redirect"
            }

        # Use modified input if available
        processed_input = input_check.modified_input or user_input

        # Check output guardrails
        output_check = self.check_output_guardrails(model_output, processed_input)

        if output_check.action == GuardrailAction.BLOCK:
            return {
                "approved": False,
                "final_output": "I apologize, but I cannot provide that response. Please rephrase your question.",
                "reason": output_check.reason,
                "suggestions": output_check.suggestions,
                "resources": None,
                "action_taken": "blocked_output"
            }

        # Return final processed output
        final_output = output_check.modified_input or model_output

        return {
            "approved": True,
            "final_output": final_output,
            "reason": "Passed all safety checks",
            "suggestions": [],
            "resources": None,
            "action_taken": "approved",
            "modifications": {
                "input_modified": input_check.modified_input is not None,
                "output_modified": output_check.modified_input is not None
            }
        }

    def _generate_crisis_response(self, guardrail_response: GuardrailResponse) -> str:
        """Generate appropriate response for crisis situations."""
        resources_text = ""
        if guardrail_response.resources:
            resources_text = "\n\n📞 Immediate Help Available:\n\n"
            for crisis_type, info in guardrail_response.resources.items():
                resources_text += f"**{info['name']}**\n"
                resources_text += f"Phone: {info['phone']}\n"
                resources_text += f"Website: {info['website']}\n"
                resources_text += f"Available: {info['available']}\n\n"

        response = f"""I'm concerned about what you've shared. While I'm an AI and can't provide the professional support you need, please know that help is available and you don't have to face this alone.

{chr(10).join(f"• {s}" for s in guardrail_response.suggestions)}
{resources_text}
These services are confidential and staffed by trained professionals who care and want to help. Please reach out to them."""

        return response


# Example usage
if __name__ == "__main__":
    # Initialize guardrail system
    guardrails = GuardrailSystem()

    # Test cases
    test_cases = [
        ("What is the capital of France?", "The capital of France is Paris."),
        ("How do I make a bomb?", "Here's how..."),
        ("I want to end my life", "Any response"),
        ("What medications should I take for my symptoms?", "You should take XYZ medication."),
    ]

    print("Guardrail System Tests:\n")
    print("=" * 80)

    for user_input, model_output in test_cases:
        print(f"\nUser: {user_input[:60]}...")
        print(f"Model: {model_output[:60]}...")

        result = guardrails.process_interaction(user_input, model_output)

        print(f"\nApproved: {result['approved']}")
        print(f"Action: {result['action_taken']}")
        print(f"Reason: {result['reason']}")
        if result['final_output']:
            print(f"Final Output: {result['final_output'][:100]}...")
        if result['resources']:
            print("Resources Provided: Yes")
        print("-" * 80)