Trouter-Library committed on
Commit bbbe225 · verified · 1 Parent(s): 5de6e17

Create safety_wrapper.py
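
A minimal usage sketch (hypothetical, not part of this commit; assumes the DeepXR/Helion-V2.0-Thinking weights are reachable and that safety_wrapper.py is on the import path):

    from safety_wrapper import SafeHelionWrapper, SafetyViolation

    wrapper = SafeHelionWrapper(enable_safety=True, enable_rate_limiting=True)
    try:
        print(wrapper.generate("Summarize the water cycle.", user_id="alice",
                               max_new_tokens=64))
    except SafetyViolation as exc:
        print(f"Blocked: {exc}")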

Files changed (1): safety_wrapper.py +433 -0
safety_wrapper.py ADDED
@@ -0,0 +1,433 @@
+ """
+ Safety wrapper for Helion-V2.0-Thinking
+ Implements content filtering, rate limiting, and safety checks.
+ """
+
+ import hashlib
+ import json
+ import re
+ from collections import defaultdict, deque
+ from datetime import datetime, timedelta
+ from typing import Any, Dict, List, Optional
+
+ import torch
+ from PIL import Image
+ from transformers import AutoModelForCausalLM, AutoProcessor
+
+
+ class SafetyViolation(Exception):
+     """Exception raised when safety policies are violated"""
+     pass
+
+
+ class ContentFilter:
+     """Content filtering for inputs and outputs"""
+
+     # Harmful patterns to detect
+     HARMFUL_PATTERNS = [
+         r'(?i)how\s+to\s+(hack|crack|break\s+into)',
+         r'(?i)make\s+(explosive|bomb|weapon)',
+         r'(?i)(kill|murder|harm)\s+(myself|someone|people)',
+         r'(?i)credit\s+card\s+number',
+         r'(?i)social\s+security\s+number',
+         r'(?i)(steal|fraud|scam)\s+',
+         r'(?i)illegal\s+(drugs|substances)',
+         r'(?i)child\s+(abuse|exploitation)',
+     ]
+
+     # PII patterns
+     PII_PATTERNS = {
+         'email': r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b',
+         'phone': r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b',
+         'ssn': r'\b\d{3}-\d{2}-\d{4}\b',
+         'credit_card': r'\b\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}\b',
+     }
+
+     # Toxic keywords
+     TOXIC_KEYWORDS = [
+         'hate', 'violence', 'threat', 'abuse', 'harass',
+         'discriminate', 'racist', 'sexist', 'offensive'
+     ]
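+
+     # Note: these lists are simple pattern-matching baselines, not a trained
+     # safety classifier. HARMFUL_PATTERNS are case-insensitive regexes;
+     # TOXIC_KEYWORDS are matched as plain substrings of the lowercased text,
+     # so paraphrases and obfuscations will slip through.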
+
+     @staticmethod
+     def check_harmful_content(text: str) -> tuple[bool, Optional[str]]:
+         """
+         Check if text contains harmful content
+
+         Returns:
+             (is_harmful, reason)
+         """
+         for pattern in ContentFilter.HARMFUL_PATTERNS:
+             if re.search(pattern, text):
+                 return True, f"Matched harmful pattern: {pattern}"
+
+         return False, None
+
+     @staticmethod
+     def check_pii(text: str) -> tuple[bool, List[str]]:
+         """
+         Check for personally identifiable information
+
+         Returns:
+             (contains_pii, pii_types)
+         """
+         found_pii = []
+         for pii_type, pattern in ContentFilter.PII_PATTERNS.items():
+             if re.search(pattern, text):
+                 found_pii.append(pii_type)
+
+         return len(found_pii) > 0, found_pii
+
+     @staticmethod
+     def check_toxicity(text: str) -> tuple[float, List[str]]:
+         """
+         Simple toxicity check based on keywords
+
+         Returns:
+             (toxicity_score, matched_keywords)
+         """
+         text_lower = text.lower()
+         matched = [kw for kw in ContentFilter.TOXIC_KEYWORDS if kw in text_lower]
+         score = len(matched) / len(ContentFilter.TOXIC_KEYWORDS)
+
+         return score, matched
+
+     @staticmethod
+     def redact_pii(text: str) -> str:
+         """Redact PII from text"""
+         redacted = text
+
+         for pii_type, pattern in ContentFilter.PII_PATTERNS.items():
+             redacted = re.sub(pattern, f'[REDACTED_{pii_type.upper()}]', redacted)
+
+         return redacted
+
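+ # Illustrative ContentFilter behavior (hypothetical inputs):
+ #   ContentFilter.check_pii("mail me at a@b.co")   -> (True, ['email'])
+ #   ContentFilter.redact_pii("call 555-123-4567")  -> 'call [REDACTED_PHONE]'
+ #   ContentFilter.check_toxicity("hate speech")    -> (~0.11, ['hate'])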
+
+ class RateLimiter:
+     """Rate limiting for API usage"""
+
+     def __init__(
+         self,
+         requests_per_minute: int = 60,
+         tokens_per_minute: int = 90000,
+         concurrent_limit: int = 10
+     ):
+         self.requests_per_minute = requests_per_minute
+         self.tokens_per_minute = tokens_per_minute
+         self.concurrent_limit = concurrent_limit
+
+         self.request_times = defaultdict(deque)
+         self.token_counts = defaultdict(deque)
+         self.active_requests = defaultdict(int)
+
+     def check_rate_limit(self, user_id: str, estimated_tokens: int = 0) -> tuple[bool, Optional[str]]:
+         """
+         Check if request is within rate limits
+
+         Returns:
+             (allowed, reason_if_denied)
+         """
+         now = datetime.now()
+         minute_ago = now - timedelta(minutes=1)
+
+         # Clean old entries
+         while self.request_times[user_id] and self.request_times[user_id][0] < minute_ago:
+             self.request_times[user_id].popleft()
+
+         while self.token_counts[user_id] and self.token_counts[user_id][0][0] < minute_ago:
+             self.token_counts[user_id].popleft()
+
+         # Check requests per minute
+         if len(self.request_times[user_id]) >= self.requests_per_minute:
+             return False, f"Rate limit exceeded: {self.requests_per_minute} requests per minute"
+
+         # Check tokens per minute
+         total_tokens = sum(t[1] for t in self.token_counts[user_id])
+         if total_tokens + estimated_tokens > self.tokens_per_minute:
+             return False, f"Token limit exceeded: {self.tokens_per_minute} tokens per minute"
+
+         # Check concurrent requests
+         if self.active_requests[user_id] >= self.concurrent_limit:
+             return False, f"Concurrent request limit exceeded: {self.concurrent_limit}"
+
+         return True, None
+
+     def record_request(self, user_id: str, tokens: int = 0):
+         """Record a request"""
+         now = datetime.now()
+         self.request_times[user_id].append(now)
+         self.token_counts[user_id].append((now, tokens))
+         self.active_requests[user_id] += 1
+
+     def release_request(self, user_id: str):
+         """Release an active request slot"""
+         if self.active_requests[user_id] > 0:
+             self.active_requests[user_id] -= 1
+
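+ # Worked example (with the defaults above, hypothetical user): 60 request
+ # timestamps inside the trailing 60 s window mean refusal until the oldest
+ # ages out; 89,500 tokens recorded in-window plus a 1,000-token estimate is
+ # refused, since 89,500 + 1,000 > 90,000.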
+
+ class SafeHelionWrapper:
+     """Safety wrapper for Helion-V2.0-Thinking"""
+
+     def __init__(
+         self,
+         model_name: str = "DeepXR/Helion-V2.0-Thinking",
+         safety_config_path: Optional[str] = None,
+         enable_safety: bool = True,
+         enable_rate_limiting: bool = True,
+         device: str = "auto"
+     ):
+         """
+         Initialize safe wrapper
+
+         Args:
+             model_name: Model name or path
+             safety_config_path: Path to safety_config.json
+             enable_safety: Enable safety checks
+             enable_rate_limiting: Enable rate limiting
+             device: Device for model
+         """
+         print(f"Loading model with safety wrapper: {model_name}")
+
+         # Load safety config
+         if safety_config_path:
+             with open(safety_config_path, 'r') as f:
+                 self.safety_config = json.load(f)
+         else:
+             self.safety_config = self._default_safety_config()
+
+         self.enable_safety = enable_safety
+         self.enable_rate_limiting = enable_rate_limiting
+
+         # Initialize components
+         self.model = AutoModelForCausalLM.from_pretrained(
+             model_name,
+             torch_dtype=torch.bfloat16,
+             device_map=device,
+             trust_remote_code=True
+         )
+         self.processor = AutoProcessor.from_pretrained(model_name)
+         self.model.eval()
+
+         self.content_filter = ContentFilter()
+         rate_cfg = self.safety_config['safety_settings']['rate_limiting']
+         self.rate_limiter = RateLimiter(
+             requests_per_minute=rate_cfg['requests_per_minute'],
+             tokens_per_minute=rate_cfg['tokens_per_minute'],
+             concurrent_limit=rate_cfg['concurrent_requests']
+         )
+
+         # Violation tracking
+         self.violation_log = []
+
+         print("Safety wrapper initialized successfully")
+
+     def _default_safety_config(self) -> Dict[str, Any]:
+         """Default safety configuration"""
+         return {
+             "safety_settings": {
+                 "rate_limiting": {
+                     "requests_per_minute": 60,
+                     "tokens_per_minute": 90000,
+                     "concurrent_requests": 10
+                 },
+                 "content_filtering": {
+                     "profanity_filter": {"enabled": True},
+                     "pii_detection": {"enabled": True},
+                     "toxicity_detection": {"enabled": True, "threshold": 0.7}
+                 }
+             }
+         }
+
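+     # A user-supplied safety_config.json is expected to mirror the structure
+     # returned above; the wrapper reads safety_settings.rate_limiting.* and
+     # safety_settings.content_filtering.*, and honors an optional
+     # guardrails.input_validation.max_images_per_request key if present.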
+     def _validate_input(
+         self,
+         prompt: str,
+         images: Optional[List[Image.Image]] = None,
+         user_id: str = "default"
+     ):
+         """Validate input against safety policies"""
+         if not self.enable_safety:
+             return
+
+         # Check harmful content
+         is_harmful, reason = self.content_filter.check_harmful_content(prompt)
+         if is_harmful:
+             self._log_violation(user_id, "harmful_content", reason)
+             raise SafetyViolation(f"Input rejected: {reason}")
+
+         # Check PII
+         if self.safety_config['safety_settings']['content_filtering']['pii_detection']['enabled']:
+             has_pii, pii_types = self.content_filter.check_pii(prompt)
+             if has_pii:
+                 self._log_violation(user_id, "pii_detected", f"Types: {pii_types}")
+                 print(f"Warning: PII detected in input: {pii_types}")
+
+         # Check toxicity
+         if self.safety_config['safety_settings']['content_filtering']['toxicity_detection']['enabled']:
+             toxicity_score, keywords = self.content_filter.check_toxicity(prompt)
+             threshold = self.safety_config['safety_settings']['content_filtering']['toxicity_detection']['threshold']
+
+             if toxicity_score > threshold:
+                 self._log_violation(user_id, "high_toxicity", f"Score: {toxicity_score}")
+                 raise SafetyViolation(f"Input rejected: High toxicity score ({toxicity_score:.2f})")
+
+         # Validate images
+         if images:
+             max_images = self.safety_config.get('guardrails', {}).get('input_validation', {}).get('max_images_per_request', 10)
+             if len(images) > max_images:
+                 raise SafetyViolation(f"Too many images: {len(images)} (max: {max_images})")
+
+     def _validate_output(self, output: str, user_id: str = "default"):
+         """Validate output against safety policies"""
+         if not self.enable_safety:
+             return output
+
+         # Check for harmful content in output
+         is_harmful, reason = self.content_filter.check_harmful_content(output)
+         if is_harmful:
+             self._log_violation(user_id, "harmful_output", reason)
+             return "I cannot provide that information as it may be harmful."
+
+         # Redact PII if found
+         if self.safety_config['safety_settings']['content_filtering']['pii_detection']['enabled']:
+             output = self.content_filter.redact_pii(output)
+
+         return output
+
+     def _log_violation(self, user_id: str, violation_type: str, details: str):
+         """Log safety violation"""
+         self.violation_log.append({
+             "timestamp": datetime.now().isoformat(),
+             "user_id": hashlib.sha256(user_id.encode()).hexdigest()[:16],
+             "type": violation_type,
+             "details": details
+         })
+
+         # Keep only last 1000 violations
+         if len(self.violation_log) > 1000:
+             self.violation_log = self.violation_log[-1000:]
+
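+     # Request lifecycle for generate(): check limits -> record request ->
+     # validate input -> run the model -> filter output. release_request()
+     # runs in the `finally` block, so the concurrency slot is freed even
+     # when validation raises SafetyViolation mid-flight.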
+     def generate(
+         self,
+         prompt: str,
+         images: Optional[List[Image.Image]] = None,
+         user_id: str = "default",
+         max_new_tokens: int = 512,
+         temperature: float = 0.7,
+         **kwargs
+     ) -> str:
+         """
+         Safe generation with input/output filtering
+
+         Args:
+             prompt: Input prompt
+             images: Optional list of images
+             user_id: User identifier for rate limiting
+             max_new_tokens: Maximum tokens to generate
+             temperature: Sampling temperature
+             **kwargs: Additional generation parameters
+
+         Returns:
+             Generated text (filtered)
+         """
+         # Check rate limit
+         if self.enable_rate_limiting:
+             allowed, reason = self.rate_limiter.check_rate_limit(user_id, max_new_tokens)
+             if not allowed:
+                 raise SafetyViolation(reason)
+
+             self.rate_limiter.record_request(user_id, max_new_tokens)
+
+         try:
+             # Validate input
+             self._validate_input(prompt, images, user_id)
+
+             # Generate
+             if images:
+                 inputs = self.processor(text=prompt, images=images, return_tensors="pt").to(self.model.device)
+             else:
+                 inputs = self.processor(text=prompt, return_tensors="pt").to(self.model.device)
+
+             with torch.no_grad():
+                 outputs = self.model.generate(
+                     **inputs,
+                     max_new_tokens=max_new_tokens,
+                     temperature=temperature,
+                     do_sample=True,  # temperature has no effect unless sampling is enabled
+                     **kwargs
+                 )
+
+             response = self.processor.decode(outputs[0], skip_special_tokens=True)
+
+             # Remove prompt from response
+             if response.startswith(prompt):
+                 response = response[len(prompt):].strip()
+
+             # Validate output
+             response = self._validate_output(response, user_id)
+
+             return response
+
+         finally:
+             if self.enable_rate_limiting:
+                 self.rate_limiter.release_request(user_id)
+
+     def get_violation_stats(self) -> Dict[str, Any]:
+         """Get violation statistics"""
+         if not self.violation_log:
+             return {"total_violations": 0}
+
+         violation_types = defaultdict(int)
+         for log in self.violation_log:
+             violation_types[log['type']] += 1
+
+         return {
+             "total_violations": len(self.violation_log),
+             "by_type": dict(violation_types),
+             "recent_violations": self.violation_log[-10:]
+         }
+
+     def export_violation_log(self, filename: str = "violations.json"):
+         """Export violation log to file"""
+         with open(filename, 'w') as f:
+             json.dump(self.violation_log, f, indent=2)
+         print(f"Violation log exported to {filename}")
+
+
+ def main():
+     """Example usage of safe wrapper"""
+     # Initialize safe wrapper
+     wrapper = SafeHelionWrapper(
+         model_name="DeepXR/Helion-V2.0-Thinking",
+         enable_safety=True,
+         enable_rate_limiting=True
+     )
+
+     # Test cases
+     test_prompts = [
+         "Explain how photosynthesis works.",  # Safe
+         "Write a poem about nature.",  # Safe
+         "How do I hack into an email account?",  # Should be blocked
+     ]
+
+     print("\n" + "="*60)
+     print("Testing Safety Wrapper")
+     print("="*60 + "\n")
+
+     for prompt in test_prompts:
+         print(f"Prompt: {prompt}")
+         try:
+             response = wrapper.generate(prompt, max_new_tokens=128)
+             print(f"Response: {response}\n")
+         except SafetyViolation as e:
+             print(f"BLOCKED: {e}\n")
+         except Exception as e:
+             print(f"ERROR: {e}\n")
+
+     # Print violation stats
+     print("="*60)
+     print("Violation Statistics")
+     print("="*60)
+     stats = wrapper.get_violation_stats()
+     print(json.dumps(stats, indent=2))
+
+
+ if __name__ == "__main__":
+     main()