| """ | |
| Response validation tools for ensuring safe and appropriate responses | |
| """ | |
| import re | |
| from typing import Dict, List, Tuple, Optional, Any | |
| from dataclasses import dataclass | |
| import json | |
| from transformers import pipeline | |
| import torch | |
| from pydantic import PrivateAttr | |
| from crewai.tools import BaseTool | |
@dataclass
class ValidationResult:
    """Result of a validation check"""
    is_valid: bool
    issues: List[str]
    warnings: List[str]
    suggestions: List[str]
    confidence: float
    refined_text: Optional[str] = None
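# ValidationResult is constructed positionally (see validate_user_input) and by
# keyword (see validate_response). An illustrative construction, kept as a
# comment so the module stays import-clean:
#
#     result = ValidationResult(
#         is_valid=False,
#         issues=["Contains medical advice/content"],
#         warnings=[],
#         suggestions=["Consider speaking with a healthcare professional"],
#         confidence=0.2,
#     )
#     if not result.is_valid:
#         ...  # fall back to result.refined_text or regenerate the response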
class ValidateResponseTool(BaseTool):
    name: str = "validate_response"
    description: str = "Validates a response for safety and helpfulness."
    model_config = {"arbitrary_types_allowed": True}
    _config: object = PrivateAttr()

    def __init__(self, config=None, **data):
        super().__init__(**data)
        self._config = config

    def _run(self, response: str, context: dict = None) -> dict:
        # Placeholder validation: plug the full ValidationTools logic in here.
        # For now, return a permissive dummy result in the same shape as ValidationResult.
        result = ValidationResult(
            is_valid=True,
            issues=[],
            warnings=[],
            suggestions=[],
            confidence=1.0,
            refined_text=None,
        )
        return result.__dict__
class ValidationTools:
    """Tools for validating responses and ensuring safety"""
    def __init__(self, config=None):
        self._validate_response = ValidateResponseTool(config)
        # Add more tools as needed (check_safety, refine_response, etc.)
        # Initialize sentiment analyzer for tone checking
        self.sentiment_analyzer = pipeline(
            "sentiment-analysis",
            model="nlptown/bert-base-multilingual-uncased-sentiment",
            device=0 if torch.cuda.is_available() else -1
        )
        # Prohibited patterns for different categories
        self.prohibited_patterns = {
            'medical': [
                r'\b(?:diagnos|prescrib|medicat|cure|treat|therap)\w*\b',
                r'\b(?:disease|illness|disorder|syndrome)\s+(?:is|are|can be)\b',
                r'\b(?:take|consume|dose|dosage)\s+\d+\s*(?:mg|ml|pill|tablet)',
                r'\b(?:medical|clinical|physician|doctor)\s+(?:advice|consultation|opinion)',
            ],
            'legal': [
                r'\b(?:legal advice|lawsuit|sue|court|litigation)\b',
                r'\b(?:illegal|unlawful|crime|criminal|prosecut)\w*\b',
                r'\b(?:you should|must|have to)\s+(?:sign|agree|consent|contract)',
                r'\b(?:rights|obligations|liability|damages)\s+(?:are|include)\b',
            ],
            'financial': [
                r'\b(?:invest|buy|sell|trade)\s+(?:stock|crypto|bitcoin|forex)\b',
                r'\b(?:guaranteed|promise)\s+(?:return|profit|income|earnings)\b',
                r'\b(?:financial advisor|investment advice|trading strategy)\b',
                r'\b(?:tax|accounting|financial planning)\s+(?:advice|consultation)',
            ],
            'harmful': [
                r'\b(?:suicide|suicidal|kill\s+(?:your|my)self|end\s+(?:it|life))\b',
                r'\b(?:self[\-\s]?harm|hurt\s+(?:your|my)self|cutting)\b',
                r'\b(?:violence|violent|weapon|attack|assault)\b',
                r'\b(?:hate|discriminat|racist|sexist|homophobic)\b',
            ],
            'absolute': [
                r'\b(?:always|never|every|all|none|no one|everyone)\s+(?:will|must|should|is|are)\b',
                r'\b(?:definitely|certainly|guaranteed|assured|promise)\b',
                r'\b(?:only way|only solution|must do|have to)\b',
            ]
        }
        # Required elements for supportive responses
        self.supportive_elements = {
            'empathy': [
                'understand', 'hear', 'feel', 'acknowledge', 'recognize',
                'appreciate', 'empathize', 'relate', 'comprehend'
            ],
            'validation': [
                'valid', 'normal', 'understandable', 'natural', 'okay',
                'reasonable', 'makes sense', 'legitimate'
            ],
            'support': [
                'support', 'help', 'here for you', 'together', 'alongside',
                'assist', 'guide', 'accompany', 'with you'
            ],
            'hope': [
                'can', 'possible', 'able', 'capable', 'potential',
                'opportunity', 'growth', 'improve', 'better', 'progress'
            ],
            'empowerment': [
                'choice', 'decide', 'control', 'power', 'strength',
                'agency', 'capable', 'resource', 'ability'
            ]
        }
        # Crisis indicators
        self.crisis_indicators = [
            r'\b(?:want|going|plan)\s+to\s+(?:die|kill|end)\b',
            r'\b(?:no reason|point|hope)\s+(?:to|in)\s+(?:live|living|life)\b',
            r'\b(?:better off|world)\s+without\s+me\b',
            r'\bsuicide\s+(?:plan|method|attempt)\b',
            r'\b(?:final|last)\s+(?:goodbye|letter|message)\b'
        ]
        # Tone indicators
        self.negative_tone_words = [
            'stupid', 'idiot', 'dumb', 'pathetic', 'worthless',
            'loser', 'failure', 'weak', 'incompetent', 'useless'
        ]
        self.dismissive_phrases = [
            'just get over it', 'stop complaining', 'not a big deal',
            'being dramatic', 'overreacting', 'too sensitive'
        ]
    def validate_response(self, response: str, context: Dict[str, Any] = None) -> ValidationResult:
        """Comprehensive validation of a response"""
        issues = []
        warnings = []
        suggestions = []
        # Check for prohibited content
        prohibited_check = self._check_prohibited_content(response)
        if prohibited_check["found"]:
            issues.extend(prohibited_check["violations"])
            suggestions.extend(prohibited_check["suggestions"])
        # Check tone and sentiment
        tone_check = self._check_tone(response)
        if not tone_check["appropriate"]:
            warnings.extend(tone_check["issues"])
            suggestions.extend(tone_check["suggestions"])
        # Check for supportive elements
        support_check = self._check_supportive_elements(response)
        if support_check["missing"]:
            warnings.append(f"Missing supportive elements: {', '.join(support_check['missing'])}")
            suggestions.extend(support_check["suggestions"])
        # Check for crisis content in the user's input
        if context and context.get("user_input"):
            crisis_check = self._check_crisis_indicators(context["user_input"])
            if crisis_check["is_crisis"] and "crisis" not in response.lower():
                warnings.append("User may be in crisis but response doesn't address this")
                suggestions.append("Include crisis resources and immediate support options")
        # Calculate overall confidence
        confidence = self._calculate_confidence(issues, warnings)
        # Generate a refined response if needed
        refined_text = None
        if issues or (warnings and confidence < 0.7):
            refined_text = self._refine_response(response, issues, warnings, suggestions)
        return ValidationResult(
            is_valid=len(issues) == 0,
            issues=issues,
            warnings=warnings,
            suggestions=suggestions,
            confidence=confidence,
            refined_text=refined_text
        )
    def _check_prohibited_content(self, text: str) -> Dict[str, Any]:
        """Check for prohibited content patterns"""
        found_violations = []
        suggestions = []
        for category, patterns in self.prohibited_patterns.items():
            for pattern in patterns:
                if re.search(pattern, text, re.IGNORECASE):
                    found_violations.append(f"Contains {category} advice/content")
                    # Add category-specific suggestions
                    if category == "medical":
                        suggestions.append("Replace with: 'Consider speaking with a healthcare professional'")
                    elif category == "legal":
                        suggestions.append("Replace with: 'For legal matters, consult with a qualified attorney'")
                    elif category == "financial":
                        suggestions.append("Replace with: 'For financial decisions, consider consulting a financial advisor'")
                    elif category == "harmful":
                        suggestions.append("Include crisis resources and express immediate concern for safety")
                    elif category == "absolute":
                        suggestions.append("Use qualifying language like 'often', 'might', 'could' instead of absolutes")
                    break
        return {
            "found": len(found_violations) > 0,
            "violations": found_violations,
            "suggestions": suggestions
        }
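    # Illustrative match (not executed here): a draft like "You should take 200 mg
    # of ibuprofen" trips the 'medical' dosage pattern, so the result would be
    # {"found": True, "violations": ["Contains medical advice/content"], ...}.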
    def _check_tone(self, text: str) -> Dict[str, Any]:
        """Check the tone and sentiment of the response"""
        issues = []
        suggestions = []
        # Check sentiment
        try:
            sentiment_result = self.sentiment_analyzer(text[:512])[0]  # Limit length for the model
            sentiment_label = sentiment_result['label']
            # Check if too negative (1-2 stars = negative for this model)
            if '1' in sentiment_label or '2' in sentiment_label:
                issues.append("Response tone is too negative")
                suggestions.append("Add more supportive and hopeful language")
        except Exception:
            # Sentiment analysis is best-effort; the keyword checks below still run
            pass
        # Check for negative words
        text_lower = text.lower()
        found_negative = [word for word in self.negative_tone_words if word in text_lower]
        if found_negative:
            issues.append(f"Contains negative/judgmental language: {', '.join(found_negative)}")
            suggestions.append("Replace judgmental terms with supportive language")
        # Check for dismissive phrases
        found_dismissive = [phrase for phrase in self.dismissive_phrases if phrase in text_lower]
        if found_dismissive:
            issues.append("Contains dismissive language")
            suggestions.append("Acknowledge and validate the person's feelings instead")
        return {
            "appropriate": len(issues) == 0,
            "issues": issues,
            "suggestions": suggestions
        }
    def _check_supportive_elements(self, text: str) -> Dict[str, Any]:
        """Check for presence of supportive elements"""
        text_lower = text.lower()
        missing_elements = []
        suggestions = []
        element_scores = {}
        for element, keywords in self.supportive_elements.items():
            found = any(keyword in text_lower for keyword in keywords)
            element_scores[element] = found
            if not found:
                missing_elements.append(element)
        # Generate suggestions for missing elements
        if 'empathy' in missing_elements:
            suggestions.append("Add empathetic language like 'I understand how difficult this must be'")
        if 'validation' in missing_elements:
            suggestions.append("Validate their feelings with phrases like 'Your feelings are completely valid'")
        if 'support' in missing_elements:
            suggestions.append("Express support with 'I'm here to support you through this'")
        if 'hope' in missing_elements:
            suggestions.append("Include hopeful elements about growth and positive change")
        if 'empowerment' in missing_elements:
            suggestions.append("Emphasize their agency and ability to make choices")
        return {
            "missing": missing_elements,
            "present": [k for k, v in element_scores.items() if v],
            "suggestions": suggestions
        }
    def _check_crisis_indicators(self, text: str) -> Dict[str, Any]:
        """Check for crisis indicators in text"""
        for pattern in self.crisis_indicators:
            if re.search(pattern, text, re.IGNORECASE):
                return {
                    "is_crisis": True,
                    "pattern_matched": pattern,
                    "action": "Immediate crisis response needed"
                }
        return {"is_crisis": False}
    def _calculate_confidence(self, issues: List[str], warnings: List[str]) -> float:
        """Calculate confidence score for validation"""
        if issues:
            # Major issues severely impact confidence; clamp so the score never goes negative
            return max(0.0, 0.3 - 0.1 * len(issues))
        confidence = 1.0
        confidence -= 0.1 * len(warnings)  # Each warning reduces confidence
        return max(0.0, confidence)
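    # Worked examples of the scoring above (illustrative, not part of the API):
    #   2 issues, any warnings -> max(0.0, 0.3 - 0.2) = 0.1
    #   0 issues, 3 warnings   -> 1.0 - 0.3            = 0.7
    #   0 issues, 12 warnings  -> clamped to 0.0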
    def _refine_response(self, response: str, issues: List[str], warnings: List[str], suggestions: List[str]) -> str:
        """Attempt to refine the response based on issues found"""
        refined = response
        # Add a disclaimer for professional advice
        if any('advice' in issue for issue in issues):
            disclaimer = "\n\n*Please note: I'm here to provide support and guidance, but for specific professional matters, it's important to consult with qualified professionals.*"
            if disclaimer not in refined:
                refined += disclaimer
        # Add crisis resources if needed
        if any('crisis' in warning for warning in warnings):
            crisis_text = "\n\n**If you're in crisis, please reach out for immediate help:**\n- Crisis Hotline: 988 (US)\n- Crisis Text Line: Text HOME to 741741\n- International: findahelpline.com"
            if crisis_text not in refined:
                refined += crisis_text
        # Add a supportive closing if hope is missing
        if any('hope' in warning for warning in warnings):
            hopeful_closing = "\n\nRemember, you have the strength to navigate this challenge, and positive change is possible. I'm here to support you on this journey."
            if not any(phrase in refined.lower() for phrase in ['journey', 'strength', 'possible']):
                refined += hopeful_closing
        return refined
    def validate_user_input(self, text: str) -> ValidationResult:
        """Validate user input for safety and processability"""
        issues = []
        warnings = []
        suggestions = []
        # Check if empty
        if not text or not text.strip():
            issues.append("Empty input received")
            suggestions.append("Please share what's on your mind")
            return ValidationResult(False, issues, warnings, suggestions, 0.0)
        # Check length
        if len(text) > 5000:
            warnings.append("Input is very long")
            suggestions.append("Consider breaking this into smaller parts")
        # Check for crisis indicators
        crisis_check = self._check_crisis_indicators(text)
        if crisis_check["is_crisis"]:
            warnings.append("Crisis indicators detected")
            suggestions.append("Prioritize safety and provide crisis resources")
        # Check for spam/repetition
        if self._is_spam(text):
            issues.append("Input appears to be spam or repetitive")
            suggestions.append("Please share genuine thoughts or concerns")
        confidence = self._calculate_confidence(issues, warnings)
        return ValidationResult(
            is_valid=len(issues) == 0,
            issues=issues,
            warnings=warnings,
            suggestions=suggestions,
            confidence=confidence
        )
    def _is_spam(self, text: str) -> bool:
        """Simple spam detection"""
        # Check for excessive repetition
        words = text.lower().split()
        if len(words) > 10:
            unique_ratio = len(set(words)) / len(words)
            if unique_ratio < 0.3:  # Less than 30% unique words
                return True
        # Check for common spam patterns
        spam_patterns = [
            r'(?:buy|sell|click|visit)\s+(?:now|here|this)',
            r'(?:congratulations|winner|prize|lottery)',
            r'(?:viagra|pills|drugs|pharmacy)',
            r'(?:\$\$|money\s+back|guarantee)'  # '$' escaped so the pattern doesn't match every string
        ]
        for pattern in spam_patterns:
            if re.search(pattern, text, re.IGNORECASE):
                return True
        return False
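    # Illustrative thresholds: "buy now" repeated six times is 12 words with only
    # 2 unique ones (ratio ~0.17 < 0.3) and also matches the 'buy now' spam
    # pattern, so either check alone would flag it.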
    def get_crisis_resources(self, location: str = "global") -> Dict[str, Any]:
        """Get crisis resources based on location"""
        resources = {
            "global": {
                "name": "International Association for Suicide Prevention",
                "url": "https://www.iasp.info/resources/Crisis_Centres/",
                "text": "Find crisis centers worldwide"
            },
            "us": {
                "name": "988 Suicide & Crisis Lifeline",
                "phone": "988",
                "text": "Text HOME to 741741",
                "url": "https://988lifeline.org/"
            },
            "uk": {
                "name": "Samaritans",
                "phone": "116 123",
                "email": "jo@samaritans.org",
                "url": "https://www.samaritans.org/"
            },
            "india": {
                "name": "National Suicide Prevention Helpline",
                "phone": "91-9820466726",
                "additional": "Vandrevala Foundation: 9999666555"
            },
            "australia": {
                "name": "Lifeline",
                "phone": "13 11 14",
                "text": "Text 0477 13 11 14",
                "url": "https://www.lifeline.org.au/"
            }
        }
        return resources.get(location.lower(), resources["global"])
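
# A minimal, hedged smoke test of the validation flow. It assumes the
# transformers sentiment model can be downloaded; config is left as None since
# this module does not use it directly.
if __name__ == "__main__":
    tools = ValidationTools(config=None)
    draft = "You should definitely take 50 mg of this medication, it will cure you."
    result = tools.validate_response(draft, context={"user_input": "I feel hopeless lately."})
    print("valid:", result.is_valid)
    print("issues:", result.issues)
    print("warnings:", result.warnings)
    print("confidence:", result.confidence)
    if result.refined_text:
        print("refined:", result.refined_text)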