# llm_guard was tested for PII removal and general prompt validation. However,
# llm_guard is very slow. It was replaced by Presidio.
# from llm_guard import scan_prompt
# from llm_guard.input_scanners import Anonymize, PromptInjection, TokenLimit, Toxicity
# from llm_guard.vault import Vault
# class GuardrailManager:
#     def __init__(self, is_champ: bool) -> None:
#         self.vault = Vault()
#         self.scanners = (
#             [
#                 Anonymize(self.vault),
#                 PromptInjection(),
#                 TokenLimit(
#                     limit=10_000
#                 ),  # TODO: Confirm that this token limit is appropriate
#                 Toxicity(),
#             ]
#             if is_champ
#             else [Anonymize(self.vault)]
#         )
#         self.is_champ = is_champ
#
#     def sanitize(self, text: str) -> str:
#         sanitized_text, is_valid, _ = scan_prompt(self.scanners, text)
#         if not all(is_valid.values()):
#             # TODO: What should we do here?
#             pass
#         return sanitized_text