# NOTE(review): removed non-source residue here (Hugging Face Spaces page header,
# "Sleeping" status, file-size line, commit-hash row, and a line-number gutter).
# It was web-scrape junk, not part of the Python module, and broke the syntax.
# main.py
import os
# Disable TensorFlow oneDNN warnings
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
# Disable torch compile warnings and optimizations for CPU-only devices
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"
import sys
import time
from backend import Backend
import config
def run_output_test_mode():
    """
    Run the application in output-guardrail testing mode.

    Interactively reads an input prompt and a manually entered LLM response
    from stdin, feeds both to ``Backend.test_output_guardrails``, and prints
    whether the output was approved (possibly modified) or blocked.
    Loops until the user types 'exit'/'quit' or presses Ctrl-C.

    Exits the process with status 1 if the backend cannot be initialized
    (e.g. missing presidio libraries for PII detection).
    """
    print("\n\n" + "=" * 60)
    print("\n--- OUTPUT GUARDRAIL TESTING MODE ---")
    print("π This mode allows you to test modular output guardrails with manual input")
    print(" You can enter both a prompt and the LLM's response to test filtering")
    print("=" * 60)
    try:
        # Initialize backend in output test mode
        app_backend = Backend(output_test_mode=True)
    except Exception as e:
        print(f"\nβ Error initializing output testing backend: {e}")
        print(" Make sure you have the presidio libraries installed for PII detection.")
        sys.exit(1)
    while True:
        try:
            print(f"\n{'='*60}")
            print("π OUTPUT GUARDRAIL TEST")
            print(f"{'='*60}")
            # Get prompt from user
            prompt = input("\nπ Enter the input prompt (or 'exit' to quit): ")
            if prompt.lower() in ["exit", "quit"]:
                print("\nπ Exiting output test mode. Goodbye!")
                break
            # Get manual output from user; two consecutive empty lines end input.
            print("\nπ€ Enter the LLM output you want to test:")
            print("(Press Enter twice to finish your input)\n")
            lines = []
            empty_line_count = 0
            while True:
                line = input()
                if line == "":
                    empty_line_count += 1
                    if empty_line_count >= 2:
                        break
                    # Keep the first blank line; a trailing one is stripped below.
                    lines.append(line)
                else:
                    empty_line_count = 0
                    lines.append(line)
            manual_output = "\n".join(lines).strip()
            if not manual_output:
                print("β No output provided. Please try again.")
                continue
            # BUGFIX: this f-string (and the "OUTPUT APPROVED" literal below) was
            # split across two physical lines in the original, which is a syntax
            # error; rejoined into single-line print calls.
            print(f"\nβ Testing output ({len(manual_output)} characters) against modular guardrails...\n")
            # Test the output against guardrails
            processed_output, is_safe = app_backend.test_output_guardrails(prompt, manual_output)
            print(f"\n{'='*60}")
            print("π GUARDRAIL TEST RESULTS")
            print(f"{'='*60}")
            if is_safe:
                print("β Result: OUTPUT APPROVED")
                print("\nπ Final output after guardrail processing:")
                print(f"'{processed_output}'")
                if processed_output != manual_output:
                    print(f"\nβ οΈ Note: Output was modified by guardrails")
                    print(f" Original length: {len(manual_output)} characters")
                    print(f" Modified length: {len(processed_output)} characters")
            else:
                # When blocked, the backend returns the block reason in place of output.
                print("π Result: OUTPUT BLOCKED")
                print(f"\nβ Reason: {processed_output}")
        except KeyboardInterrupt:
            print("\nπ Exiting output test mode. Goodbye!")
            break
        except Exception as e:
            print(f"\n\nβ An error occurred: {e}")
def run_interactive_mode(app_backend: Backend):
    """
    Drive the interactive chat session on stdin/stdout.

    Repeatedly prompts the user, routes each prompt through
    ``app_backend.process_request`` (streaming), and either prints the
    guardrail rejection message or streams the chatbot response chunk by
    chunk. Ends on 'exit'/'quit' or Ctrl-C.
    """
    banner = "=" * 60
    print("\n\n" + banner)
    print("\n--- INTERACTIVE MODE ---")
    print("π AI Detection: Finetuned model will scan all prompts for attacks")
    print("Enter your prompt below. Type 'exit' or 'quit' to end the session.")
    print(banner)
    while True:
        try:
            user_text = input("\nπ€ You: ")
            if user_text.lower() in ["exit", "quit"]:
                print("\nπ Exiting interactive mode. Goodbye!")
                break
            stream, approved, _cleaned_prompt = app_backend.process_request(
                user_text, stream=True
            )
            # Guardrail rejection: the "stream" slot carries the system message.
            if not approved:
                print(f" π System: {stream}")
                continue
            print("\nπ€ Chatbot (streaming): ", end="")
            collected = ""
            for piece in stream:
                collected += piece
                print(piece, end="", flush=True)
                # Small delay to make the streaming visible to a human reader.
                time.sleep(0.05)
            print()  # For the newline
        except KeyboardInterrupt:
            print("\nπ Exiting interactive mode. Goodbye!")
            break
        except Exception as e:
            print(f"\n\nβ An error occurred: {e}")
def main():
    """
    Main entry point. Initializes the backend and runs in the configured mode.

    If the first CLI argument is 'output_test', runs the output-guardrail
    testing loop instead of the normal interactive session. Exits with
    status 1 when backend initialization fails.
    """
    args = sys.argv[1:]
    # Output-testing mode bypasses the normal backend construction below.
    if args and args[0] == "output_test":
        run_output_test_mode()
        return
    print("=" * 60)
    print(" Guardrails System")
    print(" π AI-powered attack detection with finetuned model")
    print("=" * 60)
    try:
        app_backend = Backend()
    except Exception as e:
        print(f"\nβ Error initializing backend: {e}")
        print(" Make sure you have the transformers library installed for AI Detection Mode.")
        sys.exit(1)
    run_interactive_mode(app_backend)
# Script entry guard: run main() only when executed directly, not on import.
# BUGFIX: removed a stray trailing " |" (table residue) after main() that
# made the line a syntax error.
if __name__ == "__main__":
    main()