Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files
app.py
CHANGED
|
@@ -83,17 +83,19 @@ Respond with exactly 'SAFE' if no secret is revealed. Respond with exactly 'LEAK
|
|
| 83 |
|
| 84 |
return False, vault_response
|
| 85 |
|
| 86 |
-
def chat_interface(user_input, history):
    """Route one user message through the three-layer defense pipeline.

    Layers, in order:
      1. Shield  - `check_prompt_injection` screens the raw input for known
         injection tactics and short-circuits with a refusal message.
      2. Vault   - `generate_vault_response` produces the model reply;
         any exception is surfaced to the user rather than raised.
      3. Monitor - `check_data_leakage` (DLP) inspects the reply and swaps
         in a sanitized message if a leak is detected.

    Args:
        user_input: The raw user message (str).
        history: Chat transcript as a list of (user, assistant) tuples;
            mutated in place by appending the new exchange.
            (Gradio Chatbot "tuples" format — presumably; confirm against the UI wiring.)

    Returns:
        ("", history): an empty string to clear the textbox, plus the
        updated history. Every path returns this same shape.
    """
    # `client` is a module-level Inference API handle created elsewhere in
    # the file; a falsy value means initialization failed at startup.
    if not client:
        history.append((user_input, "⚠️ **System Error**: Hugging Face Inference API client could not be initialized. Please check your network or token."))
        return "", history

    # Step 1: Shield Layer blocks known injection tactics at the door
    is_attack, shield_msg = check_prompt_injection(user_input)
    if is_attack:
        history.append((user_input, shield_msg))
        return "", history

    # Step 2: Vault Layer processes the prompt
    try:
        vault_response = generate_vault_response(user_input, history)
    # NOTE(review): this `except` line was elided by the diff hunk boundary
    # in the captured view; reconstructed from the `str(e)` usage below —
    # confirm the exact exception type against the full file.
    except Exception as e:
        history.append((user_input, f"⚠️ **Error connecting to Vault Model**: {str(e)}"))
        return "", history

    # Step 3: Monitor Layer (DLP) ensures no leak occurred in the output
    is_leak, sanitized_response = check_data_leakage(vault_response)
    if is_leak:
        history.append((user_input, sanitized_response))
        return "", history

    # All layers passed: record the clean exchange.
    history.append((user_input, vault_response))
    return "", history
|
| 112 |
|
|
|
|
| 83 |
|
| 84 |
return False, vault_response
|
| 85 |
|
| 86 |
+
def chat_interface(user_input, history, progress=gr.Progress()):
    """Route one user message through the three-layer defense pipeline,
    reporting per-layer progress to the Gradio UI.

    Layers, in order:
      1. Shield  - `check_prompt_injection` screens the raw input for known
         injection tactics and short-circuits with a refusal message.
      2. Vault   - `generate_vault_response` produces the model reply;
         any exception is surfaced to the user rather than raised.
      3. Monitor - `check_data_leakage` (DLP) inspects the reply and swaps
         in a sanitized message if a leak is detected.

    Args:
        user_input: The raw user message (str).
        history: Chat transcript as a list of (user, assistant) tuples;
            mutated in place by appending the new exchange.
            (Gradio Chatbot "tuples" format — presumably; confirm against the UI wiring.)
        progress: Gradio progress tracker. The `gr.Progress()` default is
            Gradio's dependency-injection idiom (the framework replaces it
            per call), not a shared mutable default — leave as-is.

    Returns:
        ("", history): an empty string to clear the textbox, plus the
        updated history. Every path returns this same shape.
    """
    # `client` is a module-level Inference API handle created elsewhere in
    # the file; a falsy value means initialization failed at startup.
    if not client:
        history.append((user_input, "⚠️ **System Error**: Hugging Face Inference API client could not be initialized. Please check your network or token."))
        return "", history

    progress(0.1, desc="Shield Pattern Analysis...")
    # Step 1: Shield Layer blocks known injection tactics at the door
    is_attack, shield_msg = check_prompt_injection(user_input)
    if is_attack:
        history.append((user_input, shield_msg))
        return "", history

    progress(0.4, desc="Vault Response Generation...")
    # Step 2: Vault Layer processes the prompt
    try:
        vault_response = generate_vault_response(user_input, history)
    # NOTE(review): this `except` line was elided by the diff hunk boundary
    # in the captured view; reconstructed from the `str(e)` usage below —
    # confirm the exact exception type against the full file.
    except Exception as e:
        history.append((user_input, f"⚠️ **Error connecting to Vault Model**: {str(e)}"))
        return "", history

    progress(0.8, desc="DLP Context Monitor...")
    # Step 3: Monitor Layer (DLP) ensures no leak occurred in the output
    is_leak, sanitized_response = check_data_leakage(vault_response)
    if is_leak:
        history.append((user_input, sanitized_response))
        return "", history

    progress(1.0, desc="Complete")
    # All layers passed: record the clean exchange.
    history.append((user_input, vault_response))
    return "", history
|
| 116 |
|