Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
CHANGED
|
@@ -6,7 +6,7 @@ import re
|
|
| 6 |
import time
|
| 7 |
import requests
|
| 8 |
import gradio as gr
|
| 9 |
-
from typing import Tuple
|
| 10 |
|
| 11 |
OPENAI_API_URL = "https://api.openai.com/v1/responses"
|
| 12 |
OPENAI_MODEL_FALLBACK = ["gpt-4o-mini", "gpt-4o", "gpt-5-mini"]
|
|
@@ -25,13 +25,30 @@ Contenido a analizar:
|
|
| 25 |
{input}
|
| 26 |
"""
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
r"\bsqlmap\b", r"\\x", r"0x[0-9a-fA-F]{2,}", r"base64 -d", r"\\b\\$\\(", r"\\$\\{"
|
| 33 |
]
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
|
| 36 |
def call_openai_responses(prompt: str, api_key: str, models=None, timeout: int = 20) -> Tuple[bool, str]:
|
| 37 |
if models is None:
|
|
@@ -49,7 +66,7 @@ def call_openai_responses(prompt: str, api_key: str, models=None, timeout: int =
|
|
| 49 |
out = ""
|
| 50 |
if "output" in j:
|
| 51 |
if isinstance(j["output"], list):
|
| 52 |
-
parts = []
|
| 53 |
for item in j["output"]:
|
| 54 |
if isinstance(item, dict):
|
| 55 |
c = item.get("content") or item.get("text") or item.get("output_text")
|
|
@@ -89,11 +106,6 @@ def call_openai_responses(prompt: str, api_key: str, models=None, timeout: int =
|
|
| 89 |
return False, f"HTTP {r.status_code}: {msg}"
|
| 90 |
return False, "Ningún modelo disponible o permitido en la cuenta de OpenAI."
|
| 91 |
|
| 92 |
-
def contains_forbidden(text: str) -> bool:
|
| 93 |
-
if not text:
|
| 94 |
-
return False
|
| 95 |
-
return bool(FORBIDDEN_REGEX.search(text))
|
| 96 |
-
|
| 97 |
def safe_parse_json_from_model(text: str):
|
| 98 |
try:
|
| 99 |
return json.loads(text)
|
|
@@ -183,7 +195,7 @@ def generate_report(json_str: str, title: str = "Reporte Red Team") -> Tuple[str
|
|
| 183 |
return filename, filename
|
| 184 |
|
| 185 |
with gr.Blocks(analytics_enabled=False) as demo:
|
| 186 |
-
gr.Markdown("## 🧯
|
| 187 |
with gr.Row():
|
| 188 |
with gr.Column(scale=7):
|
| 189 |
inp = gr.Textbox(label="Pega aquí el correo RAW, URL o fragmento a analizar", lines=20, placeholder="Pega cabeceras, cuerpo o URL completa")
|
|
|
|
| 6 |
import time
|
| 7 |
import requests
|
| 8 |
import gradio as gr
|
| 9 |
+
from typing import Tuple, List
|
| 10 |
|
| 11 |
OPENAI_API_URL = "https://api.openai.com/v1/responses"
|
| 12 |
OPENAI_MODEL_FALLBACK = ["gpt-4o-mini", "gpt-4o", "gpt-5-mini"]
|
|
|
|
| 25 |
{input}
|
| 26 |
"""
|
| 27 |
|
| 28 |
# --------- Safe filter (separate plain checks instead of one big regex, to avoid errors) ----------
FORBIDDEN_KEYWORDS = [
    "exploit", "payload", "meterpreter", "msfconsole",
    "reverse shell", "sqlmap", "chmod", "chown", "exec "
]
FORBIDDEN_CMDLIKE = ["curl ", "wget ", "sudo ", "bash -i", "nc ", "ncat ", "rm -rf"]
# FIX: the diff shows "\x", which is a SyntaxError in Python (truncated \xXX escape).
# The intent — matching a literal backslash-x escape marker, as the old regex r"\\x"
# did — requires the escaped form "\\x".
FORBIDDEN_SUBSTR = ["$(", "${", "\\x", "base64 -d"]
FORBIDDEN_HEX_RE = re.compile(r"0x[0-9a-fA-F]{2,}")  # compiled separately; simple and safe


def contains_forbidden(text: str) -> bool:
    """Return True if *text* contains any blocklisted content.

    Checks, in order:
      1. case-insensitive keywords (exploit tooling names),
      2. case-insensitive shell-command fragments,
      3. case-sensitive raw substrings (shell expansion / escape markers),
      4. hex-literal blobs such as ``0xdeadbeef``.

    Empty or None input is never forbidden.
    """
    if not text:
        return False
    t = text.lower()
    if any(k in t for k in FORBIDDEN_KEYWORDS):
        return True
    if any(k in t for k in FORBIDDEN_CMDLIKE):
        return True
    # keep original casing for escape-sequence markers
    if any(s in text for s in FORBIDDEN_SUBSTR):
        return True
    if FORBIDDEN_HEX_RE.search(text):
        return True
    return False

# ---------------------------------------------------------------------------
|
| 52 |
|
| 53 |
def call_openai_responses(prompt: str, api_key: str, models=None, timeout: int = 20) -> Tuple[bool, str]:
|
| 54 |
if models is None:
|
|
|
|
| 66 |
out = ""
|
| 67 |
if "output" in j:
|
| 68 |
if isinstance(j["output"], list):
|
| 69 |
+
parts: List[str] = []
|
| 70 |
for item in j["output"]:
|
| 71 |
if isinstance(item, dict):
|
| 72 |
c = item.get("content") or item.get("text") or item.get("output_text")
|
|
|
|
| 106 |
return False, f"HTTP {r.status_code}: {msg}"
|
| 107 |
return False, "Ningún modelo disponible o permitido en la cuenta de OpenAI."
|
| 108 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
def safe_parse_json_from_model(text: str):
|
| 110 |
try:
|
| 111 |
return json.loads(text)
|
|
|
|
| 195 |
return filename, filename
|
| 196 |
|
| 197 |
with gr.Blocks(analytics_enabled=False) as demo:
|
| 198 |
+
gr.Markdown("## 🧯 Simulación Red Team (alto nivel) — Defender con IA")
|
| 199 |
with gr.Row():
|
| 200 |
with gr.Column(scale=7):
|
| 201 |
inp = gr.Textbox(label="Pega aquí el correo RAW, URL o fragmento a analizar", lines=20, placeholder="Pega cabeceras, cuerpo o URL completa")
|