Spaces:
Build error
Build error
import os
import time

import gradio as gr

# Disable analytics and keep logs minimal.
os.environ["GRADIO_ANALYTICS_ENABLED"] = "false"

try:
    from openai import OpenAI
    from openai import (
        APIError,
        APIConnectionError,
        RateLimitError,
        AuthenticationError,
    )
except Exception:
    # Error classes move around between SDK versions; keep broad
    # compatibility by degrading everything to plain Exception.
    OpenAI = None
    APIError = Exception
    APIConnectionError = Exception
    RateLimitError = Exception
    AuthenticationError = Exception

# Model used for the probe call; overridable via environment.
DEFAULT_MODEL = os.getenv("OPENAI_TEST_MODEL", "gpt-4o-mini")
def mask_key(key: str) -> str:
    """Return a redacted preview of *key* that is safe to display.

    Shows only the first 8 and last 4 characters; keys too short to mask
    meaningfully collapse to "****".
    """
    # Must be <= 12 (not < 12): for a key of exactly 12 characters,
    # key[:8] + key[-4:] reproduces the whole key, defeating the mask.
    if not key or len(key) <= 12:
        return "****"
    return key[:8] + "..." + key[-4:]
def quick_healthcheck(client: "OpenAI", model: str) -> dict:
    """
    Minimal API call to validate the key and measure latency.

    Uses the Responses API with a 1-2 token output for minimal cost.

    Args:
        client: Initialized OpenAI client.
        model: Model name to probe.

    Returns:
        On success: ``{"ok": True, "latency_ms", "model_used", "output_preview"}``.
        On failure: ``{"ok": False, "error_type", "message"}``.
    """
    t0 = time.time()
    try:
        # Prefer the Responses API (SDK v1).
        resp = client.responses.create(
            model=model,
            input="ping",
            max_output_tokens=2,
        )
        latency_ms = int((time.time() - t0) * 1000)
        # Prefer `output_text` (a plain string) over `output`: in the
        # Responses API, `output` is a list of output items and would
        # leak raw objects into the JSON preview shown to the user.
        out = getattr(resp, "output_text", None) or getattr(resp, "output", None)
        # Normalized response
        return {
            "ok": True,
            "latency_ms": latency_ms,
            "model_used": model,
            "output_preview": (out[:120] + "…") if isinstance(out, str) and len(out) > 120 else out,
        }
    except AuthenticationError as e:
        return {"ok": False, "error_type": "auth", "message": str(e)}
    except RateLimitError as e:
        return {"ok": False, "error_type": "rate_limit", "message": str(e)}
    except APIConnectionError as e:
        return {"ok": False, "error_type": "network", "message": str(e)}
    except APIError as e:
        # Errors carrying a status_code (e.g. 404 model not found, 429, 500s)
        status = getattr(e, "status_code", None)
        if status == 404:
            return {"ok": False, "error_type": "model_not_found", "message": str(e)}
        elif status == 401:
            return {"ok": False, "error_type": "auth", "message": str(e)}
        elif status == 429:
            return {"ok": False, "error_type": "rate_limit", "message": str(e)}
        else:
            return {"ok": False, "error_type": f"api_{status}", "message": str(e)}
    except Exception as e:
        return {"ok": False, "error_type": "unknown", "message": str(e)}
| def test_key(use_secret: bool, pasted_key: str, model: str): | |
| """ | |
| Prueba la API Key. Si `use_secret` es True, usa la de entorno (Space Secret). | |
| Si es False, toma la pegada en la UI (no se persiste). | |
| """ | |
| key_source = "SECRET del Space" if use_secret else "Clave pegada en UI" | |
| api_key = None | |
| if use_secret: | |
| api_key = os.getenv("OPENAI_API_KEY", "").strip() | |
| if not api_key: | |
| return { | |
| "estado": "❌ No se encontró `OPENAI_API_KEY` en los Secrets del Space.", | |
| "detalle": "Ve a Settings → Repository secrets y crea uno llamado OPENAI_API_KEY.", | |
| "clave_usada": "—", | |
| } | |
| else: | |
| pasted_key = (pasted_key or "").strip() | |
| if not pasted_key: | |
| return { | |
| "estado": "❌ Debes pegar una API Key o marcar 'Usar SECRET del Space'.", | |
| "detalle": "La clave no se guarda; se usa solo para esta prueba.", | |
| "clave_usada": "—", | |
| } | |
| api_key = pasted_key | |
| if OpenAI is None: | |
| return { | |
| "estado": "❌ SDK de OpenAI no disponible", | |
| "detalle": "Revisa versión del paquete `openai`.", | |
| "clave_usada": mask_key(api_key), | |
| } | |
| client = OpenAI(api_key=api_key) | |
| # Validar con llamada mínima | |
| result = quick_healthcheck(client, model=model) | |
| if result.get("ok"): | |
| return { | |
| "estado": f"✅ ¡Clave válida! ({key_source})", | |
| "detalle": f"Modelo: {result['model_used']} | Latencia: {result['latency_ms']} ms | Vista previa: {result['output_preview']}", | |
| "clave_usada": mask_key(api_key), | |
| } | |
| else: | |
| et = result.get("error_type") | |
| msg = result.get("message", "") | |
| explanation = { | |
| "auth": "La clave parece inválida o no pertenece a tu organización/proyecto.", | |
| "rate_limit": "Se alcanzó el límite de velocidad/cuota. Intenta de nuevo más tarde o ajusta el plan.", | |
| "model_not_found": "El modelo indicado no existe o no tienes acceso. Prueba con otro modelo.", | |
| "network": "Fallo de conexión. Verifica la red saliente del Space/servidor.", | |
| }.get(et, "Error no especificado.") | |
| return { | |
| "estado": f"❌ Error: {et}", | |
| "detalle": f"{explanation}\nMensaje de la API: {msg}", | |
| "clave_usada": mask_key(api_key), | |
| } | |
# --- UI definition --------------------------------------------------------
with gr.Blocks(title="Verificador de OpenAI API Key") as demo:
    gr.Markdown(
        "# 🔐 Verificador de OpenAI API Key\nPrueba tu clave de forma segura antes de usarla en otras apps."
    )

    with gr.Row():
        secret_toggle = gr.Checkbox(
            label="Usar SECRET del Space (`OPENAI_API_KEY`)",
            value=True,
        )
        model_picker = gr.Dropdown(
            choices=[
                "gpt-4o-mini",
                "gpt-4o",
                "gpt-4o-realtime-preview-2024-12-17",
                "gpt-4.1-mini",
            ],
            value=DEFAULT_MODEL,
            label="Modelo para prueba",
        )

    key_box = gr.Textbox(
        label="Pega tu API Key (opcional si usas SECRET)",
        type="password",
        placeholder="sk-xxxxxxxxxxxxxxxxxxxxxxxx",
    )
    run_button = gr.Button("Probar clave")
    result_view = gr.JSON(label="Resultado")

    # Wire the button to the validation routine.
    run_button.click(fn=test_key, inputs=[secret_toggle, key_box, model_picker], outputs=result_view)
if __name__ == "__main__":
    # Bind to all interfaces; honor a platform-provided PORT (default 7860).
    port = int(os.getenv("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port)