Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,429 +1,199 @@
-
 import os
-import
 import base64
 import requests
-import
-import concurrent.futures
-from io import BytesIO
-from PIL import Image
-from openai import OpenAI
-import logging
-
-# Configuración de logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
 
-
-
-if not api_key:
-    logger.warning("SAMBANOVA_API_KEY no encontrada en variables de entorno")
-    client = None
-else:
-    try:
-        # Inicialización simple del cliente (ajusta según SDK real)
-        client = OpenAI(
-            api_key=api_key,
-            base_url="https://api.sambanova.ai/v1"
-        )
-    except Exception as e:
-        logger.error(f"Error inicializando cliente OpenAI: {e}")
-        client = None
-
-# Configuración para la API de imágenes (ReVE como ejemplo)
-API_URL = "https://api.reve.com/v1/image/create"
-CARPETA_SALIDA = "generaciones_reve"
-os.makedirs(CARPETA_SALIDA, exist_ok=True)
 
-#
 MODELS = {
     "general_fast": {
         "name": "Meta-Llama-3.1-8B-Instruct",
         "role": "🔄 Respuestas rápidas y generales",
-        "description": "
     },
     "general_smart": {
         "name": "Meta-Llama-3.3-70B-Instruct",
-        "role": "🧠 Razonamiento
-        "description": "
     },
     "coding_expert": {
         "name": "DeepSeek-V3.1",
-        "role": "💻 Programación y
-        "description": "
     },
     "coding_alt": {
         "name": "DeepSeek-V3-0324",
         "role": "⚡ Código rápido",
-        "description": "Alternativa
     },
     "massive_brain": {
-        "name": "gpt-oss-120b",
-        "role": "🏛️ Sabiduría
-        "description": "
     },
     "specialized_1": {
         "name": "DeepSeek-V3.1-Terminus",
-        "role": "🎯
-        "description": "
     },
     "specialized_2": {
         "name": "Llama-3.3-Swallow-70B-Instruct-v0.4",
-        "role": "
-        "description": "
     },
     "multilingual": {
         "name": "Qwen3-32B",
         "role": "🌍 Multilingüe",
-        "description": "
     },
     "arabic_special": {
         "name": "ALLaM-7B-Instruct-preview",
-        "role": "🕌
-        "description": "Experto en
     },
     "vision_expert": {
         "name": "Llama-4-Maverick-17B-128E-Instruct",
-        "role": "👁️
-        "description": "
     }
 }
 
-#
-
-
-
-
-
-
-
-
-
-    vision_keywords = ['imagen', 'foto', 'ver', 'mostrar', 'analizar imagen', 'describir imagen']
-
-    try:
-        if any(keyword in prompt_lower for keyword in coding_keywords):
-            if "rápido" in prompt_lower or "simple" in prompt_lower:
-                return MODELS["coding_alt"]["name"], MODELS["coding_alt"]["role"]
-            return MODELS["coding_expert"]["name"], MODELS["coding_expert"]["role"]
-        elif any(keyword in prompt_lower for keyword in complex_reasoning):
-            if "muy complejo" in prompt_lower or "extremadamente" in prompt_lower:
-                return MODELS["massive_brain"]["name"], MODELS["massive_brain"]["role"]
-            return MODELS["general_smart"]["name"], MODELS["general_smart"]["role"]
-        elif any(keyword in prompt_lower for keyword in japanese_keywords):
-            return MODELS["specialized_2"]["name"], MODELS["specialized_2"]["role"]
-        elif any(keyword in prompt_lower for keyword in arabic_keywords):
-            return MODELS["arabic_special"]["name"], MODELS["arabic_special"]["role"]
-        elif any(keyword in prompt_lower for keyword in multilingual_keywords):
-            return MODELS["multilingual"]["name"], MODELS["multilingual"]["role"]
-        elif any(keyword in prompt_lower for keyword in vision_keywords):
-            return MODELS["vision_expert"]["name"], MODELS["vision_expert"]["role"]
-        else:
-            # Fallback si no se puede consultar el cliente
-            return MODELS["general_smart"]["name"], MODELS["general_smart"]["role"]
-    except Exception as e:
-        logger.error(f"Error en select_best_model: {e}")
-        return MODELS["general_smart"]["name"], MODELS["general_smart"]["role"]
-
-# Información de modelos
-def get_models_info():
-    info = "## 🎯 Modelos Disponibles y sus Especialidades:\n\n"
-    for key, model in MODELS.items():
-        info += f"**{model['role']}**\n"
-        info += f"`{model['name']}` - {model.get('description', '')}\n\n"
-    return info
-
-# Utilidades de imagen
-def encode_image(image_path):
-    try:
-        with open(image_path, "rb") as image_file:
-            return base64.b64encode(image_file.read()).decode('utf-8')
-    except Exception as e:
-        logger.error(f"Error codificando imagen: {e}")
-        return None
-
-def guardar_imagen_local(img, index):
-    try:
-        timestamp = int(time.time())
-        nombre_archivo = f"reve_{timestamp}_{index}.png"
-        ruta_completa = os.path.join(CARPETA_SALIDA, nombre_archivo)
-        img.save(ruta_completa, "PNG")
-        return ruta_completa
-    except Exception as e:
-        logger.error(f"Error guardando imagen: {e}")
         return None
-
-
     payload = {
-        "
-        "
-        "
     }
     headers = {
-        "Authorization": f"Bearer {
        "Content-Type": "application/json"
     }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    if
-
-
-
-
-
-
-    num_imagenes = min(num_imagenes, 8)
-
-    try:
-        with concurrent.futures.ThreadPoolExecutor(max_workers=min(num_imagenes, 4)) as executor:
-            futuros = [
-                executor.submit(llamar_api, prompt.strip(), ratio, version, api_key, i)
-                for i in range(num_imagenes)
             ]
-
-            for futuro in concurrent.futures.as_completed(futuros):
-                try:
-                    img, creditos, error = futuro.result(timeout=120)
-                    if img:
-                        imagenes_nuevas.append(img)
-                        creditos_totales += creditos
-                    if error:
-                        errores.append(error)
-                except concurrent.futures.TimeoutError:
-                    errores.append("Timeout en generación")
-    except Exception as e:
-        logger.error(f"Error en generación batch: {e}")
-        errores.append(f"Error del sistema: {str(e)}")
-
-    if imagenes_nuevas:
-        mensaje = f"✅ {len(imagenes_nuevas)} imágenes generadas | Créditos: {creditos_totales}"
-        if errores:
-            mensaje += f" | Errores: {len(errores)}"
-        return imagenes_nuevas, mensaje
     else:
-
-        return [], f"❌ Error: {error_msg}"
-
-# Lógica de chat
-def chat_logic(message, history, selected_model, api_key_ui, ratio, version, num_imagenes):
-    user_text = message.get("text", "").strip()
-    files = message.get("files", [])
-
-    if not user_text and not files:
-        yield "Por favor, ingresa un texto o sube una imagen."
-        return
-
-    if user_text.lower() in ["/modelos", "/help", "/ayuda"]:
-        yield get_models_info()
-        return
 
-
-    model_role = ""
-    messages_payload = []
-
-    try:
-        if files:
-            target_model = MODELS["vision_expert"]["name"]
-            model_role = MODELS["vision_expert"]["role"]
-            image_b64 = encode_image(files[0])
-            if not image_b64:
-                yield "❌ Error procesando la imagen."
-                return
-
-            messages_payload = [
-                {
-                    "role": "user",
-                    "content": [
-                        {"type": "text", "text": user_text or "Describe esta imagen en detalle."},
-                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}}
-                    ]
-                }
-            ]
-            yield f"👁️ **Modelo Seleccionado:** `{model_role}`\n*Analizando imagen...*\n\n"
 
-
-
-            target_model, model_role = select_best_model(user_text)
-        else:
-            # En caso de que selected_model no exista en MODELS, hacer fallback
-            if selected_model not in MODELS:
-                target_model, model_role = MODELS["general_smart"]["name"], MODELS["general_smart"]["role"]
-            else:
-                target_model = MODELS[selected_model]["name"]
-                model_role = MODELS[selected_model]["role"]
-
-            messages_payload = [
-                {"role": "system", "content": f"Eres un asistente especializado en {model_role}. Responde de manera útil y detallada en el mismo idioma del usuario."},
-                {"role": "user", "content": user_text}
-            ]
-
-        yield f"🧠 **Modelo Seleccionado:** `{model_role}`\n*Procesando tu solicitud...*\n\n"
-
-        if not client:
-            yield "❌ Error: Cliente de API no configurado. Verifica tu API Key."
-            return
-
-        stream = client.chat.completions.create(
-            model=target_model,
-            messages=messages_payload,
-            temperature=0.3,
-            top_p=0.9,
-            stream=True
-        )
 
-
-
-
-
-
-            if delta:
-                content = getattr(delta, "content", None)
-            else:
-                # Fallback: intentar extraer de 'message' (varía por SDK)
-                content = getattr(chunk.choices[0], "message", {}).get("content") if hasattr(chunk.choices[0], "message") else None
 
-
-                partial_response += content
-                yield partial_response
-
-    except Exception as e:
-        logger.error(f"Error en chat_logic: {e}")
-        yield f"❌ Error: {str(e)}"
-
-# CSS (sin cambios funcionales)
-custom_css = """
-:root {
-    --primary: #ff6b6b;
-    --secondary: #4ecdc4;
-    --dark: #1a1a2e;
-    --accent: #ffd93d;
-}
-.gradio-container { background: linear-gradient(135deg, var(--dark) 0%, #16213e 100%); min-height: 100vh; font-family: 'Segoe UI', system-ui, sans-serif; }
-.dark .chatbot { background: rgba(255,255,255,0.95) !important; border-radius: 15px !important; box-shadow: 0 8px 32px rgba(0,0,0,0.3) !important; backdrop-filter: blur(10px); border: 1px solid rgba(255,255,255,0.2); }
-h1 { background: linear-gradient(45deg, var(--primary), var(--accent)); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text; text-align: center; font-size: 2.8em !important; margin-bottom: 5px !important; font-weight: 800 !important; }
-.model-selector { background: rgba(255,255,255,0.1) !important; border-radius: 12px !important; padding: 15px !important; border: 1px solid rgba(255,255,255,0.2) !important; }
-.model-info { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; border-radius: 12px !important; padding: 20px !important; color: white !important; margin-bottom: 20px !important; }
-.textbox { border-radius: 12px !important; border: 2px solid var(--secondary) !important; background: rgba(255,255,255,0.95) !important; }
-button.primary { background: linear-gradient(45deg, var(--primary), var(--accent)) !important; border: none !important; border-radius: 10px !important; color: #1a1a2e !important; font-weight: 700 !important; transition: all 0.3s ease !important; }
-button.primary:hover { transform: translateY(-2px); box-shadow: 0 6px 20px rgba(255,217,61,0.4) !important; }
-.gallery { border-radius: 12px !important; padding: 15px; background: rgba(255,255,255,0.1) !important; }
-.model-badge { background: linear-gradient(45deg, #4ecdc4, #44a08d) !important; color: white !important; padding: 4px 12px !important; border-radius: 20px !important; font-size: 0.8em !important; font-weight: 600 !important; }
-"""
-
-# Construcción de la interfaz Gradio (modularizada)
-def create_interface():
-    with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
-        gr.Markdown("# 🤖 BATUTO-ART: Ejército de IAs Especializadas")
-        gr.Markdown("### Cada modelo tiene su superpoder. ¡Elige el perfecto para tu tarea!")
-
-        galeria_state = gr.State([])
 
         with gr.Row():
-
-
-
-
-
-            o puedes seleccionar manualmente el especialista que necesitas.</p>
-            </div>
-            """)
-
-        with gr.Tab("💬 Chat Inteligente"):
-            with gr.Row():
-                with gr.Column(scale=1):
-                    gr.Markdown("### 🎛️ Configuración del Modelo")
-
-                    model_selector = gr.Dropdown(
-                        choices=["auto"] + list(MODELS.keys()),
-                        value="auto",
-                        label="🔧 Selecciona el Modelo",
-                        info="'Auto' elige automáticamente el mejor modelo",
-                        elem_classes="model-selector"
-                    )
-
-                    def update_model_info(selected_model):
-                        if selected_model == "auto":
-                            return "🤖 **Modo Automático:** El sistema elegirá el mejor modelo para tu tarea"
-                        else:
-                            model = MODELS[selected_model]
-                            return f"🎯 **{model['role']}**\n\n{model.get('description','')}"
-
-                    model_info = gr.Markdown(value=update_model_info("auto"), label="Información del Modelo")
-                    model_selector.change(update_model_info, inputs=[model_selector], outputs=[model_info])
-
-                    gr.Markdown("### 💡 Comandos Especiales")
-                    gr.Markdown("- Escribe `/modelos` para ver todos los modelos disponibles")
-                    gr.Markdown("- Escribe `/help` para ayuda")
-
-                with gr.Column(scale=3):
-                    chatbot = gr.ChatInterface(
-                        fn=lambda message, history: chat_logic(message, history, "auto", None, "1:1", "latest", 1),
-                        multimodal=True,
-                        textbox=gr.MultimodalTextbox(
-                            file_types=["image"],
-                            placeholder="Escribe tu mensaje o sube una imagen...",
-                            show_label=False,
-                        ),
-                        title="Chat con Especialistas de IA",
-                        examples=[
-                            ["¿Puedes revisar este código Python?"],
-                            ["Explícame la teoría de la relatividad"],
-                            ["Traduce 'hola' al japonés"],
-                            ["Analiza esta imagen y descríbemela"]
-                        ]
-                    )
 
-
-            with gr.Column(scale=1):
-                gr.Markdown("### ⚙️ Configuración de Imágenes")
 
-
-                with gr.Row():
-                    limpiar_btn = gr.Button("🧹 Limpiar", size="sm")
-                    generar_btn = gr.Button("🚀 Generar Imágenes", variant="primary")
-                with gr.Row():
-                    ratio = gr.Dropdown(choices=["16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"], value="1:1", label="Relación de aspecto")
-                    num_imagenes = gr.Slider(1, 4, step=1, value=1, label="Número de imágenes")
-                    version = gr.Dropdown(choices=["latest", "reve-create@20250915"], value="latest", label="Versión del modelo")
 
-
-                gr.Markdown("### 🖼️ Resultados")
-                salida_galeria = gr.Gallery(label="Imágenes Generadas", columns=2, height=400)
-                mensaje_salida = gr.Markdown()
-                gr.Markdown("### 📚 Historial de Sesión")
-                galeria_historial = gr.Gallery(label="Todas las imágenes generadas", columns=4, height=300)
 
-
-
-
-
-                    outputs=[galeria_historial, galeria_state]
                 )
 
-
 
 if __name__ == "__main__":
-    demo
-    demo.launch(server_name="0.0.0.0" if os.getenv("DEPLOY") else "127.0.0.1", share=False)
 import os
+import json
 import base64
+import random
 import requests
+import io
 
+import gradio as gr
+from PIL import Image
 
+# ===========================
+# CLAVES PARA SAMBANOVA
+# ===========================
+API_KEY = os.getenv("REVE_API_KEY")
+BASE_URL = "https://api.sambanova.ai/v1"
+
+if not API_KEY:
+    raise ValueError("⚠️ ERROR: No se encontró la variable REVE_API_KEY en HuggingFace Spaces.")
+
+# ===========================
+# FUNCIÓN SELECT MODEL
+# ===========================
+def select_model(preference):
+    if preference == "Lento 🧠 (mejor calidad)":
+        return {
+            "name": "Qwen2.5-72B-Instruct",
+            "description": "Máxima calidad de razonamiento"
+        }
+    else:
+        return {
+            "name": "Meta-Llama-3.1-8B-Instruct",
+            "description": "Rápido y eficiente"
+        }
+
+# ===========================
+# DICCIONARIO DE MODELOS
+# ===========================
 MODELS = {
     "general_fast": {
         "name": "Meta-Llama-3.1-8B-Instruct",
         "role": "🔄 Respuestas rápidas y generales",
+        "description": "Conversación ligera y eficiente."
     },
     "general_smart": {
         "name": "Meta-Llama-3.3-70B-Instruct",
+        "role": "🧠 Razonamiento profundo",
+        "description": "Análisis detallado y avanzado."
     },
     "coding_expert": {
         "name": "DeepSeek-V3.1",
+        "role": "💻 Programación y debugging",
+        "description": "Ideal para desarrollo."
     },
     "coding_alt": {
         "name": "DeepSeek-V3-0324",
         "role": "⚡ Código rápido",
+        "description": "Alternativa veloz."
     },
     "massive_brain": {
+        "name": "gpt-oss-120b",
+        "role": "🏛️ Sabiduría masiva",
+        "description": "Problemas pesados y complejos."
     },
     "specialized_1": {
         "name": "DeepSeek-V3.1-Terminus",
+        "role": "🎯 Especialista técnico",
+        "description": "Tareas científicas y avanzadas."
     },
     "specialized_2": {
         "name": "Llama-3.3-Swallow-70B-Instruct-v0.4",
+        "role": "🔥 Sin censura",
+        "description": "Modelo sin restricciones."
     },
     "multilingual": {
         "name": "Qwen3-32B",
         "role": "🌍 Multilingüe",
+        "description": "Múltiples idiomas."
     },
     "arabic_special": {
         "name": "ALLaM-7B-Instruct-preview",
+        "role": "🕌 Estilo árabe y moda íntima",
+        "description": "Experto en estilos del Medio Oriente."
     },
     "vision_expert": {
         "name": "Llama-4-Maverick-17B-128E-Instruct",
+        "role": "👁️ Visión avanzada",
+        "description": "Análisis de imágenes."
     }
 }
 
+# ===========================
+# CSS PERSONALIZADO
+# ===========================
+custom_css = """
+.gradio-container {max-width: 900px !important; margin: auto;}
+.model-box {padding: 10px; border-radius: 10px; background: #1d1d1d; margin-bottom: 6px;}
+"""
+
+# ===========================
+# CODIFICAR IMAGEN EN BASE64
+# ===========================
+def encode_image(img):
+    if img is None:
         return None
+    buffer = io.BytesIO()
+    img.save(buffer, format="PNG")
+    return base64.b64encode(buffer.getvalue()).decode()
+
+# ===========================
+# API CALL SAMBANOVA
+# ===========================
+def call_sambanova(model, messages, images=None):
+    url = f"{BASE_URL}/chat/completions"
+
     payload = {
+        "model": model,
+        "messages": messages,
+        "stream": False
     }
+
+    if images:
+        payload["images"] = images
+
     headers = {
+        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
     }
 
+    response = requests.post(url, json=payload, headers=headers)
+
+    if response.status_code != 200:
+        return f"⚠️ Error en API: {response.status_code}\n{response.text}"
+
+    data = response.json()
+    return data["choices"][0]["message"]["content"]
+
+# ===========================
+# CHAT PRINCIPAL
+# ===========================
+def chat_logic(user_text, user_image, model_selection, history):
+    selected = MODELS[model_selection]
+    model_name = selected["name"]
+
+    msg = []
+    images_encoded = None
+
+    msg.append({"role": "system", "content": selected["role"]})
+
+    if user_image is not None:
+        images_encoded = [encode_image(user_image)]
+        msg.append({
+            "role": "user",
+            "content": [
+                {"type": "text", "text": user_text or "Describe esta imagen."},
+                {"type": "image", "image": images_encoded[0]}
             ]
+        })
     else:
+        msg.append({"role": "user", "content": user_text})
 
+    reply = call_sambanova(model_name, msg, images_encoded)
 
+    history = history or []
+    history.append(("🧍 Usuario: " + (user_text or "[Imagen]"), "🤖 Modelo: " + reply))
+    return history, reply
 
+# ===========================
+# INTERFAZ
+# ===========================
+def create_ui():
+    with gr.Blocks(css=custom_css) as demo:
 
+        gr.Markdown("# 🤖 Chat avanzado con modelos SambaNova")
 
         with gr.Row():
+            model_selection = gr.Dropdown(
+                choices=list(MODELS.keys()),
+                value="general_fast",
+                label="Modelo"
+            )
 
+        user_text = gr.Textbox(label="Escribe tu mensaje")
+        user_image = gr.Image(label="Sube una imagen (opcional)", type="pil")
 
+        chatbox = gr.Chatbot(label="Conversación")
+        output_text = gr.Textbox(label="Respuesta del modelo")
 
+        send_btn = gr.Button("Enviar")
 
+        send_btn.click(
+            chat_logic,
+            inputs=[user_text, user_image, model_selection, chatbox],
+            outputs=[chatbox, output_text]
         )
 
+    return demo
+
+# ===========================
+# LANZAR INTERFAZ
+# ===========================
+demo = create_ui()
 
 if __name__ == "__main__":
+    demo.launch()
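
One thing worth verifying against the SambaNova docs: chat_logic attaches the picture both as a top-level "images" field in the payload and as a {"type": "image", "image": ...} content part, while the code being removed used the OpenAI-style image_url part, which is what most OpenAI-compatible chat/completions endpoints accept. A hedged sketch of that alternative message shape, reusing the base64 PNG produced by encode_image (hence the data:image/png prefix); the exact field names the endpoint expects should be confirmed:

image_b64 = encode_image(user_image)  # base64-encoded PNG, as in the helper above
msg.append({
    "role": "user",
    "content": [
        {"type": "text", "text": user_text or "Describe esta imagen."},
        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
    ],
})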