BATUTO-ART committed on
Commit
bbc3124
·
verified ·
1 Parent(s): 5b6d7ef

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +678 -0
app.py ADDED
@@ -0,0 +1,678 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import base64
4
+ import json
5
+ import requests
6
+ import tempfile
7
+ from PIL import Image
8
+ import gradio as gr
9
+ from huggingface_hub import InferenceClient
10
+ from openai import OpenAI
11
+
12
# ============================================================
# CONFIGURATION
# ============================================================

# SambaNova chat-completions endpoint.
# NOTE(review): the key is read from REVE_API_KEY, not a SAMBA-named
# variable — confirm this is the intended secret name in the deploy env.
SAMBA_API_KEY = os.getenv("REVE_API_KEY")
SAMBA_BASE_URL = "https://api.sambanova.ai/v1"

# OpenRouter: a missing key aborts startup immediately (fail fast),
# unlike the SambaNova key, which is only used at request time.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
if not OPENROUTER_API_KEY:
    raise ValueError("Falta la variable OPENROUTER_API_KEY.")

# OpenAI-compatible client pointed at the OpenRouter gateway.
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)

# Hugging Face token (image generation); optional — generate_image_hf
# returns an inline error message when it is absent.
HF_TOKEN = os.getenv("HF_TOKEN")

DEBUG = False  # Set to True to print the payload sent to SambaNova
34
+
35
# ============================================================
# ALL MODELS (SAMBANOVA + OPENROUTER) WITH ROLES
# ============================================================

# Registry of every selectable model. Each entry maps a UI dropdown key to:
#   provider        -> "sambanova" (direct REST call) or "openrouter" (OpenAI client)
#   name            -> provider-side model identifier sent in the request
#   role            -> system prompt injected by build_messages
#   supports_images -> whether an attached image is forwarded for this model
# "boudoir_specialist" additionally carries metadata lists that are only
# rendered in the UI's model-info panel.
MODELS = {
    # ===================== SAMBANOVA =====================
    "general_fast": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.1-8B-Instruct",
        "role": "🔄 Respuestas rápidas y generales. Conversación ligera y eficiente.",
        "supports_images": False
    },
    "general_smart": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.3-70B-Instruct",
        "role": "🧠 Razonamiento profundo. Análisis detallado y avanzado.",
        "supports_images": False
    },
    "coding_expert": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1",
        "role": "💻 Programación y debugging. Ideal para desarrollo.",
        "supports_images": False
    },
    "coding_alt": {
        "provider": "sambanova",
        "name": "DeepSeek-V3-0324",
        "role": "⚡ Código rápido. Alternativa veloz.",
        "supports_images": False
    },
    "massive_brain": {
        "provider": "sambanova",
        "name": "gpt-oss-120b",
        "role": "🏛️ Sabiduría masiva. Problemas pesados y complejos.",
        "supports_images": False
    },
    "specialized_1": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1-Terminus",
        "role": "🎯 Especialista técnico. Tareas científicas y avanzadas.",
        "supports_images": False
    },
    "specialized_2": {
        "provider": "sambanova",
        "name": "Llama-3.3-Swallow-70B-Instruct-v0.4",
        "role": "🔥 Sin censura. Modelo sin restricciones.",
        "supports_images": False
    },
    "multilingual": {
        "provider": "sambanova",
        "name": "Qwen3-32B",
        "role": "🌍 Multilingüe. Manejo de múltiples idiomas.",
        "supports_images": False
    },
    "vision_expert": {
        "provider": "sambanova",
        "name": "Llama-4-Maverick-17B-128E-Instruct",
        "role": "👁️ Visión avanzada. Análisis de imágenes.",
        "supports_images": True
    },
    "vision_light": {
        "provider": "sambanova",
        "name": "Llama-3.2-11B-Vision-Instruct",
        "role": "👁️ Visión ligera. Modelo de visión eficiente y rápido.",
        "supports_images": True
    },
    "boudoir_specialist": {
        "provider": "sambanova",
        "name": "ALLaM-7B-Instruct-preview",
        "role": "🎭 Especialista en Fotografía Íntima Profesional. Experto en prompts para fotografía boudoir.",
        "supports_images": False,
        # The three lists below are display-only metadata for the info panel.
        "specialties": [
            "Fotografía Boudoir",
            "Desnudo Artístico",
            "Moda Sensual",
            "Lencería y moda íntima"
        ],
        "technical_expertise": [
            "Iluminación suave",
            "Composición elegante",
            "Dirección de poses",
            "Edición fina",
            "Escenografía íntima"
        ],
        "ethical_principles": [
            "Consentimiento explícito",
            "Positividad corporal"
        ]
    },

    # ===================== OPENROUTER =====================
    # META LLAMA
    "llama_3.1_70b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-70b-instruct",
        "role": "Respondes con precisión técnica y claridad.",
        "supports_images": False
    },
    "llama_3.1_405b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-405b-instruct",
        "role": "Eres experto en programación, ciencia y análisis avanzado.",
        "supports_images": False
    },

    # LLAMA 3.2 VISION
    "llama_3.2_11b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-11b-vision-instruct",
        "role": "Eres un modelo experto en análisis visual detallado.",
        "supports_images": True
    },
    "llama_3.2_90b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-90b-vision-instruct",
        "role": "Eres un analista visual avanzado altamente preciso.",
        "supports_images": True
    },

    # QWEN
    "qwen_72b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-72b-instruct",
        "role": "Respondes de forma profesional, directa y clara.",
        "supports_images": False
    },
    "qwen_110b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-110b-instruct",
        "role": "Asistente experto en razonamiento estructurado.",
        "supports_images": False
    },

    # GPT / OPENAI
    "gpt_4.1": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1",
        "role": "Asistente avanzado para cualquier tarea general.",
        "supports_images": False
    },
    "gpt_4.1_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1-mini",
        "role": "Modelo rápido y eficiente, ideal para respuestas concisas.",
        "supports_images": False
    },
    "gpt_4o_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4o-mini",
        "role": "Asistente veloz con buena comprensión general.",
        "supports_images": False
    },

    # CLAUDE
    "claude_3.5_sonnet": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-sonnet",
        "role": "Especialista en redacción, precisión y análisis profundo.",
        "supports_images": False
    },
    "claude_3.5_haiku": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-haiku",
        "role": "Modelo rápido con buena comprensión general.",
        "supports_images": False
    },
    "claude_3_opus": {
        "provider": "openrouter",
        "name": "anthropic/claude-3-opus",
        "role": "Máxima capacidad de análisis y lenguaje.",
        "supports_images": False
    },

    # GOOGLE GEMINI
    "gemini_flash": {
        "provider": "openrouter",
        "name": "google/gemini-flash-1.5",
        "role": "Especialista en escenarios visuales y respuestas rápidas.",
        "supports_images": True
    },
    "gemini_pro": {
        "provider": "openrouter",
        "name": "google/gemini-pro-1.5",
        "role": "Razonador general robusto y flexible.",
        "supports_images": True
    },
    "gemini_thinking": {
        "provider": "openrouter",
        "name": "google/gemini-1.5-thinking",
        "role": "Modelo de razonamiento profundo y detallado.",
        "supports_images": True
    },

    # DEEPSEEK
    "deepseek_r1": {
        "provider": "openrouter",
        "name": "deepseek/deepseek-r1",
        "role": "Razonamiento profundo y cadena de pensamiento estructurada.",
        "supports_images": False
    },

    # MISTRAL
    "mistral_large": {
        "provider": "openrouter",
        "name": "mistral/mistral-large-latest",
        "role": "Asistente técnico avanzado y preciso.",
        "supports_images": False
    },
    "mixtral_8x7b": {
        "provider": "openrouter",
        "name": "mistral/mixtral-8x7b-instruct",
        "role": "Modelo eficiente para tareas complejas sin alto costo.",
        "supports_images": False
    },

    # REKA
    "reka_core": {
        "provider": "openrouter",
        "name": "reka/core",
        "role": "Asistente racional y estructurado.",
        "supports_images": False
    },

    # SAMBANOVA VIA OPENROUTER
    "samba_allam_7b": {
        "provider": "openrouter",
        "name": "sambanova/ALLAM-1-7B",
        "role": "Asistente optimizado para rendimiento y claridad.",
        "supports_images": False
    },

    # FLUX
    "flux_pro": {
        "provider": "openrouter",
        "name": "black-forest-labs/flux-1.1-pro",
        "role": "Experto en generación y análisis de imágenes.",
        "supports_images": True
    }
}
274
+
275
+ # ============================================================
276
+ # HELPERS
277
+ # ============================================================
278
+
279
def encode_image_to_base64(image):
    """Encode a PIL image as a base64 JPEG string.

    Images with transparency (RGBA / LA / P) are flattened onto a white
    background first, because JPEG has no alpha channel.

    Args:
        image: A PIL.Image.Image, or None.

    Returns:
        The base64-encoded JPEG bytes as a str, or None when image is None.
    """
    if image is None:
        return None

    if image.mode in ('RGBA', 'LA', 'P'):
        # Palette images may carry transparency; promote to RGBA so the
        # alpha band can be used as a paste mask.
        if image.mode == 'P':
            image = image.convert('RGBA')
        background = Image.new('RGB', image.size, (255, 255, 255))
        # Fix: LA images also carry an alpha band (their last channel); the
        # previous code only used the mask for RGBA and silently dropped
        # LA transparency.
        alpha = image.split()[-1] if image.mode in ('RGBA', 'LA') else None
        background.paste(image, mask=alpha)
        image = background

    buf = io.BytesIO()
    # JPEG at quality 95: broad API compatibility at a modest payload size.
    image.save(buf, format="JPEG", quality=95)
    return base64.b64encode(buf.getvalue()).decode("utf-8")
293
+
294
def build_messages(system_prompt, user_input, history, image_b64, supports_images):
    """Assemble an OpenAI-style message list from the Gradio chat state.

    Args:
        system_prompt: Optional system message text.
        user_input: The current user text (may be empty/None).
        history: Gradio tuple-format history: [(user, assistant), ...].
        image_b64: Optional base64 JPEG payload for the current turn.
        supports_images: Whether the target model accepts image parts.

    Returns:
        A list of {"role", "content"} dicts; content is a plain string for
        text-only turns and a parts list when an image is attached.
    """
    messages = [{"role": "system", "content": system_prompt}] if system_prompt else []

    # Replay prior turns; malformed entries and blank messages are skipped.
    for turn in history:
        if not (isinstance(turn, (list, tuple)) and len(turn) == 2):
            continue
        for role, raw in zip(("user", "assistant"), turn):
            text = str(raw).strip() if raw else ""
            if text:
                messages.append({"role": role, "content": text})

    # Build the current turn as a list of content parts.
    parts = []
    cleaned = str(user_input).strip() if user_input else ""
    if cleaned:
        parts.append({"type": "text", "text": cleaned})
    if image_b64 and supports_images:
        parts.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
        })

    if parts:
        # Collapse a lone text part to the simple string form.
        text_only = len(parts) == 1 and parts[0]["type"] == "text"
        messages.append({"role": "user", "content": parts[0]["text"] if text_only else parts})
    elif not user_input and not image_b64:
        # Keep the turn alive even when nothing at all was provided.
        messages.append({"role": "user", "content": ""})

    return messages
338
+
339
+ # ============================================================
340
+ # LLAMADAS A LOS MODELOS
341
+ # ============================================================
342
+
343
def call_sambanova(model_name, messages, temperature=0.7, top_p=1.0):
    """Call SambaNova's OpenAI-compatible /chat/completions endpoint.

    Args:
        model_name: SambaNova model identifier (e.g. "Meta-Llama-3.1-8B-Instruct").
        messages: OpenAI-style message list (see build_messages).
        temperature: Sampling temperature, forwarded verbatim.
        top_p: Nucleus-sampling parameter, forwarded verbatim.

    Returns:
        The assistant reply text, or a human-readable error string — the
        caller (chat_logic) renders errors inline instead of raising.
    """
    # Fail fast with a clear message instead of an opaque HTTP 401:
    # OPENROUTER_API_KEY is validated at startup, but this key was not.
    if not SAMBA_API_KEY:
        return "Error: falta la variable REVE_API_KEY (API key de SambaNova)."

    payload = {
        "model": model_name,
        "messages": messages,
        "stream": False,
        "temperature": temperature,
        "top_p": top_p
    }

    if DEBUG:
        print("=== DEBUG SAMBANOVA PAYLOAD ===")
        print(json.dumps(payload, indent=2, ensure_ascii=False))
        print("=== END DEBUG ===")

    headers = {
        "Authorization": f"Bearer {SAMBA_API_KEY}",
        "Content-Type": "application/json"
    }

    try:
        r = requests.post(
            f"{SAMBA_BASE_URL}/chat/completions",
            json=payload,
            headers=headers,
            timeout=60
        )
        r.raise_for_status()

        data = r.json()
        return data["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        # Network failures, timeouts and non-2xx statuses.
        return f"Error en la conexión con SambaNova: {str(e)}"
    except (KeyError, IndexError, TypeError, ValueError) as e:
        # Unexpected response shape or a non-JSON body (r.json() raises a
        # ValueError subclass); narrower than the previous bare Exception.
        return f"Error procesando respuesta de SambaNova: {str(e)}"
377
+
378
def call_openrouter(model_name, messages, temperature=0.7, top_p=1.0):
    """Call a model through the OpenRouter gateway.

    Args:
        model_name: OpenRouter model slug (e.g. "openai/gpt-4o-mini").
        messages: OpenAI-style message list (see build_messages).
        temperature: Sampling temperature, forwarded verbatim.
        top_p: Nucleus-sampling parameter, forwarded verbatim.

    Returns:
        The assistant reply text, or a human-readable error string that the
        caller displays inline.
    """
    try:
        completion = openrouter_client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature,
            top_p=top_p
        )
    except Exception as exc:
        return f"Error en OpenRouter: {str(exc)}"
    return completion.choices[0].message.content
389
+
390
+ # ============================================================
391
+ # LÓGICA DEL CHAT
392
+ # ============================================================
393
+
394
def chat_logic(user_text, user_image, model_key, history, temperature=0.7, top_p=1.0):
    """Run one chat turn: encode, dispatch to the right provider, append.

    Args:
        user_text: Current message text (may be empty).
        user_image: Optional PIL image attached to this turn.
        model_key: Key into MODELS selecting provider and model.
        history: Gradio tuple-format history (mutated in place).
        temperature: Sampling temperature.
        top_p: Nucleus-sampling parameter.

    Returns:
        (history, history) — the same list twice, matching the two Chatbot
        outputs wired up in create_ui.
    """
    if history is None:
        history = []

    # Guard: unknown model key → surface the error as a chat turn.
    if model_key not in MODELS:
        history.append((user_text or "", "Error: modelo no encontrado."))
        return history, history

    try:
        cfg = MODELS[model_key]
        img_b64 = encode_image_to_base64(user_image) if user_image else None

        msgs = build_messages(
            system_prompt=cfg["role"],
            user_input=user_text,
            history=history,
            image_b64=img_b64,
            supports_images=cfg.get("supports_images", False)
        )

        if DEBUG:
            print("=== FINAL MESSAGES ===")
            for idx, m in enumerate(msgs):
                print(f"{idx}: {m['role']} - {type(m['content'])}")
                if isinstance(m['content'], list):
                    for part in m['content']:
                        print(f" - {part['type']}")
            print("=== END MESSAGES ===")

        # Route to the provider backend.
        dispatch = call_sambanova if cfg["provider"] == "sambanova" else call_openrouter
        reply = dispatch(cfg["name"], msgs, temperature, top_p)

        # Show a placeholder label when only an image was sent.
        shown = user_text or ("[Imagen]" if user_image else "")
        history.append((shown, reply))

    except Exception as exc:
        history.append((user_text or "", f"Error: {str(exc)}"))

    return history, history
438
+
439
+ # ============================================================
440
+ # GENERACIÓN DE IMÁGENES (HUGGING FACE, OPCIONAL)
441
+ # ============================================================
442
+
443
def generate_image_hf(prompt):
    """Generate an image with SDXL via the Hugging Face Inference API.

    Args:
        prompt: Text prompt describing the desired image.

    Returns:
        A 3-tuple matching the Gradio outputs (image widget, status text,
        DownloadButton update); on failure the image is None and the
        download button stays hidden.
    """
    if not HF_TOKEN:
        return None, "❌ Falta HF_TOKEN", gr.update(visible=False)

    try:
        hf = InferenceClient(token=HF_TOKEN)
        picture = hf.text_to_image(
            prompt,
            model="stabilityai/stable-diffusion-xl-base-1.0"
        )

        # Persist to a temp file (delete=False) so the DownloadButton can
        # serve it after this function returns.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as handle:
            picture.save(handle, format="PNG")
            return picture, "✅ Imagen generada", gr.update(value=handle.name, visible=True)

    except Exception as err:
        return None, f"❌ Error: {err}", gr.update(visible=False)
460
+
461
+ # ============================================================
462
+ # UI GRADIO UNIFICADA
463
+ # ============================================================
464
+
465
def create_ui():
    """Build the full Gradio interface (chat panel + image generator tab).

    Returns:
        The assembled gr.Blocks application; launched by the module tail.
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="METASAMBA") as demo:
        # Main title
        gr.Markdown("# 🚀 METASAMBA")
        gr.Markdown("### Plataforma Multimodelo de Inteligencia Artificial")

        with gr.Row():
            # Left-hand configuration panel
            with gr.Column(scale=1):
                with gr.Accordion("⚙️ CONFIGURACIÓN DEL MODELO", open=True):
                    model_sel = gr.Dropdown(
                        choices=list(MODELS.keys()),
                        value="general_fast",
                        label="Seleccionar Modelo",
                        info="Elige el modelo que quieres usar"
                    )

                    # Details of the selected model (filled by update_model_info)
                    model_info = gr.Markdown("")

                    with gr.Row():
                        temperature = gr.Slider(
                            minimum=0.1,
                            maximum=2.0,
                            value=0.7,
                            step=0.1,
                            label="Temperatura",
                            info="Controla la aleatoriedad (0.1=más determinista, 2.0=más creativo)"
                        )

                        top_p = gr.Slider(
                            minimum=0.1,
                            maximum=1.0,
                            value=1.0,
                            step=0.1,
                            label="Top-p",
                            info="Controla la diversidad del vocabulario"
                        )

                with gr.Accordion("📊 INFORMACIÓN DEL MODELO", open=False):
                    gr.Markdown("""
### Categorías de Modelos

**SambaNova:**
- 🚀 **Rápidos:** general_fast, coding_alt
- 🧠 **Inteligentes:** general_smart, massive_brain
- 💻 **Programación:** coding_expert, specialized_1
- 👁️ **Visión:** vision_expert, vision_light
- 🌍 **Multilingüe:** multilingual
- 🎭 **Especializados:** boudoir_specialist

**OpenRouter:**
- 🦙 **Llama:** Variantes de 70B a 405B
- 🤖 **GPT:** GPT-4.1 y variantes
- 👻 **Claude:** Claude 3.5 Sonnet/Haiku/Opus
- 🔷 **Gemini:** Flash, Pro, Thinking
- 🏔️ **Otros:** DeepSeek, Mistral, Reka
""")

                with gr.Accordion("📎 ARCHIVOS ADJUNTOS", open=False):
                    gr.Markdown("""
### Formatos soportados:
- 📷 **Imágenes:** JPG, PNG, WebP
- 📄 **Texto:** TXT, PDF, DOCX (próximamente)
- 🎥 **Multimedia:** MP3, MP4 (próximamente)
""")

            # Main chat panel
            with gr.Column(scale=2):
                chat = gr.Chatbot(
                    height=500,
                    label="Conversación",
                    show_copy_button=True,
                    avatar_images=(None, "🤖")
                )

                with gr.Row():
                    img = gr.Image(
                        type="pil",
                        label="📎 Adjuntar Imagen",
                        height=150,
                        show_label=True
                    )

                with gr.Row():
                    txt = gr.Textbox(
                        label="✏️ Tu mensaje",
                        placeholder="Escribe tu mensaje aquí...",
                        lines=4,
                        scale=5,
                        show_label=True
                    )

                with gr.Row():
                    clear_btn = gr.Button("🧹 Limpiar Chat", variant="secondary", size="sm")
                    attach_btn = gr.Button("📎 Adjuntar Archivo", variant="secondary", size="sm")
                    send = gr.Button("🚀 Enviar", variant="primary", size="sm")

        # Image-generation tab
        with gr.Tab("🎨 GENERADOR DE IMÁGENES"):
            gr.Markdown("### Generación de imágenes con Stable Diffusion XL")
            with gr.Row():
                with gr.Column(scale=2):
                    p = gr.Textbox(
                        label="Prompt para la imagen",
                        placeholder="Describe la imagen que quieres generar...",
                        lines=3
                    )
                    generate_btn = gr.Button("🖼️ Generar Imagen", variant="primary")

                with gr.Column(scale=3):
                    out = gr.Image(label="Imagen generada", height=400, show_label=True)
                    status = gr.Textbox(label="Estado", interactive=False)
                    d = gr.DownloadButton("📥 Descargar", visible=False)

        # Render the markdown info panel for the selected model key.
        def update_model_info(model_key):
            if model_key in MODELS:
                model = MODELS[model_key]
                info = f"""
### **{model['name']}**

**Proveedor:** {'SambaNova' if model['provider'] == 'sambanova' else 'OpenRouter'}

**Rol:** {model['role']}

**Soporte de imágenes:** {'✅ Sí' if model.get('supports_images', False) else '❌ No'}
"""

                # Extra metadata lists only exist on the boudoir_specialist entry.
                if model_key == "boudoir_specialist":
                    info += "\n**Especialidades:**\n"
                    for specialty in model.get('specialties', []):
                        info += f"- {specialty}\n"

                    info += "\n**Expertise técnico:**\n"
                    for expertise in model.get('technical_expertise', []):
                        info += f"- {expertise}\n"

                    info += "\n**Principios éticos:**\n"
                    for principle in model.get('ethical_principles', []):
                        info += f"- {principle}\n"

                return info
            return "Selecciona un modelo para ver información detallada."

        # Wire up events
        model_sel.change(
            update_model_info,
            inputs=[model_sel],
            outputs=[model_info]
        )

        # Sending a turn: run chat_logic, then clear the text/image inputs.
        send.click(
            chat_logic,
            inputs=[txt, img, model_sel, chat, temperature, top_p],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),  # clear inputs after sending
            outputs=[txt, img]
        )

        # Enter in the textbox mirrors the send button.
        txt.submit(
            chat_logic,
            inputs=[txt, img, model_sel, chat, temperature, top_p],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),
            outputs=[txt, img]
        )

        clear_btn.click(
            lambda: ([], []),
            outputs=[chat, chat]
        )

        # Attach-file placeholder: writes a notice into the message textbox.
        def attach_file():
            return "Funcionalidad de adjuntar archivo en desarrollo"

        attach_btn.click(
            attach_file,
            outputs=[txt]
        )

        generate_btn.click(
            generate_image_hf,
            inputs=[p],
            outputs=[out, status, d]
        )

        # Populate the model-info panel on initial page load.
        demo.load(
            update_model_info,
            inputs=[model_sel],
            outputs=[model_info]
        )

    return demo
664
+
665
# ============================================================
# ENTRY POINT
# ============================================================

# Built at import time so hosting platforms that import this module
# (e.g. Hugging Face Spaces) can pick up `demo` directly.
demo = create_ui()

if __name__ == "__main__":
    demo.launch(
        share=False,
        show_error=True,
        debug=False,
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860
    )