BATUTO-ART committed on
Commit
5b6d7ef
·
verified ·
1 Parent(s): 2c2f487

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -559
app.py DELETED
@@ -1,559 +0,0 @@
1
- import os
2
- import io
3
- import base64
4
- import json
5
- import requests
6
- import tempfile
7
- from PIL import Image
8
- import gradio as gr
9
- from huggingface_hub import InferenceClient
10
- from openai import OpenAI
11
-
12
# ============================================================
# CONFIGURATION
# ============================================================

# SambaNova chat-completions API (called with raw `requests`).
# NOTE(review): the env var is named REVE_API_KEY but is used as the
# SambaNova bearer token — confirm this is intentional.
SAMBA_API_KEY = os.getenv("REVE_API_KEY")
SAMBA_BASE_URL = "https://api.sambanova.ai/v1"

# OpenRouter (OpenAI-compatible API). The key is mandatory: fail fast
# at import time so the app never starts half-configured.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
if not OPENROUTER_API_KEY:
    raise ValueError("Falta la variable OPENROUTER_API_KEY.")

openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)

# Hugging Face token — only needed for the image-generation tab; optional.
HF_TOKEN = os.getenv("HF_TOKEN")

DEBUG = False  # Set to True to print the payload sent to SambaNova
34
-
35
# ============================================================
# ALL MODELS (SAMBANOVA + OPENROUTER) WITH ROLES
# ============================================================

# Registry of every selectable model. Each entry holds:
#   provider        - "sambanova" (raw HTTP call) or "openrouter" (OpenAI SDK)
#   name            - provider-side model identifier sent in the API request
#   role            - system prompt injected by build_messages()
#   supports_images - whether the chat may attach a base64 image to the turn
# Extra keys on some entries (specialties, technical_expertise,
# ethical_principles) are informational only; nothing in this file reads them.
MODELS = {
    # ===================== SAMBANOVA =====================
    "general_fast": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.1-8B-Instruct",
        "role": "🔄 Respuestas rápidas y generales. Conversación ligera y eficiente.",
        "supports_images": False
    },
    "general_smart": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.3-70B-Instruct",
        "role": "🧠 Razonamiento profundo. Análisis detallado y avanzado.",
        "supports_images": False
    },
    "coding_expert": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1",
        "role": "💻 Programación y debugging. Ideal para desarrollo.",
        "supports_images": False
    },
    "coding_alt": {
        "provider": "sambanova",
        "name": "DeepSeek-V3-0324",
        "role": "⚡ Código rápido. Alternativa veloz.",
        "supports_images": False
    },
    "massive_brain": {
        "provider": "sambanova",
        "name": "gpt-oss-120b",
        "role": "🏛️ Sabiduría masiva. Problemas pesados y complejos.",
        "supports_images": False
    },
    "specialized_1": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1-Terminus",
        "role": "🎯 Especialista técnico. Tareas científicas y avanzadas.",
        "supports_images": False
    },
    "specialized_2": {
        "provider": "sambanova",
        "name": "Llama-3.3-Swallow-70B-Instruct-v0.4",
        "role": "🔥 Sin censura. Modelo sin restricciones.",
        "supports_images": False
    },
    "multilingual": {
        "provider": "sambanova",
        "name": "Qwen3-32B",
        "role": "🌍 Multilingüe. Manejo de múltiples idiomas.",
        "supports_images": False
    },
    "vision_expert": {
        "provider": "sambanova",
        "name": "Llama-4-Maverick-17B-128E-Instruct",
        "role": "👁️ Visión avanzada. Análisis de imágenes.",
        "supports_images": True
    },
    "boudoir_specialist": {
        "provider": "sambanova",
        "name": "ALLaM-7B-Instruct-preview",
        "role": "🎭 Especialista en Fotografía Íntima Profesional. Experto en prompts para fotografía boudoir.",
        "supports_images": False,
        # The three lists below are descriptive metadata; not consumed anywhere.
        "specialties": [
            "Fotografía Boudoir",
            "Desnudo Artístico",
            "Moda Sensual",
            "Lencería y moda íntima"
        ],
        "technical_expertise": [
            "Iluminación suave",
            "Composición elegante",
            "Dirección de poses",
            "Edición fina",
            "Escenografía íntima"
        ],
        "ethical_principles": [
            "Consentimiento explícito",
            "Positividad corporal"
        ]
    },

    # ===================== OPENROUTER =====================
    # META LLAMA
    "llama_3.1_70b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-70b-instruct",
        "role": "Respondes con precisión técnica y claridad.",
        "supports_images": False
    },
    "llama_3.1_405b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-405b-instruct",
        "role": "Eres experto en programación, ciencia y análisis avanzado.",
        "supports_images": False
    },

    # LLAMA 3.2 VISION
    "llama_3.2_11b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-11b-vision-instruct",
        "role": "Eres un modelo experto en análisis visual detallado.",
        "supports_images": True
    },
    "llama_3.2_90b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-90b-vision-instruct",
        "role": "Eres un analista visual avanzado altamente preciso.",
        "supports_images": True
    },

    # QWEN
    "qwen_72b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-72b-instruct",
        "role": "Respondes de forma profesional, directa y clara.",
        "supports_images": False
    },
    "qwen_110b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-110b-instruct",
        "role": "Asistente experto en razonamiento estructurado.",
        "supports_images": False
    },

    # GPT / OPENAI
    "gpt_4.1": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1",
        "role": "Asistente avanzado para cualquier tarea general.",
        "supports_images": False
    },
    "gpt_4.1_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1-mini",
        "role": "Modelo rápido y eficiente, ideal para respuestas concisas.",
        "supports_images": False
    },
    "gpt_4o_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4o-mini",
        "role": "Asistente veloz con buena comprensión general.",
        "supports_images": False
    },

    # CLAUDE
    "claude_3.5_sonnet": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-sonnet",
        "role": "Especialista en redacción, precisión y análisis profundo.",
        "supports_images": False
    },
    "claude_3.5_haiku": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-haiku",
        "role": "Modelo rápido con buena comprensión general.",
        "supports_images": False
    },
    "claude_3_opus": {
        "provider": "openrouter",
        "name": "anthropic/claude-3-opus",
        "role": "Máxima capacidad de análisis y lenguaje.",
        "supports_images": False
    },

    # GOOGLE GEMINI
    "gemini_flash": {
        "provider": "openrouter",
        "name": "google/gemini-flash-1.5",
        "role": "Especialista en escenarios visuales y respuestas rápidas.",
        "supports_images": True
    },
    "gemini_pro": {
        "provider": "openrouter",
        "name": "google/gemini-pro-1.5",
        "role": "Razonador general robusto y flexible.",
        "supports_images": True
    },
    "gemini_thinking": {
        "provider": "openrouter",
        # NOTE(review): "google/gemini-1.5-thinking" does not look like a
        # published OpenRouter slug — verify against the OpenRouter model list.
        "name": "google/gemini-1.5-thinking",
        "role": "Modelo de razonamiento profundo y detallado.",
        "supports_images": True
    },

    # DEEPSEEK
    "deepseek_r1": {
        "provider": "openrouter",
        "name": "deepseek/deepseek-r1",
        "role": "Razonamiento profundo y cadena de pensamiento estructurada.",
        "supports_images": False
    },

    # MISTRAL
    # NOTE(review): OpenRouter's Mistral slugs usually use the "mistralai/"
    # prefix — verify these two IDs resolve.
    "mistral_large": {
        "provider": "openrouter",
        "name": "mistral/mistral-large-latest",
        "role": "Asistente técnico avanzado y preciso.",
        "supports_images": False
    },
    "mixtral_8x7b": {
        "provider": "openrouter",
        "name": "mistral/mixtral-8x7b-instruct",
        "role": "Modelo eficiente para tareas complejas sin alto costo.",
        "supports_images": False
    },

    # REKA
    "reka_core": {
        "provider": "openrouter",
        "name": "reka/core",
        "role": "Asistente racional y estructurado.",
        "supports_images": False
    },

    # SAMBANOVA ON OPENROUTER
    "samba_allam_7b": {
        "provider": "openrouter",
        "name": "sambanova/ALLAM-1-7B",
        "role": "Asistente optimizado para rendimiento y claridad.",
        "supports_images": False
    },

    # FLUX
    # NOTE(review): FLUX is an image-generation model; routing it through the
    # chat-completions endpoint may fail — confirm it is usable here.
    "flux_pro": {
        "provider": "openrouter",
        "name": "black-forest-labs/flux-1.1-pro",
        "role": "Experto en generación y análisis de imágenes.",
        "supports_images": True
    }
}
268
-
269
# ============================================================
# HELPERS
# ============================================================

def encode_image_to_base64(image):
    """Encode a PIL image as a base64 JPEG string.

    Args:
        image: PIL.Image.Image or None.

    Returns:
        str: base64-encoded JPEG bytes, or None when *image* is None.
    """
    if image is None:
        return None
    buf = io.BytesIO()
    # JPEG has no alpha channel: flatten transparent modes onto a white
    # background before saving.
    if image.mode in ('RGBA', 'LA', 'P'):
        # Normalize to RGBA first so the alpha channel survives.
        # Fix: the previous version dropped the alpha of 'LA' images
        # because the paste mask was only applied for 'RGBA'.
        if image.mode != 'RGBA':
            image = image.convert('RGBA')
        background = Image.new('RGB', image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[-1])
        image = background
    elif image.mode != 'RGB':
        # Fix: other non-RGB modes (e.g. CMYK, I;16) could previously reach
        # the JPEG encoder unconverted; normalize them to RGB.
        image = image.convert('RGB')
    image.save(buf, format="JPEG", quality=95)
    return base64.b64encode(buf.getvalue()).decode("utf-8")
287
-
288
def build_messages(system_prompt, user_input, history, image_b64, supports_images):
    """Assemble an OpenAI-style message list for a chat completion call.

    Args:
        system_prompt: optional system message text (skipped when falsy).
        user_input: the current user turn (text, may be None/empty).
        history: Gradio-style list of (user, assistant) pairs.
        image_b64: optional base64 JPEG attached to the current turn.
        supports_images: whether the target model accepts image content.

    Returns:
        list[dict]: messages in the OpenAI chat-completions format.
    """
    msgs = []

    if system_prompt:
        msgs.append({"role": "system", "content": system_prompt})

    # Replay prior turns, skipping malformed entries and blank sides.
    for entry in history:
        if not (isinstance(entry, (list, tuple)) and len(entry) == 2):
            continue
        for role, text in zip(("user", "assistant"), entry):
            if text and str(text).strip():
                msgs.append({"role": role, "content": str(text).strip()})

    # Build the current turn out of up to two parts: text and image.
    parts = []
    if user_input and str(user_input).strip():
        parts.append({"type": "text", "text": str(user_input).strip()})
    if image_b64 and supports_images:
        parts.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
        })

    if parts:
        # Text-only turns use the plain-string form for compatibility.
        single_text = len(parts) == 1 and parts[0]["type"] == "text"
        content = parts[0]["text"] if single_text else parts
        msgs.append({"role": "user", "content": content})
    elif not user_input and not image_b64:
        # Nothing to send: append an empty user turn to keep the exchange going.
        msgs.append({"role": "user", "content": ""})

    return msgs
332
-
333
# ============================================================
# MODEL CALLS
# ============================================================

def call_sambanova(model_name, messages):
    """POST a non-streaming chat completion to SambaNova and return the text.

    Errors are returned as human-readable strings (never raised) so the UI
    can display them inline.
    """
    payload = {"model": model_name, "messages": messages, "stream": False}

    if DEBUG:
        print("=== DEBUG SAMBANOVA PAYLOAD ===")
        print(json.dumps(payload, indent=2, ensure_ascii=False))
        print("=== END DEBUG ===")

    try:
        response = requests.post(
            f"{SAMBA_BASE_URL}/chat/completions",
            json=payload,
            headers={
                "Authorization": f"Bearer {SAMBA_API_KEY}",
                "Content-Type": "application/json",
            },
            timeout=60,
        )
        response.raise_for_status()
        body = response.json()
        return body["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        return f"Error en la conexión con SambaNova: {str(e)}"
    except Exception as e:
        return f"Error procesando respuesta de SambaNova: {str(e)}"
369
-
370
def call_openrouter(model_name, messages):
    """Run a chat completion through the OpenRouter client and return the text.

    Errors are returned as human-readable strings (never raised) so the UI
    can display them inline.
    """
    try:
        completion = openrouter_client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error en OpenRouter: {str(e)}"
380
-
381
# ============================================================
# CHAT LOGIC
# ============================================================

def chat_logic(user_text, user_image, model_key, history):
    """Run one chat turn and return the updated history (twice, for Gradio).

    Looks up the model config, builds the message list (optionally with a
    base64 image), dispatches to the right provider, and appends the
    (user, reply) pair to *history*. Errors become reply strings.
    """
    if history is None:
        history = []

    if model_key not in MODELS:
        history.append((user_text or "", "Error: modelo no encontrado."))
        return history, history

    try:
        cfg = MODELS[model_key]
        encoded = encode_image_to_base64(user_image) if user_image else None

        messages = build_messages(
            system_prompt=cfg["role"],
            user_input=user_text,
            history=history,
            image_b64=encoded,
            supports_images=cfg.get("supports_images", False),
        )

        if DEBUG:
            print("=== FINAL MESSAGES ===")
            for i, msg in enumerate(messages):
                print(f"{i}: {msg['role']} - {type(msg['content'])}")
                if isinstance(msg['content'], list):
                    for item in msg['content']:
                        print(f" - {item['type']}")
            print("=== END MESSAGES ===")

        # Dispatch by provider; both callables share the same signature.
        caller = call_sambanova if cfg["provider"] == "sambanova" else call_openrouter
        reply = caller(cfg["name"], messages)

        # Show a placeholder label when the turn was image-only.
        shown = user_text or ("[Imagen]" if user_image else "")
        history.append((shown, reply))

    except Exception as e:
        history.append((user_text or "", f"Error: {str(e)}"))

    return history, history
429
-
430
# ============================================================
# IMAGE GENERATION (HUGGING FACE, OPTIONAL)
# ============================================================

def generate_image_hf(prompt):
    """Generate an image from *prompt* with SDXL via the HF Inference API.

    Returns a (image, status_text, download_button_update) triple matching
    the Gradio outputs. When HF_TOKEN is missing or the call fails, the
    image slot is None and the download button stays hidden.
    """
    if not HF_TOKEN:
        return None, "❌ Falta HF_TOKEN", gr.update(visible=False)

    try:
        hf_client = InferenceClient(token=HF_TOKEN)
        image = hf_client.text_to_image(
            prompt,
            model="stabilityai/stable-diffusion-xl-base-1.0",
        )

        # Persist to a temp file so the DownloadButton has a path to serve.
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            image.save(tmp, format="PNG")
            return image, "✅ Imagen generada", gr.update(value=tmp.name, visible=True)

    except Exception as e:
        return None, f"❌ Error: {e}", gr.update(visible=False)
451
-
452
# ============================================================
# UNIFIED GRADIO UI
# ============================================================

def create_ui():
    """Build the Gradio Blocks app: a chat tab plus an image-generation tab.

    Returns:
        gr.Blocks: the assembled (but not launched) demo.
    """
    with gr.Blocks(theme="soft") as demo:
        gr.Markdown("## 🤖 Chat Multimodelo (SambaNova + OpenRouter)")

        with gr.Row():
            # Left column: model selector and usage notes.
            with gr.Column(scale=1):
                model_sel = gr.Dropdown(
                    choices=list(MODELS.keys()),
                    value="general_fast",
                    label="Seleccionar Modelo",
                    info="Elige el modelo que quieres usar"
                )
                gr.Markdown("### 📝 Instrucciones")
                gr.Markdown("""
                - **Texto**: Escribe tu mensaje normalmente
                - **Imágenes**: Algunos modelos soportan análisis de imágenes
                - **Historial**: La conversación se mantiene durante la sesión
                """)

            # Right column: the two feature tabs.
            with gr.Column(scale=2):
                with gr.Tab("💬 Chat"):
                    chat = gr.Chatbot(
                        height=500,
                        label="Conversación",
                        show_copy_button=True
                    )
                    with gr.Row():
                        # Optional image input; only vision models use it.
                        img = gr.Image(
                            type="pil",
                            label="Imagen opcional (solo para modelos de visión)",
                            height=150
                        )
                    with gr.Row():
                        txt = gr.Textbox(
                            label="Tu mensaje",
                            placeholder="Escribe tu mensaje aquí...",
                            lines=3,
                            scale=4
                        )
                        send = gr.Button("🚀 Enviar", scale=1)

                    # Button to reset the conversation.
                    clear_btn = gr.Button("🧹 Limpiar Chat")

                with gr.Tab("🎨 Generar Imágenes (HF)"):
                    gr.Markdown("### Generación de imágenes con Stable Diffusion XL")
                    gr.Markdown("Usa Hugging Face para generar imágenes a partir de texto.")
                    with gr.Row():
                        with gr.Column():
                            p = gr.Textbox(
                                label="Prompt para la imagen",
                                placeholder="Describe la imagen que quieres generar...",
                                lines=3
                            )
                            generate_btn = gr.Button("🖼️ Generar Imagen")
                        with gr.Column():
                            out = gr.Image(label="Imagen generada", height=400)
                            status = gr.Textbox(label="Estado", interactive=False)
                            d = gr.DownloadButton("📥 Descargar", visible=False)

        # Wire up events. The Chatbot component doubles as the history state
        # (passed in as input and written back as output).
        send.click(
            chat_logic,
            inputs=[txt, img, model_sel, chat],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),  # clear the text/image inputs after sending
            outputs=[txt, img]
        )

        # Enter key in the textbox mirrors the Send button.
        txt.submit(
            chat_logic,
            inputs=[txt, img, model_sel, chat],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),
            outputs=[txt, img]
        )

        clear_btn.click(
            lambda: ([], []),
            outputs=[chat, chat]
        )

        generate_btn.click(
            generate_image_hf,
            inputs=[p],
            outputs=[out, status, d]
        )

    return demo
547
-
548
# ============================================================
# ENTRY POINT
# ============================================================

# Build the app at import time so hosting platforms that import `demo`
# from this module (e.g. Hugging Face Spaces) can serve it directly.
demo = create_ui()

if __name__ == "__main__":
    demo.launch(
        share=False,
        show_error=True,  # surface handler exceptions in the UI
        debug=False
    )