# METASAMBA — Hugging Face Space app.
# (Removed scraped page banner: "Spaces: Runtime error".)
import os
import io
import base64
import json
import requests
import tempfile
from PIL import Image
import gradio as gr
from huggingface_hub import InferenceClient
from openai import OpenAI

# ============================================================
# CONFIGURATION
# ============================================================
# SambaNova (called directly over REST in call_sambanova)
# NOTE(review): the key is read from REVE_API_KEY, not SAMBA_API_KEY —
# confirm this environment variable name is intentional.
SAMBA_API_KEY = os.getenv("REVE_API_KEY")
SAMBA_BASE_URL = "https://api.sambanova.ai/v1"
# OpenRouter (used through the OpenAI-compatible client below).
# Missing key is a hard error because most models route through OpenRouter.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
if not OPENROUTER_API_KEY:
    raise ValueError("Falta la variable OPENROUTER_API_KEY.")
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)
# Hugging Face token (only needed for the image-generation tab)
HF_TOKEN = os.getenv("HF_TOKEN")
DEBUG = False  # Set to True to print the payload sent to SambaNova
# ============================================================
# ALL MODELS (SAMBANOVA + OPENROUTER) WITH ROLES
# ============================================================
# Registry keyed by a short internal id. Each entry declares:
#   provider        -> "sambanova" (direct REST) or "openrouter" (OpenAI client)
#   name            -> provider-side model identifier sent in the API payload
#   role            -> injected as the system prompt by build_messages()
#   supports_images -> whether chat_logic may attach a base64 JPEG
# The "boudoir_specialist" entry carries extra metadata lists that are only
# rendered by update_model_info() in the UI.
MODELS = {
    # ===================== SAMBANOVA =====================
    "general_fast": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.1-8B-Instruct",
        "role": "🔄 Respuestas rápidas y generales. Conversación ligera y eficiente.",
        "supports_images": False
    },
    "general_smart": {
        "provider": "sambanova",
        "name": "Meta-Llama-3.3-70B-Instruct",
        "role": "🧠 Razonamiento profundo. Análisis detallado y avanzado.",
        "supports_images": False
    },
    "coding_expert": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1",
        "role": "💻 Programación y debugging. Ideal para desarrollo.",
        "supports_images": False
    },
    "coding_alt": {
        "provider": "sambanova",
        "name": "DeepSeek-V3-0324",
        "role": "⚡ Código rápido. Alternativa veloz.",
        "supports_images": False
    },
    "massive_brain": {
        "provider": "sambanova",
        "name": "gpt-oss-120b",
        "role": "🏛️ Sabiduría masiva. Problemas pesados y complejos.",
        "supports_images": False
    },
    "specialized_1": {
        "provider": "sambanova",
        "name": "DeepSeek-V3.1-Terminus",
        "role": "🎯 Especialista técnico. Tareas científicas y avanzadas.",
        "supports_images": False
    },
    "specialized_2": {
        "provider": "sambanova",
        "name": "Llama-3.3-Swallow-70B-Instruct-v0.4",
        "role": "🔥 Sin censura. Modelo sin restricciones.",
        "supports_images": False
    },
    "multilingual": {
        "provider": "sambanova",
        "name": "Qwen3-32B",
        "role": "🌍 Multilingüe. Manejo de múltiples idiomas.",
        "supports_images": False
    },
    "vision_expert": {
        "provider": "sambanova",
        "name": "Llama-4-Maverick-17B-128E-Instruct",
        "role": "👁️ Visión avanzada. Análisis de imágenes.",
        "supports_images": True
    },
    "vision_light": {
        "provider": "sambanova",
        "name": "Llama-3.2-11B-Vision-Instruct",
        "role": "👁️ Visión ligera. Modelo de visión eficiente y rápido.",
        "supports_images": True
    },
    "boudoir_specialist": {
        "provider": "sambanova",
        "name": "ALLaM-7B-Instruct-preview",
        "role": "🎭 Especialista en Fotografía Íntima Profesional. Experto en prompts para fotografía boudoir.",
        "supports_images": False,
        # Extra metadata below is display-only (see update_model_info)
        "specialties": [
            "Fotografía Boudoir",
            "Desnudo Artístico",
            "Moda Sensual",
            "Lencería y moda íntima"
        ],
        "technical_expertise": [
            "Iluminación suave",
            "Composición elegante",
            "Dirección de poses",
            "Edición fina",
            "Escenografía íntima"
        ],
        "ethical_principles": [
            "Consentimiento explícito",
            "Positividad corporal"
        ]
    },
    # ===================== OPENROUTER =====================
    # META LLAMA
    "llama_3.1_70b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-70b-instruct",
        "role": "Respondes con precisión técnica y claridad.",
        "supports_images": False
    },
    "llama_3.1_405b": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.1-405b-instruct",
        "role": "Eres experto en programación, ciencia y análisis avanzado.",
        "supports_images": False
    },
    # LLAMA 3.2 VISION
    "llama_3.2_11b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-11b-vision-instruct",
        "role": "Eres un modelo experto en análisis visual detallado.",
        "supports_images": True
    },
    "llama_3.2_90b_vision": {
        "provider": "openrouter",
        "name": "meta-llama/llama-3.2-90b-vision-instruct",
        "role": "Eres un analista visual avanzado altamente preciso.",
        "supports_images": True
    },
    # QWEN
    "qwen_72b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-72b-instruct",
        "role": "Respondes de forma profesional, directa y clara.",
        "supports_images": False
    },
    "qwen_110b": {
        "provider": "openrouter",
        "name": "qwen/qwen2.5-110b-instruct",
        "role": "Asistente experto en razonamiento estructurado.",
        "supports_images": False
    },
    # GPT / OPENAI
    "gpt_4.1": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1",
        "role": "Asistente avanzado para cualquier tarea general.",
        "supports_images": False
    },
    "gpt_4.1_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4.1-mini",
        "role": "Modelo rápido y eficiente, ideal para respuestas concisas.",
        "supports_images": False
    },
    "gpt_4o_mini": {
        "provider": "openrouter",
        "name": "openai/gpt-4o-mini",
        "role": "Asistente veloz con buena comprensión general.",
        "supports_images": False
    },
    # CLAUDE
    "claude_3.5_sonnet": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-sonnet",
        "role": "Especialista en redacción, precisión y análisis profundo.",
        "supports_images": False
    },
    "claude_3.5_haiku": {
        "provider": "openrouter",
        "name": "anthropic/claude-3.5-haiku",
        "role": "Modelo rápido con buena comprensión general.",
        "supports_images": False
    },
    "claude_3_opus": {
        "provider": "openrouter",
        "name": "anthropic/claude-3-opus",
        "role": "Máxima capacidad de análisis y lenguaje.",
        "supports_images": False
    },
    # GOOGLE GEMINI
    "gemini_flash": {
        "provider": "openrouter",
        "name": "google/gemini-flash-1.5",
        "role": "Especialista en escenarios visuales y respuestas rápidas.",
        "supports_images": True
    },
    "gemini_pro": {
        "provider": "openrouter",
        "name": "google/gemini-pro-1.5",
        "role": "Razonador general robusto y flexible.",
        "supports_images": True
    },
    "gemini_thinking": {
        "provider": "openrouter",
        # NOTE(review): verify this id exists on OpenRouter — it does not
        # match the usual google/gemini-* naming for thinking models.
        "name": "google/gemini-1.5-thinking",
        "role": "Modelo de razonamiento profundo y detallado.",
        "supports_images": True
    },
    # DEEPSEEK
    "deepseek_r1": {
        "provider": "openrouter",
        "name": "deepseek/deepseek-r1",
        "role": "Razonamiento profundo y cadena de pensamiento estructurada.",
        "supports_images": False
    },
    # MISTRAL
    "mistral_large": {
        "provider": "openrouter",
        # NOTE(review): OpenRouter typically uses the "mistralai/" prefix —
        # confirm "mistral/..." resolves.
        "name": "mistral/mistral-large-latest",
        "role": "Asistente técnico avanzado y preciso.",
        "supports_images": False
    },
    "mixtral_8x7b": {
        "provider": "openrouter",
        # NOTE(review): same prefix concern as mistral_large above.
        "name": "mistral/mixtral-8x7b-instruct",
        "role": "Modelo eficiente para tareas complejas sin alto costo.",
        "supports_images": False
    },
    # REKA
    "reka_core": {
        "provider": "openrouter",
        # NOTE(review): verify against OpenRouter's catalog (often "rekaai/...").
        "name": "reka/core",
        "role": "Asistente racional y estructurado.",
        "supports_images": False
    },
    # SAMBANOVA ON OPENROUTER
    "samba_allam_7b": {
        "provider": "openrouter",
        "name": "sambanova/ALLAM-1-7B",
        "role": "Asistente optimizado para rendimiento y claridad.",
        "supports_images": False
    },
    # FLUX
    "flux_pro": {
        "provider": "openrouter",
        # NOTE(review): FLUX is an image-generation model; routing it through
        # the chat-completions path may not work — confirm intent.
        "name": "black-forest-labs/flux-1.1-pro",
        "role": "Experto en generación y análisis de imágenes.",
        "supports_images": True
    }
}
# ============================================================
# HELPERS
# ============================================================
def encode_image_to_base64(image):
    """Encode a PIL image as a base64 JPEG string.

    Images with transparency (RGBA, LA, or palette) are composited onto a
    white background first, since JPEG has no alpha channel.  Any other
    non-RGB mode is converted to RGB so ``save(format="JPEG")`` cannot fail
    on exotic modes (e.g. "1" or "I;16").

    Returns None when *image* is None; otherwise a base64 str (no data: prefix).
    """
    if image is None:
        return None
    buf = io.BytesIO()
    if image.mode in ('RGBA', 'LA', 'P'):
        # Palette images may carry transparency; promote to RGBA so the
        # alpha band exists uniformly.
        if image.mode == 'P':
            image = image.convert('RGBA')
        background = Image.new('RGB', image.size, (255, 255, 255))
        # Use the alpha band (last band of RGBA *and* LA) as the paste mask.
        # The original only masked RGBA, silently discarding LA transparency.
        background.paste(image, mask=image.split()[-1])
        image = background
    elif image.mode != 'RGB':
        # Normalize remaining modes so JPEG encoding always succeeds.
        image = image.convert('RGB')
    image.save(buf, format="JPEG", quality=95)
    return base64.b64encode(buf.getvalue()).decode("utf-8")
def build_messages(system_prompt, user_input, history, image_b64, supports_images):
    """Assemble an OpenAI-style chat message list.

    The optional system prompt comes first, then the prior Gradio history
    (list of (user, assistant) pairs, blanks skipped), then the current turn.
    The current turn uses the multimodal content-list format only when an
    image is attached and the model supports images; a plain string is used
    for text-only turns.  A completely empty turn still emits an empty user
    message to keep the conversation shape valid.
    """
    messages = [{"role": "system", "content": system_prompt}] if system_prompt else []

    # Replay previous (user, assistant) exchanges from the Gradio history.
    for entry in history:
        if not (isinstance(entry, (list, tuple)) and len(entry) == 2):
            continue
        for role, raw in zip(("user", "assistant"), entry):
            text = str(raw).strip() if raw else ""
            if text:
                messages.append({"role": role, "content": text})

    # Build the current turn's content parts.
    parts = []
    current_text = str(user_input).strip() if user_input else ""
    if current_text:
        parts.append({"type": "text", "text": current_text})
    if image_b64 and supports_images:
        parts.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}
        })

    if parts:
        # Collapse a lone text part to the simple string form.
        if len(parts) == 1 and parts[0]["type"] == "text":
            messages.append({"role": "user", "content": parts[0]["text"]})
        else:
            messages.append({"role": "user", "content": parts})
    elif not user_input and not image_b64:
        # Nothing at all this turn: keep the conversation going with an
        # empty user message.
        messages.append({"role": "user", "content": ""})
    return messages
# ============================================================
# MODEL CALLS
# ============================================================
def call_sambanova(model_name, messages, temperature=0.7, top_p=1.0):
    """POST a non-streaming chat completion to SambaNova and return the text.

    Never raises: connection/HTTP failures and malformed responses are
    returned as human-readable error strings so the chat UI can display them.
    """
    payload = {
        "model": model_name,
        "messages": messages,
        "stream": False,
        "temperature": temperature,
        "top_p": top_p
    }
    if DEBUG:
        print("=== DEBUG SAMBANOVA PAYLOAD ===")
        print(json.dumps(payload, indent=2, ensure_ascii=False))
        print("=== END DEBUG ===")
    try:
        response = requests.post(
            f"{SAMBA_BASE_URL}/chat/completions",
            json=payload,
            headers={
                "Authorization": f"Bearer {SAMBA_API_KEY}",
                "Content-Type": "application/json"
            },
            timeout=60
        )
        response.raise_for_status()
        # Missing keys in the response body fall through to the generic
        # handler below.
        return response.json()["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        return f"Error en la conexión con SambaNova: {str(e)}"
    except Exception as e:
        return f"Error procesando respuesta de SambaNova: {str(e)}"
def call_openrouter(model_name, messages, temperature=0.7, top_p=1.0):
    """Run a chat completion through the OpenRouter OpenAI-compatible client.

    Returns the assistant text, or an error string on any failure (the UI
    renders whatever comes back, so this never raises).
    """
    try:
        completion = openrouter_client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature,
            top_p=top_p
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error en OpenRouter: {str(e)}"
# ============================================================
# CHAT LOGIC
# ============================================================
def chat_logic(user_text, user_image, model_key, history, temperature=0.7, top_p=1.0):
    """Handle one chat turn: encode the image, build messages, call the model.

    Appends the (shown_user_text, reply) pair to *history* in place and
    returns it twice (Gradio wires the chatbot component as both outputs).
    All failures are converted to error replies instead of raising.
    """
    if history is None:
        history = []
    if model_key not in MODELS:
        history.append((user_text or "", "Error: modelo no encontrado."))
        return history, history
    try:
        cfg = MODELS[model_key]
        b64 = encode_image_to_base64(user_image) if user_image else None
        messages = build_messages(
            system_prompt=cfg["role"],
            user_input=user_text,
            history=history,
            image_b64=b64,
            supports_images=cfg.get("supports_images", False)
        )
        if DEBUG:
            print("=== FINAL MESSAGES ===")
            for i, msg in enumerate(messages):
                print(f"{i}: {msg['role']} - {type(msg['content'])}")
                if isinstance(msg['content'], list):
                    for item in msg['content']:
                        print(f" - {item['type']}")
            print("=== END MESSAGES ===")
        # Dispatch to the right backend by provider.
        backend = call_sambanova if cfg["provider"] == "sambanova" else call_openrouter
        reply = backend(cfg["name"], messages, temperature, top_p)
        # Show a placeholder when the user sent only an image.
        shown = user_text or ("[Imagen]" if user_image else "")
        history.append((shown, reply))
    except Exception as e:
        history.append((user_text or "", f"Error: {str(e)}"))
    return history, history
# ============================================================
# IMAGE GENERATION (HUGGING FACE, OPTIONAL)
# ============================================================
def generate_image_hf(prompt):
    """Generate an image from *prompt* via the HF Inference API (SDXL).

    Returns (image, status_text, download_button_update).  The image is also
    written to a temp PNG (delete=False) so the DownloadButton can serve it.
    Fails soft: missing token or API errors produce a status message and a
    hidden download button.
    """
    if not HF_TOKEN:
        return None, "❌ Falta HF_TOKEN", gr.update(visible=False)
    try:
        image = InferenceClient(token=HF_TOKEN).text_to_image(
            prompt,
            model="stabilityai/stable-diffusion-xl-base-1.0"
        )
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            image.save(tmp, format="PNG")
            tmp_path = tmp.name
        return image, "✅ Imagen generada", gr.update(value=tmp_path, visible=True)
    except Exception as e:
        return None, f"❌ Error: {e}", gr.update(visible=False)
# ============================================================
# UNIFIED GRADIO UI
# ============================================================
def create_ui():
    """Build and return the METASAMBA Gradio Blocks interface.

    Layout: a configuration column (model picker, sampling sliders, info
    accordions) next to the chat column (chatbot, image attachment, text
    input, action buttons), plus a separate image-generation tab.  All event
    wiring happens inside the Blocks context; the constructed ``demo`` is
    returned for launching.
    """
    with gr.Blocks(theme=gr.themes.Soft(), title="METASAMBA") as demo:
        # Main title
        gr.Markdown("# 🚀 METASAMBA")
        gr.Markdown("### Plataforma Multimodelo de Inteligencia Artificial")
        with gr.Row():
            # Left configuration panel
            with gr.Column(scale=1):
                with gr.Accordion("⚙️ CONFIGURACIÓN DEL MODELO", open=True):
                    model_sel = gr.Dropdown(
                        choices=list(MODELS.keys()),
                        value="general_fast",
                        label="Seleccionar Modelo",
                        info="Elige el modelo que quieres usar"
                    )
                    # Markdown panel showing details of the selected model
                    model_info = gr.Markdown("")
                    with gr.Row():
                        temperature = gr.Slider(
                            minimum=0.1,
                            maximum=2.0,
                            value=0.7,
                            step=0.1,
                            label="Temperatura",
                            info="Controla la aleatoriedad (0.1=más determinista, 2.0=más creativo)"
                        )
                        top_p = gr.Slider(
                            minimum=0.1,
                            maximum=1.0,
                            value=1.0,
                            step=0.1,
                            label="Top-p",
                            info="Controla la diversidad del vocabulario"
                        )
                with gr.Accordion("📊 INFORMACIÓN DEL MODELO", open=False):
                    # Static help text (markdown kept flush-left so it is not
                    # rendered as a code block).
                    gr.Markdown("""
### Categorías de Modelos
**SambaNova:**
- 🚀 **Rápidos:** general_fast, coding_alt
- 🧠 **Inteligentes:** general_smart, massive_brain
- 💻 **Programación:** coding_expert, specialized_1
- 👁️ **Visión:** vision_expert, vision_light
- 🌍 **Multilingüe:** multilingual
- 🎭 **Especializados:** boudoir_specialist
**OpenRouter:**
- 🦙 **Llama:** Variantes de 70B a 405B
- 🤖 **GPT:** GPT-4.1 y variantes
- 👻 **Claude:** Claude 3.5 Sonnet/Haiku/Opus
- 🔷 **Gemini:** Flash, Pro, Thinking
- 🏔️ **Otros:** DeepSeek, Mistral, Reka
""")
                with gr.Accordion("📎 ARCHIVOS ADJUNTOS", open=False):
                    gr.Markdown("""
### Formatos soportados:
- 📷 **Imágenes:** JPG, PNG, WebP
- 📄 **Texto:** TXT, PDF, DOCX (próximamente)
- 🎥 **Multimedia:** MP3, MP4 (próximamente)
""")
            # Main chat panel
            with gr.Column(scale=2):
                chat = gr.Chatbot(
                    height=500,
                    label="Conversación",
                    show_copy_button=True,
                    avatar_images=(None, "🤖")
                )
                with gr.Row():
                    img = gr.Image(
                        type="pil",
                        label="📎 Adjuntar Imagen",
                        height=150,
                        show_label=True
                    )
                with gr.Row():
                    txt = gr.Textbox(
                        label="✏️ Tu mensaje",
                        placeholder="Escribe tu mensaje aquí...",
                        lines=4,
                        scale=5,
                        show_label=True
                    )
                with gr.Row():
                    clear_btn = gr.Button("🧹 Limpiar Chat", variant="secondary", size="sm")
                    attach_btn = gr.Button("📎 Adjuntar Archivo", variant="secondary", size="sm")
                    send = gr.Button("🚀 Enviar", variant="primary", size="sm")
        # Image-generation tab
        with gr.Tab("🎨 GENERADOR DE IMÁGENES"):
            gr.Markdown("### Generación de imágenes con Stable Diffusion XL")
            with gr.Row():
                with gr.Column(scale=2):
                    p = gr.Textbox(
                        label="Prompt para la imagen",
                        placeholder="Describe la imagen que quieres generar...",
                        lines=3
                    )
                    generate_btn = gr.Button("🖼️ Generar Imagen", variant="primary")
                with gr.Column(scale=3):
                    out = gr.Image(label="Imagen generada", height=400, show_label=True)
                    status = gr.Textbox(label="Estado", interactive=False)
                    d = gr.DownloadButton("📥 Descargar", visible=False)

        # Render the markdown shown in the model-info panel for *model_key*.
        def update_model_info(model_key):
            if model_key in MODELS:
                model = MODELS[model_key]
                info = f"""
### **{model['name']}**
**Proveedor:** {'SambaNova' if model['provider'] == 'sambanova' else 'OpenRouter'}
**Rol:** {model['role']}
**Soporte de imágenes:** {'✅ Sí' if model.get('supports_images', False) else '❌ No'}
"""
                # Append the extra metadata carried only by boudoir_specialist
                if model_key == "boudoir_specialist":
                    info += "\n**Especialidades:**\n"
                    for specialty in model.get('specialties', []):
                        info += f"- {specialty}\n"
                    info += "\n**Expertise técnico:**\n"
                    for expertise in model.get('technical_expertise', []):
                        info += f"- {expertise}\n"
                    info += "\n**Principios éticos:**\n"
                    for principle in model.get('ethical_principles', []):
                        info += f"- {principle}\n"
                return info
            return "Selecciona un modelo para ver información detallada."

        # Event wiring
        model_sel.change(
            update_model_info,
            inputs=[model_sel],
            outputs=[model_info]
        )
        send.click(
            chat_logic,
            inputs=[txt, img, model_sel, chat, temperature, top_p],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),  # clear the text/image inputs after sending
            outputs=[txt, img]
        )
        txt.submit(
            chat_logic,
            inputs=[txt, img, model_sel, chat, temperature, top_p],
            outputs=[chat, chat]
        ).then(
            lambda: ("", None),
            outputs=[txt, img]
        )
        clear_btn.click(
            lambda: ([], []),
            outputs=[chat, chat]
        )

        # Placeholder handler: the attach button just posts a notice into
        # the textbox until real file attachment is implemented.
        def attach_file():
            return "Funcionalidad de adjuntar archivo en desarrollo"
        attach_btn.click(
            attach_file,
            outputs=[txt]
        )
        generate_btn.click(
            generate_image_hf,
            inputs=[p],
            outputs=[out, status, d]
        )
        # Populate the model-info panel once on page load.
        demo.load(
            update_model_info,
            inputs=[model_sel],
            outputs=[model_info]
        )
    return demo
# ============================================================
# EXECUTION
# ============================================================
# Built at import time so hosting platforms (e.g. HF Spaces) can find `demo`.
demo = create_ui()

if __name__ == "__main__":
    demo.launch(
        share=False,
        show_error=True,
        debug=False,
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860
    )