# =========================================================
# BATUTOrquestaIA V3 — Compatible con Gradio 6.0
# =========================================================
import base64
import json
import os
from io import BytesIO

import gradio as gr
import huggingface_hub
import requests
from dotenv import load_dotenv
from openai import OpenAI
from PIL import Image

load_dotenv()

# --- API CONFIGURATION -------------------------------------------------------
# All credentials come from the environment (.env via python-dotenv).
OPENAI_CLIENT = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
SAMBA_API_KEY = os.getenv("SAMBANOVA_API_KEY")
REVE_API_KEY = os.getenv("REVE_API_KEY")
HUGGINGFACE_TOKEN = os.getenv("HF_TOKEN")

# --- MASTER MODEL REGISTRY ---------------------------------------------------
# UI display name -> {"engine": backend selector, "id": provider model id}.
# "engine" is dispatched on in responder_orquesta / gen_image.
MODELOS_UNIFICADOS = {
    # === LOCAL / HUGGING FACE MODELS ===
    "🧠 Mellum 4B Local": {"engine": "huggingface", "id": "JetBrains/Mellum-4b-sft-python"},
    "🧠 Codestral 22B": {"engine": "huggingface", "id": "mistralai/Codestral-22B-v0.1"},
    "💻 WizardCoder-V2": {"engine": "huggingface", "id": "WizardLM/WizardCoder-Python-34B-V1.0"},
    "🐍 Llama 3.x Code": {"engine": "huggingface", "id": "meta-llama/Llama-3.2-11B-Vision-Instruct"},

    # === SAMBANOVA (high speed) ===
    "💡 DeepSeek R1": {"engine": "samba", "id": "DeepSeek-R1"},
    "💡 DeepSeek V3": {"engine": "samba", "id": "DeepSeek-V3"},
    "💡 DeepSeek V3.1": {"engine": "samba", "id": "DeepSeek-V3.1"},
    "💡 DeepSeek V3-0324": {"engine": "samba", "id": "DeepSeek-V3-0324"},
    "🦙 Llama 3.1 8B": {"engine": "samba", "id": "Meta-Llama-3.1-8B-Instruct"},
    "🦙 Llama 3.3 70B": {"engine": "samba", "id": "Meta-Llama-3.3-70B-Instruct"},
    "🦙 Llama-4 Maverick 17B": {"engine": "samba", "id": "Llama-4-Maverick-17B-128E-Instruct"},
    "🧩 Qwen3-32B": {"engine": "samba", "id": "Qwen3-32B"},
    "🌀 GPT-OSS 120B": {"engine": "samba", "id": "gpt-oss-120b"},
    "🌍 ALLaM-7B": {"engine": "samba", "id": "ALLaM-7B-Instruct-preview"},

    # === GITHUB AI / PREMIUM ===
    "🧬 GPT-5 Mini": {"engine": "github", "id": "openai/gpt-5-mini"},
    "🧬 Grok 3": {"engine": "github", "id": "xai/grok-3"},
    "🧬 Mistral Code": {"engine": "github", "id": "mistral-ai/Codestral-2501"},

    # === OPENAI DIRECT ===
    # BUGFIX: "gpt-4.5-turbo" and "gpt-o1" are not valid OpenAI model ids and
    # always produced API errors; the published ids are "gpt-4.5-preview"
    # and "o1".
    "🪩 GPT-4.5 Omni": {"engine": "openai", "id": "gpt-4.5-preview"},
    "🪩 GPT-o1": {"engine": "openai", "id": "o1"},
    "🪩 GPT-4o": {"engine": "openai", "id": "gpt-4o"},
    "🪩 GPT-4o-mini": {"engine": "openai", "id": "gpt-4o-mini"},

    # === IMAGE GENERATION ===
    "🎨 REVE CREATE (Imagen)": {"engine": "reve", "id": "reve-v1"},
    "🎨 Stable Diffusion 3": {"engine": "huggingface", "id": "stabilityai/stable-diffusion-3.5-medium"},

    # === SPECIALIZED CODE MODELS ===
    "💾 CodeLlama 70B": {"engine": "samba", "id": "CodeLlama-70b"},
    "🔧 DeepSeek Coder": {"engine": "samba", "id": "DeepSeek-Coder-V2"},
}


def query_huggingface(model_id, prompt, max_length=500):
    """Query a Hugging Face Inference API text-generation model.

    Args:
        model_id: Hub repo id, e.g. "mistralai/Codestral-22B-v0.1".
        prompt: Text sent as the "inputs" field.
        max_length: Generation-length cap forwarded to the API.
            NOTE(review): newer HF endpoints expect "max_new_tokens";
            confirm which parameter the deployed endpoint honours.

    Returns:
        Generated text, or a "❌ ..."-prefixed message on failure — callers
        display the return value either way, so this never raises.
    """
    api_url = f"https://api-inference.huggingface.co/models/{model_id}"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
    payload = {
        "inputs": prompt,
        "parameters": {"max_length": max_length, "temperature": 0.7, "top_p": 0.9},
    }
    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        result = response.json()
        # Text-generation endpoints answer [{"generated_text": ...}].
        if isinstance(result, list) and result:
            return result[0].get('generated_text', str(result))
        # BUGFIX: the API returns {"error": ...} (e.g. while the model is
        # loading); surface it as an error instead of stringifying the dict.
        if isinstance(result, dict) and "error" in result:
            return f"❌ Error con Hugging Face: {result['error']}"
        return str(result)
    except Exception as e:
        return f"❌ Error con Hugging Face: {str(e)}"
def _reve_image(prompt):
    """Single REVE create call; returns a PIL.Image or None on any failure."""
    url = "https://api.reve.com/v1/image/create"
    headers = {
        "Authorization": f"Bearer {REVE_API_KEY}",
        "Content-Type": "application/json",
    }
    payload = {
        "prompt": prompt,
        "aspect_ratio": "1:1",
        "version": "latest",
        "quality": "standard",
        "guidance_scale": 7.5,
    }
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        if response.status_code == 200:
            data = response.json()
            if "image" in data:
                # Inline base64-encoded image payload.
                img_data = base64.b64decode(data["image"])
                return Image.open(BytesIO(img_data))
            if "image_url" in data:
                # Indirect payload: fetch the image from the returned URL.
                # BUGFIX: added timeout (the original GET could hang forever).
                img_resp = requests.get(data["image_url"], timeout=60)
                return Image.open(BytesIO(img_resp.content))
    except Exception as e:
        print(f"Error REVE: {e}")
    return None


def _hf_image(prompt):
    """Stable Diffusion 3.5 via the HF Inference API; None on failure."""
    api_url = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-medium"
    headers = {"Authorization": f"Bearer {HUGGINGFACE_TOKEN}"}
    try:
        response = requests.post(api_url, headers=headers, json={"inputs": prompt}, timeout=60)
        if response.status_code == 200:
            return Image.open(BytesIO(response.content))
    # BUGFIX: narrowed the original bare `except: pass`; best-effort is kept
    # (caller treats None as "could not generate") but bugs aren't swallowed.
    except (requests.RequestException, OSError):
        pass
    return None


def gen_image(prompt, engine="reve"):
    """Generate an image with the requested provider.

    Args:
        prompt: Text prompt for the image model.
        engine: "reve" (with HF fallback) or "huggingface".

    Returns:
        PIL.Image on success, None when every attempted provider fails or
        the engine is unknown.
    """
    if engine == "reve":
        img = _reve_image(prompt)
        if img is not None:
            return img
        # BUGFIX: the original code documented a Hugging Face fallback but
        # placed it on `elif engine == "huggingface"`, making it unreachable
        # after a REVE failure. Now the fallback actually runs.
        return _hf_image(prompt)
    if engine == "huggingface":
        return _hf_image(prompt)
    return None


def responder_orquesta(mensaje, historial, modelo_nombre):
    """Dispatch one chat turn to the backend selected in the UI.

    Args:
        mensaje: User message text.
        historial: Chat history as a list of (user, assistant) tuples.
        modelo_nombre: Display name; key into MODELOS_UNIFICADOS.

    Returns:
        Tuple of (updated history, PIL.Image or None). All errors are
        reported inside the chat history rather than raised, so the UI
        never breaks. BUGFIX: history is now consistently returned as a new
        list (the original mixed in-place `append` with `historial + [...]`).
    """
    cfg = MODELOS_UNIFICADOS.get(modelo_nombre)
    if not cfg:
        return historial + [(mensaje, "❌ Modelo no encontrado")], None

    # Case 1: image-generation models are routed to gen_image.
    if cfg["engine"] in ("reve", "huggingface") and cfg["id"] in (
        "reve-v1",
        "stabilityai/stable-diffusion-3.5-medium",
    ):
        img = gen_image(mensaje, engine=cfg["engine"])
        if img:
            texto = (
                f"🎨 **¡Aquí tienes tu creación, mi BATUTO!**\n\n"
                f"Modelo: {modelo_nombre}\n\n*Firma: BATUTO-ART*"
            )
            return historial + [(mensaje, texto)], img
        return historial + [(mensaje, "❌ No se pudo generar la imagen. Intenta con otro prompt.")], None

    # System prompt shared by every text backend.
    prompt_sistema = """Eres BATUTO-ART, una IA con flow chilango de barrio pero conocimiento de nivel mundial.

Instrucciones:
1. Responde en español a menos que se pida otro idioma
2. Sé creativo pero preciso
3. Firma siempre como "BATUTO-ART" al final
4. Si es código, explica brevemente
5. Mantén un tono profesional pero con personalidad

Modelo actual: """ + modelo_nombre

    try:
        if cfg["engine"] == "samba":
            # SambaNova exposes an OpenAI-compatible endpoint.
            client_samba = OpenAI(
                api_key=SAMBA_API_KEY,
                base_url="https://api.sambanova.ai/v1",
            )
            res = client_samba.chat.completions.create(
                model=cfg["id"],
                messages=[
                    {"role": "system", "content": prompt_sistema},
                    {"role": "user", "content": mensaje},
                ],
                temperature=0.7,
                max_tokens=1000,
            )
            respuesta = res.choices[0].message.content
        elif cfg["engine"] == "huggingface":
            respuesta = query_huggingface(cfg["id"], f"{prompt_sistema}\n\nUsuario: {mensaje}")
        elif cfg["engine"] in ("openai", "github"):
            # NOTE(review): "github" models are currently proxied to OpenAI's
            # gpt-4o — the selected GitHub model id is silently ignored. If
            # GitHub Models should really be used, this needs the
            # https://models.github.ai/inference endpoint + GITHUB_TOKEN.
            model_to_use = cfg["id"] if cfg["engine"] == "openai" else "gpt-4o"
            res = OPENAI_CLIENT.chat.completions.create(
                model=model_to_use,
                messages=[
                    {"role": "system", "content": prompt_sistema},
                    {"role": "user", "content": mensaje},
                ],
                temperature=0.7,
            )
            respuesta = res.choices[0].message.content
        else:
            respuesta = "❌ Motor no soportado aún"
        return historial + [(mensaje, respuesta)], None
    except Exception as e:
        error_msg = (
            f"❌ **Error de conexión con {modelo_nombre}:**\n\n`{str(e)}`\n\n"
            "Intenta con otro modelo o revisa tu conexión."
        )
        return historial + [(mensaje, error_msg)], None
return historial + [(mensaje, error_msg)], None # --- CSS ACTUALIZADO --- CSS = """ .gradio-container { background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%) !important; color: #e2e8f0 !important; font-family: 'Segoe UI', system-ui, -apple-system, sans-serif; } .floating-eleven { position: fixed; bottom: 100px; right: 20px; z-index: 1000; border: 2px solid #6366f1; border-radius: 16px; background: rgba(15, 23, 42, 0.9); backdrop-filter: blur(10px); padding: 10px; box-shadow: 0 10px 30px rgba(99, 102, 241, 0.3); } .batuto-sig { position: fixed; top: 15px; left: 15px; font-family: 'Courier New', monospace; color: #6366f1; font-weight: 800; font-size: 18px; z-index: 1001; background: linear-gradient(45deg, #6366f1, #8b5cf6, #ec4899); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text; } .model-selector { background: rgba(30, 41, 59, 0.8) !important; border: 1px solid rgba(99, 102, 241, 0.3) !important; color: #e2e8f0 !important; border-radius: 12px !important; } .chatbot { background: rgba(15, 23, 42, 0.7) !important; border: 1px solid rgba(99, 102, 241, 0.2) !important; border-radius: 16px !important; backdrop-filter: blur(10px); } .textbox { background: rgba(30, 41, 59, 0.8) !important; border: 1px solid rgba(99, 102, 241, 0.3) !important; border-radius: 12px !important; color: #e2e8f0 !important; } .button { background: linear-gradient(45deg, #6366f1, #8b5cf6) !important; color: white !important; border: none !important; border-radius: 12px !important; font-weight: 600 !important; } .button:hover { background: linear-gradient(45deg, #8b5cf6, #ec4899) !important; transform: translateY(-2px); box-shadow: 0 5px 20px rgba(99, 102, 241, 0.4); } .image-display { border-radius: 16px !important; border: 2px solid rgba(99, 102, 241, 0.2) !important; } .markdown-text { color: #e2e8f0 !important; } .contain { contain: layout style paint; } """ # --- CREACIÓN DE LA INTERFAZ --- with gr.Blocks(title="BATUTOrquestaIA 
V3 - Multi-Model Fusion") as demo: # Header fijo gr.HTML("""
Powered by ElevenLabs
🎨 BATUTO-ART Platform | Multi-Model AI Fusion System
⚠️ Algunos modelos pueden requerir API keys adicionales
v3.0 | Compatible con Gradio 6.0