# BATUTO X • NEUROCORE — Gradio Space entry point.
# (Hugging Face Spaces page residue "Spaces: / Running / Running" removed;
# this file must start with valid Python.)
import asyncio
import base64
import logging
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor

import gradio as gr
import requests
from mistralai import Mistral
from PIL import Image
# Environment configuration and API keys (all read from env vars; empty string
# when unset — downstream callers are expected to handle missing keys).
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", "").strip()  # Mistral platform key
SAMBA_API_KEY = os.getenv("SAMBA_API_KEY", "").strip()  # SambaNova Cloud key
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()  # Hugging Face token
# SambaNova API base URL — NOTE(review): presumably an OpenAI-compatible
# endpoint; confirm before wiring a client against it.
SAMBA_BASE_URL = "https://api.sambanova.ai/v1"
# --- 1. MCP (BOX) SELF-CONFIGURATION ---
def inicializar_entorno_mcp():
    """Write the local MCP Box-server package to disk.

    Creates ``mcp_server_box/src/mcp_server_box.py`` containing a minimal
    FastMCP server exposing one ``upload_image_to_box`` tool.

    BUG FIX: the embedded server source had lost its indentation (the tool
    body and the ``mcp.run()`` guard were flush-left), so the generated file
    was not importable Python. The source is now written with valid
    indentation; runtime strings are otherwise unchanged.
    """
    base_path = "mcp_server_box"
    src_path = os.path.join(base_path, "src")
    os.makedirs(src_path, exist_ok=True)
    server_source = '''\
import os
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("BATUTO-BOX-TOTAL")

@mcp.tool()
async def upload_image_to_box(image_path: str, folder_id: str = '0'):
    return f"✅ Arte subido a Box." if os.path.exists(image_path) else "❌ No encontrado."

if __name__ == "__main__":
    mcp.run()
'''
    with open(os.path.join(src_path, "mcp_server_box.py"), "w", encoding="utf-8") as f:
        f.write(server_source)
def launch_mcp_server():
    """Spawn the generated MCP Box server as a detached child process.

    BUG FIX: uses ``sys.executable`` instead of the bare ``"python"`` name,
    which may be absent or resolve to a different interpreter on the host
    (e.g. Spaces images where only ``python3`` exists). Output is discarded;
    the child is intentionally fire-and-forget (not tracked or reaped).
    """
    import subprocess
    subprocess.Popen(
        [sys.executable, "mcp_server_box/src/mcp_server_box.py"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
# Module-level bootstrap: write the MCP server source, then spawn it.
inicializar_entorno_mcp()
# NOTE(review): the executor's with-block waits for the submitted task before
# exiting, so this adds no real concurrency — it only works because Popen
# returns immediately. A plain launch_mcp_server() call would be equivalent.
with ThreadPoolExecutor() as executor:
    executor.submit(launch_mcp_server)
# --- 2. MASTER REGISTRY (36 MODELS) ---
# Model IDs served via the SambaNova Cloud API.
SAMBA_MODELS = [
    "DeepSeek-R1", "DeepSeek-V3.1", "DeepSeek-V3", "DeepSeek-V3-0324",
    "Meta-Llama-3.3-70B-Instruct", "Llama-4-Maverick-17B-128E-Instruct",
    "Meta-Llama-3.1-8B-Instruct", "Meta-Llama-3.2-11B-Vision-Instruct",
    "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen3-32B",
    "gpt-oss-120b", "ALLaM-7B-Instruct-preview", "CodeLlama-70b",
    "DeepSeek-Coder-V2", "DeepSeek-R1-0528", "DeepSeek-R1-Distill-Llama-70B",
    "Llama-3.3-Swallow-70B-Instruct-v0.4", "DeepSeek-V3.1-Terminus",
    "DeepSeek-V3.1-cb", "Qwen3-235B", "sambanovasystems/BLOOMChat-176B-v2"
]
# Model repo IDs served via Hugging Face.
HF_MODELS = [
    "mistralai/Codestral-22B-v0.1", "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "JetBrains/Mellum-4b-sft-python", "WizardLM/WizardCoder-Python-34B-V1.0",
    "Qwen/Qwen2-Audio-7B-Instruct", "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    "nvidia/nemotron-speech-streaming-en-0.6b", "openbmb/MiniCPM4.1-8B",
    "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
    "Qwen/Qwen3-Coder-Plus", "Qwen/Qwen3-Omni-30B-A3B-Instruct"
]
# Dropdown choices: routing pseudo-entries first, then the real backends.
ALL_MODELS = ["AUTO-SELECT", "MISTRAL-AGENT-PRO", "REVE"] + SAMBA_MODELS + HF_MODELS
# --- 3. PROCESSING CORE ---
async def handle_hybrid_request(model, prompt, image, temp, tokens):
    """Async-generator Gradio handler streaming ``(status_text, image)`` pairs.

    Parameters:
        model:  selected model name from the dropdown.
        prompt: user command text.
        image:  optional uploaded PIL image (or None).
        temp:   sampling temperature — currently an unused placeholder.
        tokens: max-token budget — currently an unused placeholder.

    Yields progress tuples; the second element is the image to display
    (or None on the text-only path).
    """
    # Guard: nothing to do without text or an image.
    if not prompt.strip() and image is None:
        yield "¡Échame un grito, mi rey!", None
        return
    try:
        # Explicit None-check rather than truthiness for the image object.
        if image is not None:
            yield "👁️ Analizando con Visión BATUTO...", image
            # Persist the upload so a downstream uploader tool can find it.
            path = f"batuto_art_{int(time.time())}.png"
            image.save(path)
            if "box" in prompt.lower() or "sube" in prompt.lower():
                yield "📦 Mandando tu joya directo a Box...", image
                # Real Box-upload logic (MCP tool) hooks in here.
                yield "✅ ¡Arte de BATUTO-ART guardado en la nube!", image
            else:
                # Default: image description (e.g. Llama-3.2-Vision).
                yield f"📝 Análisis de imagen con {model}: Operación exitosa.", image
            return
        # Text path (Mistral / SambaNova).
        yield f"🚀 Despegando con el modelo {model}...", None
        # BUG FIX: time.sleep() inside an async generator blocks the whole
        # event loop; asyncio.sleep() yields control instead.
        await asyncio.sleep(0.5)
        yield f"✅ Neurocore responde: Comando '{prompt}' procesado bajo el mando de BATUTO.", None
    except Exception as e:
        # Surface any failure to the UI instead of crashing the stream.
        yield f"❌ Error en el motor: {str(e)}", None
# --- 4. CLEAN UI (GRADIO) ---
def create_ui():
    """Build and return the Gradio Blocks app.

    The Soft theme is applied here on the ``gr.Blocks`` constructor — that is
    where Gradio accepts a ``theme`` argument (``launch()`` has no such
    parameter).
    """
    with gr.Blocks(title="BATUTO X • NEUROCORE", theme=gr.themes.Soft()) as demo:
        gr.HTML("<h1 style='text-align:center; color:#00C896;'>⚡ BATUTO X • NEUROCORE PRO</h1>")
        with gr.Row():
            with gr.Column(scale=1):
                model_opt = gr.Dropdown(ALL_MODELS, value="AUTO-SELECT", label="Cerebro")
                image_input = gr.Image(type="pil", label="🖼️ Visión / Subida")
                temp_opt = gr.Slider(0, 1.5, 0.7, label="Temperatura")
            with gr.Column(scale=2):
                prompt_input = gr.Textbox(lines=5, label="Comando", placeholder="Crea un link o analiza mi arte...")
                send_btn = gr.Button("🚀 EJECUTAR OPERACIÓN", variant="primary")
                output_text = gr.Textbox(lines=10, label="Salida del Core")
                output_img = gr.Image(label="Imagen de Salida")
        # Streams handle_hybrid_request's yields into the two outputs; the
        # gr.State(2048) supplies the fixed max-token budget.
        send_btn.click(
            handle_hybrid_request,
            inputs=[model_opt, prompt_input, image_input, temp_opt, gr.State(2048)],
            outputs=[output_text, output_img],
        )
    return demo
if __name__ == "__main__":
    # BUG FIX: gr.Blocks.launch() does not accept a ``theme`` keyword (theme
    # is a Blocks/Interface constructor argument), so passing it here raised
    # a TypeError at startup. ssr_mode=False disables server-side rendering.
    create_ui().launch(ssr_mode=False)