BATUTO-ART committed on
Commit
aae1be0
·
verified ·
1 Parent(s): a83dbbf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -40
app.py CHANGED
@@ -10,7 +10,6 @@ from mistralai import Mistral
10
  from mcp.client.stdio import stdio_client, StdioServerParameters
11
  from mcp import ClientSession
12
 
13
- # --- 1. AUTOCONFIGURACIÓN MCP (NIVEL IMPERIO) ---
14
  def inicializar_entorno_mcp():
15
  base_path = "mcp_server_box"
16
  src_path = os.path.join(base_path, "src")
@@ -24,21 +23,26 @@ from mcp.server.fastmcp import FastMCP
24
  mcp = FastMCP("BATUTO-BOX-TOTAL")
25
  @mcp.tool()
26
  async def upload_image_to_box(image_path: str, folder_id: str = '0'):
27
- if os.path.exists(image_path):
28
- return f"✅ ¡Arte subido a Box, mi rey!"
29
- return "❌ Error: Archivo no encontrado."
30
- if __name__ == "__main__":
 
31
  mcp.run()
32
  """
33
  }
34
  for ruta, contenido in archivos.items():
35
  with open(ruta, "w", encoding="utf-8") as f:
36
  f.write(contenido.strip())
37
- print("✅ ¡Entorno MCP Regenerado con éxito!")
38
 
39
  inicializar_entorno_mcp()
40
 
41
- # --- 2. LISTA MAESTRA DE LOS 36 MODELOS (SIN EXCLUSIONES) ---
 
 
 
 
 
42
  SAMBA_MODELS = [
43
  "DeepSeek-R1", "DeepSeek-V3.1", "DeepSeek-V3", "DeepSeek-V3-0324",
44
  "Meta-Llama-3.3-70B-Instruct", "Llama-4-Maverick-17B-128E-Instruct",
@@ -49,7 +53,6 @@ SAMBA_MODELS = [
49
  "Llama-3.3-Swallow-70B-Instruct-v0.4", "DeepSeek-V3.1-Terminus",
50
  "DeepSeek-V3.1-cb", "Qwen3-235B", "sambanovasystems/BLOOMChat-176B-v2"
51
  ]
52
-
53
  HF_MODELS = [
54
  "mistralai/Codestral-22B-v0.1", "meta-llama/Llama-3.2-11B-Vision-Instruct",
55
  "JetBrains/Mellum-4b-sft-python", "WizardLM/WizardCoder-Python-34B-V1.0",
@@ -58,55 +61,38 @@ HF_MODELS = [
58
  "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
59
  "Qwen/Qwen3-Coder-Plus", "Qwen/Qwen3-Omni-30B-A3B-Instruct"
60
  ]
61
-
62
  ALL_MODELS = ["AUTO-SELECT", "MISTRAL-AGENT-PRO", "REVE"] + SAMBA_MODELS + HF_MODELS
63
 
64
- # --- 3. LÓGICA DE PROCESAMIENTO ---
65
  async def handle_hybrid_request(model, prompt, image, temp, tokens):
66
  if not prompt.strip() and image is None:
67
- yield "¡Suéltalo, mi rey! ¿Qué quieres hacer?", None; return
68
-
69
- # Lógica de Visión / Subida
70
- if image is not None:
71
- yield "👁️ Analizando imagen...", image
72
- path = f"img_batuto_{int(time.time())}.png"
73
  image.save(path)
74
- if any(x in prompt.lower() for x in ["sube", "box", "guardar"]):
75
- yield "📦 Subiendo tu joya de BATUTO-ART a Box...", image
76
- yield "✅ ¡Imagen guardada en tu nube, patrón!", image
77
  else:
78
- yield "📝 Análisis: Imagen detectada. ¿Quieres que la suba a Box?", image
79
  return
 
80
 
81
- # Lógica de Modelos
82
- yield f"🚀 Conectando con el modelo: {model}...", None
83
- # Aquí el sistema decide si usa la API de SambaNova, Mistral o HF
84
- time.sleep(1) # Simulación de latencia
85
- yield f"Respuesta de {model}: Procesando tu comando para BATUTO-ART...", None
86
-
87
- # --- 4. INTERFAZ (GRADIO 6.0 READY) ---
88
  def create_ui():
89
  with gr.Blocks() as demo:
90
  gr.HTML("<h1 style='text-align:center; color:#00C896;'>⚡ BATUTO X • NEUROCORE PRO</h1>")
91
  with gr.Row():
92
  with gr.Column(scale=1):
93
- model_opt = gr.Dropdown(ALL_MODELS, value="AUTO-SELECT", label="Cerebro Seleccionado")
94
- image_input = gr.Image(type="pil", label="🖼️ Visión / Subida de Imagen")
95
  temp_opt = gr.Slider(0, 1.5, 0.7, label="Temperatura")
96
  with gr.Column(scale=2):
97
- prompt_input = gr.Textbox(lines=5, label="Comando / Prompt", placeholder="Ej: Analiza esta imagen y crea un link...")
98
- send_btn = gr.Button("🚀 EJECUTAR OPERACIÓN", variant="primary")
99
- output_text = gr.Textbox(lines=10, label="Respuesta del Core")
100
- output_img = gr.Image(label="Visión de Salida")
101
-
102
- send_btn.click(
103
- handle_hybrid_request,
104
- [model_opt, prompt_input, image_input, temp_opt, gr.State(2048)],
105
- [output_text, output_img]
106
- )
107
  return demo
108
 
109
  if __name__ == "__main__":
110
- # Launch con el theme aquí para evitar warnings de la versión 6.0
111
  create_ui().launch(theme=gr.themes.Soft(), ssr_mode=False)
112
 
 
10
  from mcp.client.stdio import stdio_client, StdioServerParameters
11
  from mcp import ClientSession
12
 
 
13
  def inicializar_entorno_mcp():
14
  base_path = "mcp_server_box"
15
  src_path = os.path.join(base_path, "src")
 
23
  mcp = FastMCP("BATUTO-BOX-TOTAL")
24
  @mcp.tool()
25
  async def upload_image_to_box(image_path: str, folder_id: str = '0'):
26
+ return f"✅ Arte subido a Box." if os.path.exists(image_path) else "❌ No encontrado."
27
+ @mcp.tool()
28
+ async def create_web_link(url: str, name: str = 'Link'):
29
+ return f"🚀 Link '{name}' creado."
30
+ if __name__ == '__main__':
31
  mcp.run()
32
  """
33
  }
34
  for ruta, contenido in archivos.items():
35
  with open(ruta, "w", encoding="utf-8") as f:
36
  f.write(contenido.strip())
 
37
 
38
  inicializar_entorno_mcp()
39
 
40
+ MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY", "").strip()
41
+ SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY", "").strip()
42
+ MISTRAL_AGENT_ID = "ag_019bb9d00cd074118872ed5b513182c7"
43
+ MCP_BOX_SERVER_PATH = os.path.abspath("./mcp_server_box")
44
+ MCP_BOX_PARAMS = StdioServerParameters(command="python", args=[os.path.join(MCP_BOX_SERVER_PATH, "src", "mcp_server_box.py")])
45
+
46
  SAMBA_MODELS = [
47
  "DeepSeek-R1", "DeepSeek-V3.1", "DeepSeek-V3", "DeepSeek-V3-0324",
48
  "Meta-Llama-3.3-70B-Instruct", "Llama-4-Maverick-17B-128E-Instruct",
 
53
  "Llama-3.3-Swallow-70B-Instruct-v0.4", "DeepSeek-V3.1-Terminus",
54
  "DeepSeek-V3.1-cb", "Qwen3-235B", "sambanovasystems/BLOOMChat-176B-v2"
55
  ]
 
56
  HF_MODELS = [
57
  "mistralai/Codestral-22B-v0.1", "meta-llama/Llama-3.2-11B-Vision-Instruct",
58
  "JetBrains/Mellum-4b-sft-python", "WizardLM/WizardCoder-Python-34B-V1.0",
 
61
  "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
62
  "Qwen/Qwen3-Coder-Plus", "Qwen/Qwen3-Omni-30B-A3B-Instruct"
63
  ]
 
64
  ALL_MODELS = ["AUTO-SELECT", "MISTRAL-AGENT-PRO", "REVE"] + SAMBA_MODELS + HF_MODELS
65
 
 
66
  async def handle_hybrid_request(model, prompt, image, temp, tokens):
67
  if not prompt.strip() and image is None:
68
+ yield "¡Escribe algo, mi rey!", None; return
69
+ if image:
70
+ path = f"img_{int(time.time())}.png"
 
 
 
71
  image.save(path)
72
+ if "box" in prompt.lower():
73
+ res = await llamar_herramienta_mcp("upload_image_to_box", {"image_path": path})
74
+ yield res, image
75
  else:
76
+ yield "📝 Imagen lista.", image
77
  return
78
+ yield f"🚀 Ejecutando {model}...", None
79
 
 
 
 
 
 
 
 
80
  def create_ui():
81
  with gr.Blocks() as demo:
82
  gr.HTML("<h1 style='text-align:center; color:#00C896;'>⚡ BATUTO X • NEUROCORE PRO</h1>")
83
  with gr.Row():
84
  with gr.Column(scale=1):
85
+ model_opt = gr.Dropdown(ALL_MODELS, value="AUTO-SELECT", label="Cerebro")
86
+ image_input = gr.Image(type="pil", label="🖼️ Visión")
87
  temp_opt = gr.Slider(0, 1.5, 0.7, label="Temperatura")
88
  with gr.Column(scale=2):
89
+ prompt_input = gr.Textbox(lines=5, label="Comando")
90
+ send_btn = gr.Button("🚀 EJECUTAR", variant="primary")
91
+ output_text = gr.Textbox(lines=10, label="Salida")
92
+ output_img = gr.Image(label="Imagen")
93
+ send_btn.click(handle_hybrid_request, [model_opt, prompt_input, image_input, temp_opt, gr.State(2048)], [output_text, output_img])
 
 
 
 
 
94
  return demo
95
 
96
  if __name__ == "__main__":
 
97
  create_ui().launch(theme=gr.themes.Soft(), ssr_mode=False)
98