# BATUTO X • Neurocore — Gradio multimodal generation app
# (removed stray "Spaces: / Running" web-page residue; it was not valid Python)
import os
import re
import time
import json
import base64
import asyncio
import threading
import datetime
from io import BytesIO
from typing import AsyncGenerator, List, Tuple, Optional

import aiohttp
import gradio as gr
from PIL import Image
import warnings
import requests
import concurrent.futures

# Silence asyncio deprecation warnings (noise from older event-loop APIs).
warnings.filterwarnings("ignore", category=DeprecationWarning)

# On Windows, swap the Proactor event loop for the Selector loop, which is
# friendlier to aiohttp; on other platforms the hasattr guard is False.
if hasattr(asyncio, 'WindowsProactorEventLoopPolicy') and isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# API credentials, read once at import time; empty string means "not set".
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY", "").strip()
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
REVE_API_KEY = os.getenv("REVE_API_KEY", "").strip()

# Output locations (created eagerly so later writes cannot fail on a missing dir).
OUTPUT_DIR = "generaciones_batuto"
REVE_OUTPUT_DIR = "generaciones_reve"
LOG_FILE = "neurocore_logs.jsonl"
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(REVE_OUTPUT_DIR, exist_ok=True)

# Text models served through the Sambanova chat-completions API.
SAMBA_MODELS = [
    "DeepSeek-R1", "DeepSeek-V3.1", "DeepSeek-V3", "DeepSeek-V3-0324",
    "Meta-Llama-3.3-70B-Instruct", "Llama-4-Maverick-17B-128E-Instruct",
    "Meta-Llama-3.1-8B-Instruct", "Meta-Llama-3.2-11B-Vision-Instruct",
    "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen3-32B",
    "gpt-oss-120b", "ALLaM-7B-Instruct-preview", "CodeLlama-70b",
    "DeepSeek-Coder-V2", "DeepSeek-R1-0528", "DeepSeek-R1-Distill-Llama-70B",
    "Llama-3.3-Swallow-70B-Instruct-v0.4", "DeepSeek-V3.1-Terminus",
    "DeepSeek-V3.1-cb", "Qwen3-235B", "sambanovasystems/BLOOMChat-176B-v2"
]

# Hugging Face model ids (NOTE(review): no HF inference backend is wired up
# in this file — selecting one of these currently routes to Sambanova).
HF_MODELS = [
    "mistralai/Codestral-22B-v0.1", "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "JetBrains/Mellum-4b-sft-python", "WizardLM/WizardCoder-Python-34B-V1.0",
    "Qwen/Qwen2-Audio-7B-Instruct", "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    "nvidia/nemotron-speech-streaming-en-0.6b", "openbmb/MiniCPM4.1-8B",
    "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
    "Qwen/Qwen3-Coder-Plus", "Qwen/Qwen3-Omni-30B-A3B-Instruct"
]

# "REVE" is a pseudo-model entry that routes to image generation instead of chat.
ALL_MODELS = ["AUTO-SELECT", "REVE"] + SAMBA_MODELS + HF_MODELS

CSS = """
:root{--primary:#00C896;--secondary:#00FFE0;--bg:#000;--border:rgba(0,200,150,.35);}
body,.gradio-container{background:#000!important; color: #fff !important;}
.panel{border:1px solid var(--border);border-radius:16px;padding:12px}
.dark .gradio-container {background: #000 !important;}
.dark .gr-button-primary {background: linear-gradient(45deg, #00C896, #00FFE0) !important;}
.gr-button-primary {background: linear-gradient(45deg, #00C896, #00FFE0) !important;}
"""
def log_event(data: dict) -> None:
    """Append *data* as one JSON line to LOG_FILE, stamped with the current time.

    Fix: the original mutated the caller's dict by inserting "timestamp";
    a shallow copy is written instead so callers keep their dict unchanged.
    """
    entry = dict(data)
    entry["timestamp"] = datetime.datetime.now().isoformat()
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry, ensure_ascii=False) + "\n")
def save_generation(content, model_name, type="text"):
    """Persist generated text under OUTPUT_DIR and return the file path.

    The filename is ``<model>_<unix_time>.txt`` with '/' in the model name
    replaced so HF-style ids stay filesystem-safe.
    NOTE(review): ``type`` is accepted for interface compatibility but is
    currently unused (and shadows the builtin of the same name).
    """
    safe_model = model_name.replace('/', '_')
    destination = os.path.join(OUTPUT_DIR, f"{safe_model}_{int(time.time())}.txt")
    with open(destination, "w", encoding="utf-8") as fh:
        fh.write(content)
    return destination
def guardar_imagen_local(img, index):
    """Best-effort save of a PIL image into REVE_OUTPUT_DIR.

    Returns the written path, or None if saving failed (failure is only
    printed — callers treat local persistence as optional).
    """
    try:
        filename = f"reve_{int(time.time())}_{index}.png"
        destination = os.path.join(REVE_OUTPUT_DIR, filename)
        img.save(destination)
        return destination
    except Exception as e:
        print(f"⚠️ Error guardando: {e}")
        return None
def llamar_api_reve(prompt, ratio, version, api_key, index):
    """Request a single image from the REVE API.

    Returns ``(image, credits_used, error)``: on success ``error`` is None;
    on any failure ``image`` is None and ``error`` carries the message.
    The image is also saved locally (best effort) before returning.
    """
    API_URL = "https://api.reve.com/v1/image/create"
    request_headers = {
        "Authorization": f"Bearer {api_key}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }
    body = {
        "prompt": prompt,
        "aspect_ratio": ratio,
        "version": version
    }
    try:
        response = requests.post(API_URL, headers=request_headers, json=body, timeout=60)
        if response.status_code == 200:
            data = response.json()
            if "image" in data:
                # Image arrives base64-encoded in the JSON payload.
                img = Image.open(BytesIO(base64.b64decode(data["image"])))
                guardar_imagen_local(img, index)
                return img, data.get('credits_used', 0), None
        # Non-200 status, or a 200 body without an "image" field.
        return None, 0, f"Error {response.status_code}: {response.text}"
    except Exception as e:
        return None, 0, f"Excepción: {str(e)}"
def generar_imagenes_batch(prompt, api_key, ratio="9:16", version="latest", num_imagenes=1):
    """Generate ``num_imagenes`` REVE images in parallel.

    Returns ``(images, status_message)``; the message reports either the
    count plus credits spent, or the first couple of collected errors.
    """
    if not api_key:
        return [], "❌ ¡Falta la API Key de REVE! Configúrala en las variables de entorno."
    collected = []
    failures = []
    credits_spent = 0
    # One worker per requested image: each request is independent network I/O.
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_imagenes) as pool:
        pending = [
            pool.submit(llamar_api_reve, prompt, ratio, version, api_key, i)
            for i in range(num_imagenes)
        ]
        for done in concurrent.futures.as_completed(pending):
            img, credits, error = done.result()
            if img:
                collected.append(img)
                credits_spent += credits
            if error:
                failures.append(error)
    if collected:
        return collected, f"✅ {len(collected)} imágenes generadas | Créditos usados: {credits_spent}"
    return [], f"❌ Error: {'; '.join(failures[:2])}"
def smart_select(prompt: str) -> str:
    """Pick a model by scanning the prompt for task-specific keywords.

    Checks run in priority order — coding, then reasoning, then vision,
    then audio — and fall back to a general-purpose default.
    """
    lowered = prompt.lower()
    routing = [
        (("código", "python", "script", "programa", "code"), "DeepSeek-Coder-V2"),
        (("razona", "piensa", "matemáticas", "math", "logic", "resuelve"), "DeepSeek-R1"),
        (("vision", "mira", "describe", "imagen", "image"), "Meta-Llama-3.2-11B-Vision-Instruct"),
        (("audio", "sonido", "speech", "voz"), "Qwen/Qwen2-Audio-7B-Instruct"),
    ]
    for keywords, model in routing:
        if any(word in lowered for word in keywords):
            return model
    return "DeepSeek-V3.1"
async def stream_samba(model: str, prompt: str, temp: float, tokens: int) -> AsyncGenerator[str, None]:
    """Stream a chat completion from the Sambanova API.

    Yields the *accumulated* response text after each SSE delta so the
    consumer can render progressive output. On HTTP or connection errors a
    single error string is yielded instead. The completed response, if any,
    is saved to disk and logged on the way out.

    Args:
        model: Sambanova model id.
        prompt: user message.
        temp: sampling temperature.
        tokens: max_tokens for the completion.
    """
    url = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {SAMBANOVA_API_KEY}", "Content-Type": "application/json"}
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temp,
        "max_tokens": tokens,
        "stream": True
    }
    full_res = ""
    timeout = aiohttp.ClientTimeout(total=60.0)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(url, headers=headers, json=payload) as resp:
                if resp.status != 200:
                    error_text = await resp.text()
                    yield f"Error {resp.status}: {error_text}"
                    return
                async for line in resp.content:
                    if not line:
                        continue
                    line = line.decode("utf-8").strip()
                    if not line.startswith("data: "):
                        continue
                    data_str = line[6:]
                    if data_str == "[DONE]":
                        break
                    try:
                        data = json.loads(data_str)
                    except json.JSONDecodeError:
                        continue  # tolerate keep-alives / partial lines
                    choices = data.get("choices") or []
                    if choices:
                        # FIX: guard the delta lookup. Role-only or
                        # finish_reason-only chunks have no "delta"/"content",
                        # and "content" may be explicitly null; the original
                        # KeyError'd / TypeError'd and aborted the stream.
                        delta = (choices[0].get("delta") or {}).get("content") or ""
                        full_res += delta
                        yield full_res
    except Exception as e:
        yield f"Error de conexión: {str(e)}"
    if full_res:
        save_generation(full_res, model)
        log_event({"model": model, "prompt": prompt[:100], "response_length": len(full_res)})
async def handle_execution(model: str, prompt: str, temp: float, tokens: int, n: int, ratio: str, version: str):
    """Gradio click handler: route the prompt to the right backend.

    Always yields ``(text_output, image_list)`` pairs so the same handler
    drives both the text canvas and the image gallery.

    FIX: the original *returned* the async-generator object produced by
    ``stream_samba`` as a single value for the two outputs, so text models
    never streamed (and the two-output unpack failed). Implemented as an
    async generator instead — Gradio consumes (async) generator handlers
    natively, so the ``send_btn.click`` wiring is unchanged.
    """
    if not prompt.strip():
        yield "Por favor ingresa un comando.", []
        return
    active_model = smart_select(prompt) if model == "AUTO-SELECT" else model

    # "REVE" pseudo-model → image generation (single final result).
    if active_model == "REVE":
        if not REVE_API_KEY:
            yield "❌ Error: Falta REVE_API_KEY en las variables de entorno.", []
            return
        images, message = generar_imagenes_batch(
            prompt,
            REVE_API_KEY,
            ratio,
            version,
            n
        )
        yield message, images
        return

    # Text models. NOTE(review): everything non-REVE is sent to Sambanova;
    # HF_MODELS entries have no dedicated backend here.
    if active_model in SAMBA_MODELS and not SAMBANOVA_API_KEY:
        yield "❌ Error: Falta SAMBANOVA_API_KEY en las variables de entorno.", []
        return
    async for partial in stream_samba(active_model, prompt, temp, tokens):
        yield partial, []
def create_interface():
    """Build and return the Gradio Blocks UI (the caller launches it).

    Layout: model/parameter column on the left, prompt + outputs on the
    right. The REVE-specific controls and the gallery stay hidden until the
    REVE pseudo-model is selected.
    """
    # FIX: css/theme are gr.Blocks constructor options; they were being
    # passed to demo.launch(), which does not accept them.
    with gr.Blocks(
        title="BATUTO X • Neurocore",
        css=CSS,
        theme=gr.themes.Default(
            primary_hue="emerald",
            neutral_hue="zinc",
            font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
        ),
    ) as demo:
        gr.HTML("""
            <div style="text-align: center; padding: 20px; background: linear-gradient(45deg, #000, #001a14); border-radius: 16px; margin-bottom: 20px;">
                <h1 style="color: #00C896; margin: 0; font-size: 2.5em;">⚡ BATUTO X • NEUROCORE PRO</h1>
                <p style="color: #00FFE0; margin-top: 10px;">Interfaz de Generación Multimodal Avanzada</p>
            </div>
        """)
        # State mirroring whether the image controls are currently shown.
        extra_controls_state = gr.State({"show_image_controls": False})
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Group():
                    model_opt = gr.Dropdown(
                        ALL_MODELS,
                        value="AUTO-SELECT",
                        label="🧠 Modelo",
                        info="Selecciona un modelo o usa AUTO-SELECT para detección inteligente"
                    )
                    temp_opt = gr.Slider(
                        0, 1.5, 0.7,
                        label="🌡️ Temperature",
                        info="Controla la aleatoriedad (0 = determinístico, 1.5 = muy creativo)"
                    )
                    tokens_opt = gr.Slider(
                        128, 8192, 2048,
                        step=128,
                        label="📏 Máximo Tokens",
                        info="Longitud máxima de la respuesta"
                    )
                # REVE-only controls (hidden until that model is picked).
                with gr.Group(visible=False) as image_controls:
                    num_opt = gr.Slider(
                        1, 4, 1, step=1,
                        label="🖼️ Cantidad de Imágenes",
                        info="Número de imágenes a generar"
                    )
                    ratio_opt = gr.Dropdown(
                        ["16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"],
                        value="9:16",
                        label="📐 Aspect Ratio",
                        info="Proporción de la imagen"
                    )
                    version_opt = gr.Dropdown(
                        ["latest", "reve-create@20250915"],
                        value="latest",
                        label="🔧 Versión",
                        info="Versión del modelo REVE"
                    )
            with gr.Column(scale=2):
                with gr.Group():
                    prompt_input = gr.Textbox(
                        lines=5,
                        label="💬 Entrada",
                        placeholder="Escribe tu comando aquí...\nEjemplo: 'Genera un código Python para ordenar una lista' o 'Crea una imagen de un dragón cibernético'",
                        elem_classes=["prompt-box"]
                    )
                    send_btn = gr.Button(
                        "🚀 EJECUTAR COMANDO",
                        variant="primary",
                        size="lg"
                    )
                with gr.Group():
                    canvas = gr.Textbox(
                        lines=12,
                        label="📤 Salida",
                        interactive=False
                    )
                    gallery = gr.Gallery(
                        label="🎨 Galería de Imágenes",
                        columns=2,
                        height=400,
                        visible=False
                    )

        def toggle_controls(model):
            """Show/hide the REVE controls + gallery when the model changes."""
            show = model == "REVE"
            return [
                gr.Group(visible=show),    # image_controls
                gr.Gallery(visible=show),  # gallery
                gr.Textbox(visible=True),  # canvas stays visible either way
                {"show_image_controls": show}
            ]

        model_opt.change(
            fn=toggle_controls,
            inputs=model_opt,
            outputs=[image_controls, gallery, canvas, extra_controls_state]
        )
        # Main execution wiring: one handler drives both outputs.
        send_btn.click(
            fn=handle_execution,
            inputs=[model_opt, prompt_input, temp_opt, tokens_opt, num_opt, ratio_opt, version_opt],
            outputs=[canvas, gallery]
        )
        with gr.Accordion("📚 Ejemplos de Uso", open=False):
            gr.Examples(
                examples=[
                    ["Escribe un programa en Python que implemente el algoritmo de ordenamiento quicksort", "AUTO-SELECT"],
                    ["Explica la teoría de la relatividad de Einstein en términos simples", "AUTO-SELECT"],
                    ["Genera una imagen de un dragón cibernético en una ciudad futurista", "REVE"],
                    ["Analiza este código y sugiere mejoras: def factorial(n): return 1 if n==0 else n*factorial(n-1)", "AUTO-SELECT"],
                    ["Resuelve esta ecuación: x² + 5x + 6 = 0", "AUTO-SELECT"]
                ],
                inputs=[prompt_input, model_opt],
                label="Haz clic en un ejemplo para cargarlo"
            )
    return demo
def main():
    """Entry point: print configuration status, build the UI and launch it."""
    print("🚀 Iniciando BATUTO X Neurocore")
    print(f"📁 Directorio de salida: {os.path.abspath(OUTPUT_DIR)}")
    print(f"🎨 Directorio de imágenes REVE: {os.path.abspath(REVE_OUTPUT_DIR)}")
    print(f"📝 Archivo de logs: {os.path.abspath(LOG_FILE)}")
    if SAMBANOVA_API_KEY:
        print("✅ SAMBANOVA_API_KEY configurada")
    else:
        print("⚠️ SAMBANOVA_API_KEY no encontrada - modelos Sambanova no funcionarán")
    if REVE_API_KEY:
        print("✅ REVE_API_KEY configurada")
    else:
        print("⚠️ REVE_API_KEY no encontrada - generación de imágenes REVE no disponible")
    demo = create_interface()
    # FIX: launch() accepts neither `css` nor `theme` (they are gr.Blocks
    # constructor options); passing them raised TypeError at startup.
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", "7860")),
        share=False,
        debug=False,
        show_error=True
    )


if __name__ == "__main__":
    main()