File size: 15,848 Bytes
015ebd3
 
 
 
 
 
610d436
8dbeac5
015ebd3
610d436
015ebd3
 
 
8dbeac5
610d436
d0aba05
 
610d436
 
 
 
d0aba05
610d436
 
015ebd3
d0aba05
015ebd3
 
 
 
d0aba05
8dbeac5
d0aba05
8dbeac5
015ebd3
d0aba05
015ebd3
d0aba05
8dbeac5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0aba05
 
015ebd3
4d19917
 
 
 
610d436
 
d0aba05
4d19917
 
8dbeac5
d0aba05
8dbeac5
 
 
 
 
d0aba05
4d19917
8dbeac5
4d19917
 
8dbeac5
015ebd3
d0aba05
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8dbeac5
d0aba05
8dbeac5
610d436
 
d0aba05
610d436
 
 
 
 
8dbeac5
015ebd3
610d436
d0aba05
015ebd3
610d436
015ebd3
8dbeac5
 
 
 
 
015ebd3
610d436
4d19917
610d436
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d0aba05
 
610d436
 
 
4d19917
 
d0aba05
4d19917
610d436
d0aba05
610d436
d0aba05
 
 
 
 
 
 
 
4d19917
d0aba05
 
 
 
 
4d19917
015ebd3
610d436
d0aba05
 
610d436
 
 
 
 
 
4d19917
d0aba05
 
 
610d436
 
 
 
 
 
 
 
 
d0aba05
610d436
 
 
 
 
d0aba05
610d436
 
 
 
 
 
d0aba05
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
610d436
 
 
 
 
 
d0aba05
610d436
 
 
 
 
 
d0aba05
610d436
 
 
 
 
 
d0aba05
610d436
 
 
 
 
 
 
 
 
d0aba05
610d436
 
d0aba05
 
 
 
 
 
610d436
d0aba05
 
 
 
 
 
610d436
 
 
 
d0aba05
610d436
 
d0aba05
610d436
 
d0aba05
610d436
 
 
d0aba05
610d436
 
 
 
 
 
 
 
 
 
 
 
 
 
015ebd3
d0aba05
 
 
610d436
d0aba05
610d436
 
 
 
 
d0aba05
610d436
 
 
 
d0aba05
610d436
d0aba05
610d436
 
d0aba05
 
 
 
 
 
 
 
 
 
 
 
610d436
d0aba05
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
import os
import re
import time
import json
import base64
import asyncio
import threading
import datetime
from io import BytesIO
from typing import AsyncGenerator, List, Tuple, Optional

import aiohttp
import gradio as gr
from PIL import Image
import warnings
import requests
import concurrent.futures

# Silence asyncio-related DeprecationWarning noise.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Event loop policy: on Windows, replace the default Proactor loop with the
# Selector loop (presumably for aiohttp compatibility — TODO confirm).
# The hasattr guard makes this a no-op on non-Windows platforms.
if hasattr(asyncio, 'WindowsProactorEventLoopPolicy') and isinstance(asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy):
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# API credentials, read from the environment (empty string when unset).
# NOTE(review): HF_TOKEN is read but not referenced elsewhere in this file.
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY", "").strip()
HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
REVE_API_KEY = os.getenv("REVE_API_KEY", "").strip()

# Output directories and log file (created at import time if missing).
OUTPUT_DIR = "generaciones_batuto"
REVE_OUTPUT_DIR = "generaciones_reve"
LOG_FILE = "neurocore_logs.jsonl"
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(REVE_OUTPUT_DIR, exist_ok=True)

# Text models served by the SambaNova API.
SAMBA_MODELS = [
    "DeepSeek-R1", "DeepSeek-V3.1", "DeepSeek-V3", "DeepSeek-V3-0324",
    "Meta-Llama-3.3-70B-Instruct", "Llama-4-Maverick-17B-128E-Instruct",
    "Meta-Llama-3.1-8B-Instruct", "Meta-Llama-3.2-11B-Vision-Instruct",
    "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Qwen3-32B",
    "gpt-oss-120b", "ALLaM-7B-Instruct-preview", "CodeLlama-70b",
    "DeepSeek-Coder-V2", "DeepSeek-R1-0528", "DeepSeek-R1-Distill-Llama-70B",
    "Llama-3.3-Swallow-70B-Instruct-v0.4", "DeepSeek-V3.1-Terminus",
    "DeepSeek-V3.1-cb", "Qwen3-235B", "sambanovasystems/BLOOMChat-176B-v2"
]

# Hugging Face model identifiers offered in the dropdown.
HF_MODELS = [
    "mistralai/Codestral-22B-v0.1", "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "JetBrains/Mellum-4b-sft-python", "WizardLM/WizardCoder-Python-34B-V1.0",
    "Qwen/Qwen2-Audio-7B-Instruct", "HuggingFaceTB/SmolLM2-1.7B-Instruct",
    "nvidia/nemotron-speech-streaming-en-0.6b", "openbmb/MiniCPM4.1-8B",
    "naver-hyperclovax/HyperCLOVAX-SEED-Text-Instruct-0.5B",
    "Qwen/Qwen3-Coder-Plus", "Qwen/Qwen3-Omni-30B-A3B-Instruct"
]

# Full dropdown list; "AUTO-SELECT" routes via smart_select() and
# "REVE" is the special image-generation entry.
ALL_MODELS = ["AUTO-SELECT", "REVE"] + SAMBA_MODELS + HF_MODELS

# Custom dark/emerald styling for the Gradio UI.
CSS = """
:root{--primary:#00C896;--secondary:#00FFE0;--bg:#000;--border:rgba(0,200,150,.35);}
body,.gradio-container{background:#000!important; color: #fff !important;}
.panel{border:1px solid var(--border);border-radius:16px;padding:12px}
.dark .gradio-container {background: #000 !important;}
.dark .gr-button-primary {background: linear-gradient(45deg, #00C896, #00FFE0) !important;}
.gr-button-primary {background: linear-gradient(45deg, #00C896, #00FFE0) !important;}
"""

def log_event(data: dict):
    """Append an event record (plus an ISO timestamp) to the JSONL log file.

    Mutates `data` in place by adding a "timestamp" key before serializing.
    """
    data["timestamp"] = datetime.datetime.now().isoformat()
    record = json.dumps(data, ensure_ascii=False)
    with open(LOG_FILE, "a", encoding="utf-8") as log:
        log.write(record + "\n")

def save_generation(content, model_name, type="text"):
    """Persist generated content under OUTPUT_DIR and return the file path.

    Args:
        content: Text to write to disk (UTF-8).
        model_name: Model identifier; '/' is replaced by '_' so it is
            filename-safe.
        type: Content kind. "text" (the default) saves with a ".txt"
            extension — identical to the previous behavior; any other value
            is used directly as the extension (e.g. type="json" -> ".json").
            NOTE: the name shadows the builtin `type` but is kept for
            backward compatibility with existing callers.

    Returns:
        The path of the file that was written.
    """
    # The original ignored `type` entirely; it now selects the extension,
    # with the default producing the same ".txt" filenames as before.
    extension = "txt" if type == "text" else type
    filename = f"{model_name.replace('/', '_')}_{int(time.time())}.{extension}"
    path = os.path.join(OUTPUT_DIR, filename)
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
    return path

def guardar_imagen_local(img, index):
    """Save a generated image as PNG under REVE_OUTPUT_DIR.

    Returns the saved path, or None if saving failed (best-effort: any
    exception is reported to stdout rather than propagated).
    """
    try:
        stamp = int(time.time())
        destino = os.path.join(REVE_OUTPUT_DIR, f"reve_{stamp}_{index}.png")
        img.save(destino)
        return destino
    except Exception as e:
        print(f"⚠️ Error guardando: {e}")
        return None

def llamar_api_reve(prompt, ratio, version, api_key, index):
    """Request a single image from the REVE API.

    The decoded image is also saved locally via guardar_imagen_local().

    Returns:
        (image, credits_used, error) — `image` is a PIL Image or None;
        `error` is None on success, otherwise a human-readable message.
    """
    endpoint = "https://api.reve.com/v1/image/create"

    request_headers = {
        "Authorization": f"Bearer {api_key}",
        "Accept": "application/json",
        "Content-Type": "application/json"
    }
    request_body = {
        "prompt": prompt,
        "aspect_ratio": ratio,
        "version": version
    }

    try:
        resp = requests.post(endpoint, headers=request_headers, json=request_body, timeout=60)
        if resp.status_code == 200:
            payload = resp.json()
            if "image" in payload:
                # Response carries the image as base64-encoded bytes.
                decoded = base64.b64decode(payload["image"])
                imagen = Image.open(BytesIO(decoded))
                guardar_imagen_local(imagen, index)
                return imagen, payload.get('credits_used', 0), None
        return None, 0, f"Error {resp.status_code}: {resp.text}"
    except Exception as e:
        return None, 0, f"Excepción: {str(e)}"

def generar_imagenes_batch(prompt, api_key, ratio="9:16", version="latest", num_imagenes=1):
    """Generate `num_imagenes` REVE images concurrently for one prompt.

    Returns:
        (images, status_message) — `images` is the list of PIL Images that
        succeeded (possibly empty); the message summarizes success or the
        first two errors encountered.
    """
    if not api_key:
        return [], "❌ ¡Falta la API Key de REVE! Configúrala en las variables de entorno."

    generadas = []
    fallos = []
    creditos = 0

    # One worker per requested image; each future yields (img, credits, error).
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_imagenes) as pool:
        pendientes = [
            pool.submit(llamar_api_reve, prompt, ratio, version, api_key, idx)
            for idx in range(num_imagenes)
        ]

        for terminado in concurrent.futures.as_completed(pendientes):
            imagen, usado, fallo = terminado.result()
            if imagen:
                generadas.append(imagen)
                creditos += usado
            if fallo:
                fallos.append(fallo)

    if generadas:
        return generadas, f"✅ {len(generadas)} imágenes generadas | Créditos usados: {creditos}"
    else:
        return [], f"❌ Error: {'; '.join(fallos[:2])}"

def smart_select(prompt: str) -> str:
    """Choose a model by scanning the prompt for routing keywords.

    Checks keyword groups in priority order (code > reasoning > vision >
    audio) and falls back to the general-purpose default when none match.
    """
    text = prompt.lower()
    routes = [
        (("código", "python", "script", "programa", "code"),
         "DeepSeek-Coder-V2"),
        (("razona", "piensa", "matemáticas", "math", "logic", "resuelve"),
         "DeepSeek-R1"),
        (("vision", "mira", "describe", "imagen", "image"),
         "Meta-Llama-3.2-11B-Vision-Instruct"),
        (("audio", "sonido", "speech", "voz"),
         "Qwen/Qwen2-Audio-7B-Instruct"),
    ]
    for keywords, model in routes:
        if any(word in text for word in keywords):
            return model
    return "DeepSeek-V3.1"

async def stream_samba(model: str, prompt: str, temp: float, tokens: int) -> AsyncGenerator[str, None]:
    """Stream a chat completion from the SambaNova API.

    Yields the *accumulated* response text after each received delta, so the
    UI can render progressively. On an HTTP error or connection failure a
    single error string is yielded instead. On success the full response is
    saved to disk and the event is logged.

    Args:
        model: SambaNova model identifier.
        prompt: User message content.
        temp: Sampling temperature.
        tokens: Value for the API's max_tokens.
    """
    url = "https://api.sambanova.ai/v1/chat/completions"
    headers = {"Authorization": f"Bearer {SAMBANOVA_API_KEY}", "Content-Type": "application/json"}
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "temperature": temp,
        "max_tokens": tokens,
        "stream": True
    }

    full_res = ""
    timeout = aiohttp.ClientTimeout(total=60.0)

    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(url, headers=headers, json=payload) as resp:
                if resp.status != 200:
                    error_text = await resp.text()
                    yield f"Error {resp.status}: {error_text}"
                    return

                # OpenAI-style SSE: lines of "data: <json>", terminated by
                # "data: [DONE]".
                async for line in resp.content:
                    if not line:
                        continue
                    line = line.decode("utf-8").strip()
                    if not line.startswith("data: "):
                        continue
                    data_str = line[6:]
                    if data_str == "[DONE]":
                        break
                    try:
                        data = json.loads(data_str)
                    except json.JSONDecodeError:
                        # Tolerate keep-alives / partial lines.
                        continue
                    if "choices" in data and data["choices"]:
                        # FIX: the final chunk may omit "delta" entirely
                        # (only finish_reason) and "content" may be null —
                        # the original indexed ["delta"] and would raise.
                        delta = data["choices"][0].get("delta", {}).get("content") or ""
                        full_res += delta
                        yield full_res
    except Exception as e:
        yield f"Error de conexión: {str(e)}"

    if full_res:
        save_generation(full_res, model)
        log_event({"model": model, "prompt": prompt[:100], "response_length": len(full_res)})

async def handle_execution(model: str, prompt: str, temp: float, tokens: int, n: int, ratio: str, version: str):
    """Route a UI request to the right backend, streaming (text, images) pairs.

    Implemented as an async generator so Gradio streams partial text output;
    every yield is a (canvas_text, gallery_images) tuple matching the two
    UI outputs. (The original *returned* the stream_samba() async-generator
    object as a single value, so Gradio rendered its repr instead of the
    streamed text and the gallery output was missing.)

    Args:
        model: Selected model name, or "AUTO-SELECT" for keyword routing.
        prompt: User command text.
        temp: Sampling temperature for text models.
        tokens: max_tokens for text models.
        n: Number of images to generate (REVE only).
        ratio: Aspect ratio (REVE only).
        version: REVE model version.
    """
    if not prompt.strip():
        yield "Por favor ingresa un comando.", []
        return

    active_model = smart_select(prompt) if model == "AUTO-SELECT" else model

    # REVE image generation — blocking HTTP work is pushed off the event loop.
    if active_model == "REVE":
        if not REVE_API_KEY:
            yield "❌ Error: Falta REVE_API_KEY en las variables de entorno.", []
            return
        images, message = await asyncio.to_thread(
            generar_imagenes_batch, prompt, REVE_API_KEY, ratio, version, n
        )
        yield message, images
        return

    # Text models.
    # NOTE(review): HF_MODELS entries also fall through here and are sent to
    # the SambaNova endpoint — confirm that is intended.
    if active_model in SAMBA_MODELS and not SAMBANOVA_API_KEY:
        yield "❌ Error: Falta SAMBANOVA_API_KEY en las variables de entorno.", []
        return

    async for partial in stream_samba(active_model, prompt, temp, tokens):
        yield partial, []

def create_interface():
    """Build and return the Gradio Blocks UI.

    FIX: css and theme are attached here on gr.Blocks(...) — Blocks.launch()
    does not accept them, so passing them at launch time raised TypeError and
    the custom styling was never applied.
    """
    with gr.Blocks(
        title="BATUTO X • Neurocore",
        css=CSS,
        theme=gr.themes.Default(
            primary_hue="emerald",
            neutral_hue="zinc",
            font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
        )
    ) as demo:
        gr.HTML("""
        <div style="text-align: center; padding: 20px; background: linear-gradient(45deg, #000, #001a14); border-radius: 16px; margin-bottom: 20px;">
            <h1 style="color: #00C896; margin: 0; font-size: 2.5em;">⚡ BATUTO X • NEUROCORE PRO</h1>
            <p style="color: #00FFE0; margin-top: 10px;">Interfaz de Generación Multimodal Avanzada</p>
        </div>
        """)
        
        # Tracks whether the image-only controls are currently shown.
        extra_controls_state = gr.State({"show_image_controls": False})
        
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Group():
                    model_opt = gr.Dropdown(
                        ALL_MODELS, 
                        value="AUTO-SELECT", 
                        label="🧠 Modelo",
                        info="Selecciona un modelo o usa AUTO-SELECT para detección inteligente"
                    )
                    
                    temp_opt = gr.Slider(
                        0, 1.5, 0.7, 
                        label="🌡️ Temperature",
                        info="Controla la aleatoriedad (0 = determinístico, 1.5 = muy creativo)"
                    )
                    
                    tokens_opt = gr.Slider(
                        128, 8192, 2048, 
                        step=128,
                        label="📏 Máximo Tokens",
                        info="Longitud máxima de la respuesta"
                    )
                    
                    # REVE-specific controls (hidden until REVE is selected).
                    with gr.Group(visible=False) as image_controls:
                        num_opt = gr.Slider(
                            1, 4, 1, step=1, 
                            label="🖼️ Cantidad de Imágenes",
                            info="Número de imágenes a generar"
                        )
                        
                        ratio_opt = gr.Dropdown(
                            ["16:9", "9:16", "3:2", "2:3", "4:3", "3:4", "1:1"], 
                            value="9:16",
                            label="📐 Aspect Ratio",
                            info="Proporción de la imagen"
                        )
                        
                        version_opt = gr.Dropdown(
                            ["latest", "reve-create@20250915"], 
                            value="latest",
                            label="🔧 Versión",
                            info="Versión del modelo REVE"
                        )
            
            with gr.Column(scale=2):
                with gr.Group():
                    prompt_input = gr.Textbox(
                        lines=5, 
                        label="💬 Entrada",
                        placeholder="Escribe tu comando aquí...\nEjemplo: 'Genera un código Python para ordenar una lista' o 'Crea una imagen de un dragón cibernético'",
                        elem_classes=["prompt-box"]
                    )
        
        send_btn = gr.Button(
            "🚀 EJECUTAR COMANDO", 
            variant="primary", 
            size="lg"
        )
        
        with gr.Group():
            canvas = gr.Textbox(
                lines=12, 
                label="📤 Salida",
                interactive=False
            )
        
        gallery = gr.Gallery(
            label="🎨 Galería de Imágenes",
            columns=2,
            height=400,
            visible=False
        )
        
        # Show/hide the image controls and gallery based on the model choice.
        def toggle_controls(model):
            if model == "REVE":
                return [
                    gr.Group(visible=True),  # image_controls
                    gr.Gallery(visible=True),  # gallery
                    gr.Textbox(visible=True),  # canvas
                    {"show_image_controls": True}
                ]
            else:
                return [
                    gr.Group(visible=False),  # image_controls
                    gr.Gallery(visible=False),  # gallery
                    gr.Textbox(visible=True),  # canvas
                    {"show_image_controls": False}
                ]
        
        model_opt.change(
            fn=toggle_controls,
            inputs=model_opt,
            outputs=[image_controls, gallery, canvas, extra_controls_state]
        )
        
        # Wire the execute button to the dispatcher.
        send_btn.click(
            fn=handle_execution,
            inputs=[model_opt, prompt_input, temp_opt, tokens_opt, num_opt, ratio_opt, version_opt],
            outputs=[canvas, gallery]
        )
        
        # Usage examples.
        with gr.Accordion("📚 Ejemplos de Uso", open=False):
            gr.Examples(
                examples=[
                    ["Escribe un programa en Python que implemente el algoritmo de ordenamiento quicksort", "AUTO-SELECT"],
                    ["Explica la teoría de la relatividad de Einstein en términos simples", "AUTO-SELECT"],
                    ["Genera una imagen de un dragón cibernético en una ciudad futurista", "REVE"],
                    ["Analiza este código y sugiere mejoras: def factorial(n): return 1 if n==0 else n*factorial(n-1)", "AUTO-SELECT"],
                    ["Resuelve esta ecuación: x² + 5x + 6 = 0", "AUTO-SELECT"]
                ],
                inputs=[prompt_input, model_opt],
                label="Haz clic en un ejemplo para cargarlo"
            )
    
    return demo

def main():
    """Entry point: report configuration status, build the UI, and launch it."""
    print("🚀 Iniciando BATUTO X Neurocore")
    print(f"📁 Directorio de salida: {os.path.abspath(OUTPUT_DIR)}")
    print(f"🎨 Directorio de imágenes REVE: {os.path.abspath(REVE_OUTPUT_DIR)}")
    print(f"📝 Archivo de logs: {os.path.abspath(LOG_FILE)}")
    
    if SAMBANOVA_API_KEY:
        print("✅ SAMBANOVA_API_KEY configurada")
    else:
        print("⚠️  SAMBANOVA_API_KEY no encontrada - modelos Sambanova no funcionarán")
    
    if REVE_API_KEY:
        print("✅ REVE_API_KEY configurada")
    else:
        print("⚠️  REVE_API_KEY no encontrada - generación de imágenes REVE no disponible")
    
    # Build the interface.
    demo = create_interface()
    
    # FIX: css/theme are not valid Blocks.launch() keyword arguments — passing
    # them here raised TypeError at startup. Styling belongs on gr.Blocks()
    # inside create_interface().
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", "7860")),
        share=False,
        debug=False,
        show_error=True
    )

if __name__ == "__main__":
    main()