"""Gradio Space that 'rescues' the NewBie image model.

The Hugging Face repo only ships weights; the pipeline *code* lives on
GitHub. This script downloads the weights, clones the GitHub repo, scans
it for the `NewbiePipeline` class, imports that module dynamically, and
wires the resulting pipeline into a ZeroGPU Gradio UI.
"""

import importlib.util
import os
import shutil  # kept: may be used by the cloned pipeline code at runtime
import subprocess
import sys

import gradio as gr
import spaces
import torch
from huggingface_hub import snapshot_download

# -----------------------------------------------------------------------------
# CONFIGURATION
# -----------------------------------------------------------------------------
MODEL_ID = "NewBie-AI/NewBie-image-Exp0.1"
GITHUB_REPO_URL = "https://github.com/NewBie-AI/NewBie"  # source of the missing code
LOCAL_MODEL_DIR = "./model_weights"
LOCAL_CODE_DIR = "./newbie_code"


# -----------------------------------------------------------------------------
# RESCUE ROUTINE: CLONE CODE + DOWNLOAD WEIGHTS
# -----------------------------------------------------------------------------
def load_hybrid_pipeline():
    """Download weights from HF, clone code from GitHub, and return a pipeline.

    Returns:
        An instance of the dynamically located ``NewbiePipeline`` class,
        loaded from the local weight directory.

    Raises:
        RuntimeError: if no file in the cloned repo defines
            ``class NewbiePipeline``.
        subprocess.CalledProcessError: if ``git clone`` fails.
    """
    print(f"🚨 INICIANDO PROTOCOLO DE RESCATE PARA {MODEL_ID}...")

    # 1. Download weights (Hugging Face).
    if not os.path.exists(LOCAL_MODEL_DIR):
        print(" ⬇️ Descargando pesos del modelo (Safetensors)...")
        snapshot_download(
            repo_id=MODEL_ID,
            local_dir=LOCAL_MODEL_DIR,
            ignore_patterns=["*.msgpack", "*.bin"],  # skip redundant formats
        )

    # 2. Clone source code (GitHub). Use subprocess with an argument list
    #    (no shell) and check=True so a failed clone raises instead of
    #    leaving a partial directory that would skip this step forever.
    if not os.path.exists(LOCAL_CODE_DIR):
        print(f" ⬇️ Clonando código fuente desde {GITHUB_REPO_URL}...")
        subprocess.run(
            ["git", "clone", GITHUB_REPO_URL, LOCAL_CODE_DIR],
            check=True,
        )

    # 3. Make the cloned package importable.
    sys.path.append(os.path.abspath(LOCAL_CODE_DIR))

    # 4. Locate 'NewbiePipeline' by scanning every .py file in the clone.
    print(" 🕵️‍♂️ Buscando la clase perdida 'NewbiePipeline' en el código clonado...")
    pipeline_class = None
    for root, _dirs, files in os.walk(LOCAL_CODE_DIR):
        for file in files:
            if not file.endswith(".py"):
                continue
            path = os.path.join(root, file)
            try:
                with open(path, "r", encoding="utf-8", errors="ignore") as f:
                    if "class NewbiePipeline" not in f.read():
                        continue
                print(f" 🎯 ¡CÓDIGO ENCONTRADO EN!: {file}")
                # Dynamic import of the file that defines the class.
                spec = importlib.util.spec_from_file_location("dynamic_pipeline", path)
                module = importlib.util.module_from_spec(spec)
                sys.modules["dynamic_pipeline"] = module
                spec.loader.exec_module(module)
                pipeline_class = getattr(module, "NewbiePipeline")
                break
            except Exception:
                # Best-effort scan: skip files that fail to read or import.
                continue
        if pipeline_class:
            break

    if not pipeline_class:
        raise RuntimeError(
            "❌ No se encontró 'class NewbiePipeline' ni siquiera en el GitHub. El código ha cambiado."
        )

    # 5. Instantiate the pipeline from the local weights.
    print(" 🚀 Conectando código clonado con pesos descargados...")
    pipe = pipeline_class.from_pretrained(
        LOCAL_MODEL_DIR,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        local_files_only=True,
    )
    return pipe


# Load at import time; on failure leave `pipe = None` so the UI can still
# start and surface a readable error to the user (best-effort by design).
pipe = None
try:
    pipe = load_hybrid_pipeline()
    print(" ✅ ¡MODELO CARGADO EXITOSAMENTE!")
except Exception as e:
    print(f"❌ ERROR CRÍTICO: {e}")


# -----------------------------------------------------------------------------
# ZEROGPU LOGIC
# -----------------------------------------------------------------------------
@spaces.GPU(duration=120)
def generate_image(prompt, negative_prompt, steps, cfg, width, height):
    """Run one text-to-image generation on GPU and return a PIL image.

    Raises:
        gr.Error: if the model failed to load or generation fails.
    """
    if pipe is None:
        raise gr.Error("El modelo no está cargado. Revisa la consola.")

    print("🎨 Generando...")
    pipe.to("cuda")
    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(steps),
            guidance_scale=float(cfg),
            width=int(width),
            height=int(height),
        ).images[0]
        return image
    except Exception as e:
        raise gr.Error(f"Error generando imagen: {e}")


# -----------------------------------------------------------------------------
# INTERFACE
# -----------------------------------------------------------------------------
css = """
"""

DEFAULT_PROMPT = """
1girl
red_eyes, white_hair, long_hair
kimono, floral_print
standing, holding_fan
"""

# Pass the stylesheet through Blocks' css= parameter (gr.HTML(css) would
# render the CSS text as page content instead of applying it).
with gr.Blocks(css=css) as demo:
    gr.Markdown("# ⛩️ NewBie Anime (GitHub Rescue Edition)")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt (XML)", value=DEFAULT_PROMPT, lines=8)
            neg = gr.Textbox(label="Negative", value="low quality, bad anatomy")
            btn = gr.Button("Generar", variant="primary")
            steps = gr.Slider(10, 50, value=28, label="Pasos")
            cfg = gr.Slider(1, 15, value=7.0, label="CFG")
            width = gr.Slider(512, 1280, value=1024, step=64, label="Ancho")
            height = gr.Slider(512, 1280, value=1024, step=64, label="Alto")
        with gr.Column():
            out = gr.Image(label="Resultado")

    btn.click(
        generate_image,
        inputs=[prompt, neg, steps, cfg, width, height],
        outputs=out,
    )

if __name__ == "__main__":
    demo.launch()