|
|
import gradio as gr |
|
|
import torch |
|
|
import spaces |
|
|
import os |
|
|
import shutil |
|
|
import glob |
|
|
from diffusers import DiffusionPipeline |
|
|
from huggingface_hub import snapshot_download |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face repository the Space serves.
MODEL_ID = "NewBie-AI/NewBie-image-Exp0.1"

# Local snapshot directory; the repo layout is patched in place here so that
# diffusers can resolve the custom (trust_remote_code) modules.
LOCAL_DIR = "./newbie_fixed_model"
|
|
|
|
|
def list_all_files(directory):
    """Debug helper: print an indented tree of everything under *directory*.

    Purely diagnostic — used to inspect what the snapshot download actually
    produced. Writes to stdout and returns None.

    Args:
        directory: Root path to walk recursively.
    """
    print(f"\n📂 LISTADO DE ARCHIVOS EN {directory}:")
    for root, dirs, files in os.walk(directory):
        # Compute nesting depth with relpath. The previous
        # root.replace(directory, '') approach miscounts the level whenever
        # the directory name happens to recur deeper inside a subpath.
        rel = os.path.relpath(root, directory)
        level = 0 if rel == os.curdir else rel.count(os.sep) + 1
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")
    print("------------------------------------------------\n")
|
|
|
|
|
def load_deep_fixed_pipeline():
    """Download the model snapshot and patch its layout so that
    ``DiffusionPipeline.from_pretrained(..., trust_remote_code=True)`` can
    resolve the repo's custom modeling code.

    The repo ships its Python files in arbitrary locations, so this routine
    mirrors every ``.py`` file into a ``transformer/`` package (and into the
    repo root) and guarantees a ``transformer/transformer.py`` exists.

    Returns:
        The loaded ``DiffusionPipeline``.

    Raises:
        RuntimeError: if the snapshot contains no Python files at all.
    """
    print(f"🛠️ Iniciando protocolo de Búsqueda Profunda para {MODEL_ID}...")

    # Download only once; an existing directory is assumed to be complete.
    if not os.path.exists(LOCAL_DIR):
        print(" ⬇️ Descargando snapshot...")
        snapshot_download(repo_id=MODEL_ID, local_dir=LOCAL_DIR)

    # Dump the tree to the logs for debugging.
    list_all_files(LOCAL_DIR)

    # Collect every Python file anywhere in the snapshot.
    all_py_files = []
    for root, dirs, files in os.walk(LOCAL_DIR):
        for file in files:
            if file.endswith(".py"):
                full_path = os.path.join(root, file)
                all_py_files.append(full_path)

    print(f" 🔎 Se encontraron {len(all_py_files)} archivos Python: {all_py_files}")

    if not all_py_files:
        raise RuntimeError("❌ ERROR FATAL: No se encontró NINGÚN archivo .py en el repositorio. El modelo no se puede ejecutar.")

    # Make transformer/ an importable package.
    transformer_folder = os.path.join(LOCAL_DIR, "transformer")
    os.makedirs(transformer_folder, exist_ok=True)
    with open(os.path.join(transformer_folder, "__init__.py"), "w") as f:
        f.write("")

    for py_file in all_py_files:
        filename = os.path.basename(py_file)

        # Mirror every .py into the transformer package...
        shutil.copy(py_file, os.path.join(transformer_folder, filename))

        # ...and into the repo root, without clobbering files already there.
        root_dest = os.path.join(LOCAL_DIR, filename)
        if not os.path.exists(root_dest):
            shutil.copy(py_file, root_dest)

        # Heuristic: a filename mentioning modeling/transformer is taken to be
        # the custom model implementation. NOTE: if several match, the last
        # one wins.
        if "modeling" in filename or "transformer" in filename.lower():
            # BUGFIX: the original printed the literal text "(unknown)" here
            # instead of the detected filename.
            print(f" ✅ Posible archivo de modelado detectado: {filename}")
            shutil.copy(py_file, os.path.join(transformer_folder, "transformer.py"))

    # Fallback: assume the biggest .py file is the model definition.
    if not os.path.exists(os.path.join(transformer_folder, "transformer.py")):
        print(" ⚠️ No se detectó un nombre obvio. Usando el archivo .py más grande como transformer.py")
        largest_py = max(all_py_files, key=os.path.getsize)
        shutil.copy(largest_py, os.path.join(transformer_folder, "transformer.py"))

    print(" 🚀 Intentando cargar pipeline...")

    pipe = DiffusionPipeline.from_pretrained(
        LOCAL_DIR,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        local_files_only=True,
    )
    return pipe
|
|
|
|
|
|
|
|
# Load the pipeline once at import time so the Gradio handler can reuse it.
# On failure we deliberately keep pipe = None and surface the problem inside
# the UI (see generate_image) instead of crashing the Space at startup; the
# broad except is a top-level boundary, and the error is logged.
pipe = None
try:
    pipe = load_deep_fixed_pipeline()
except Exception as e:
    print(f"❌ Error durante la carga: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@spaces.GPU(duration=120)
def generate_image(prompt, negative_prompt, steps, cfg, width, height):
    """Gradio handler: run one text-to-image generation on the GPU.

    Args:
        prompt: Positive prompt (the XML-tagged format the model expects).
        negative_prompt: Tags to steer generation away from.
        steps: Number of denoising steps (coerced to int).
        cfg: Classifier-free guidance scale (coerced to float).
        width: Output width in pixels (coerced to int).
        height: Output height in pixels (coerced to int).

    Returns:
        The generated PIL image.

    Raises:
        gr.Error: if the model failed to load at startup, or inference fails.
    """
    if pipe is None:
        raise gr.Error("El modelo falló al cargar. Revisa los logs de la consola (Files listed above).")

    print("🎨 Generando...")
    # Move to GPU lazily: under @spaces.GPU the device is only available
    # inside this call.
    pipe.to("cuda")

    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(steps),
            guidance_scale=float(cfg),
            width=int(width),
            height=int(height)
        ).images[0]
        return image
    except Exception as e:
        # Chain the cause so the underlying traceback stays in the logs.
        raise gr.Error(f"Error de inferencia: {e}") from e
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Custom CSS applied to the Blocks layout.
css = """
.container { max-width: 900px; margin: auto; }
"""

# Default positive prompt, in the XML-tagged tag format this model expects
# (per-character blocks plus a <general_tags> section).
DEFAULT_PROMPT = """<character_1>
<gender>1girl</gender>
<appearance>red_eyes, white_hair, long_hair</appearance>
<clothing>kimono, floral_print</clothing>
<action>standing, holding_fan</action>
</character_1>
<general_tags>
<quality>best quality, masterpiece, 4k</quality>
<style>anime, vivid_colors</style>
</general_tags>"""

# Default negative prompt.
DEFAULT_NEG = "low quality, bad anatomy, worst quality, watermark, text"
|
|
|
|
|
|
|
|
# --- UI definition ---
# Pass the CSS through the supported Blocks(css=...) parameter rather than
# injecting a raw <style> tag via gr.HTML, which is fragile across Gradio
# versions and themes.
with gr.Blocks(css=css) as demo:
    gr.Markdown("# ⛩️ NewBie Anime Generator (Deep Fix)")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt (XML)", value=DEFAULT_PROMPT, lines=10)
            neg = gr.Textbox(label="Negative", value=DEFAULT_NEG)
            btn = gr.Button("Generar", variant="primary")
            with gr.Row():
                steps = gr.Slider(10, 50, value=28, label="Pasos")
                cfg = gr.Slider(1, 15, value=7.0, label="CFG")
                width = gr.Slider(512, 1280, value=1024, step=64, label="Ancho")
                height = gr.Slider(512, 1280, value=1024, step=64, label="Alto")
        with gr.Column():
            out = gr.Image(label="Resultado")

    btn.click(generate_image, inputs=[prompt, neg, steps, cfg, width, height], outputs=out)

if __name__ == "__main__":
    demo.launch()