# nucleus-image — app.py (Hugging Face Space)
# Last update: commit f283ab2 (verified) by multimodalart
import os
import random
import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
# Polyfill: older torch builds don't expose the public F.grouped_mm alias,
# so route it to the underlying private aten op when it is missing.
if not hasattr(torch.nn.functional, "grouped_mm"):

    def _grouped_mm_polyfill(input, mat2, *, offs=None, bias=None, out_dtype=None):
        """Forward to the private aten grouped-matmul kernel."""
        return torch.ops.aten._grouped_mm(
            input, mat2, offs=offs, bias=bias, out_dtype=out_dtype
        )

    torch.nn.functional.grouped_mm = _grouped_mm_polyfill
MODEL_NAME = "NucleusAI/Nucleus-Image"
# Upper bound for the seed slider: largest 32-bit signed integer.
MAX_SEED = np.iinfo(np.int32).max
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load pipeline at startup (weights downloaded once, moved to GPU inside the @spaces.GPU function)
pipe = DiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=dtype)
# Try to enable Text KV cache (optional — falls back gracefully if unavailable)
# NOTE(review): the broad except is deliberate best-effort startup —
# TextKVCacheConfig is presumably absent in older diffusers releases; verify.
try:
    from diffusers import TextKVCacheConfig
    config = TextKVCacheConfig()
    pipe.transformer.enable_cache(config)
    print("Text KV cache enabled.")
except Exception as e:
    print(f"Text KV cache not enabled: {e}")
pipe.to(device)
# Preset output resolutions, keyed by the label shown in the UI dropdown.
# Every dimension is a multiple of 32 and each pair totals ~1 megapixel.
_ASPECT_PRESETS = [
    ("1:1", 1024, 1024),
    ("16:9", 1344, 768),
    ("9:16", 768, 1344),
    ("4:3", 1184, 896),
    ("3:4", 896, 1184),
    ("3:2", 1248, 832),
    ("2:3", 832, 1248),
]
ASPECT_RATIOS = {f"{ratio} ({w}x{h})": (w, h) for ratio, w, h in _ASPECT_PRESETS}
@spaces.GPU(duration=120)
def generate(
    prompt: str,
    aspect_ratio: str,
    num_inference_steps: int,
    guidance_scale: float,
    seed: int,
    randomize_seed: bool,
    progress=gr.Progress(track_tqdm=True),
):
    """Run one text-to-image generation and return (image, seed used).

    The seed is returned alongside the image so the UI can show the value
    that was actually used when "Randomize seed" is checked.

    Raises:
        gr.Error: if the prompt is empty or whitespace-only.
    """
    if not prompt or not prompt.strip():
        raise gr.Error("Please enter a prompt.")

    # Pick a fresh seed when randomization is requested, else honor the slider.
    chosen_seed = random.randint(0, MAX_SEED) if randomize_seed else seed
    target_width, target_height = ASPECT_RATIOS[aspect_ratio]
    rng = torch.Generator(device=device).manual_seed(int(chosen_seed))

    result = pipe(
        prompt=prompt,
        width=target_width,
        height=target_height,
        num_inference_steps=int(num_inference_steps),
        guidance_scale=float(guidance_scale),
        generator=rng,
    )
    return result.images[0], chosen_seed
# Curated prompts surfaced in the gr.Examples widget below the prompt box.
EXAMPLES = [
    "A weathered lighthouse on a rocky coastline at golden hour, waves crashing against the rocks below, seagulls circling overhead, dramatic clouds painted in shades of amber and violet",
    "A cozy cabin in a snowy pine forest at night, warm light glowing from the windows, aurora borealis dancing in the sky above",
    "A futuristic cyberpunk city street at night, neon signs reflecting in puddles, flying cars, dense fog, cinematic lighting",
    "A tiny astronaut exploring a giant mushroom forest on an alien planet, bioluminescent plants, dreamlike atmosphere, highly detailed",
    "Portrait of a wise old wizard with a long white beard, intricate robes, holding a glowing crystal staff, fantasy art, painterly style",
]
# Centers the layout column and caps its width; passed to gr.Blocks(css=...).
CSS = """
#col-container { max-width: 960px; margin: 0 auto; }
"""
# ---------------------------------------------------------------------------
# UI layout. Component declaration order is the order they render on the page.
# ---------------------------------------------------------------------------
with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
# Nucleus-Image
**17B sparse MoE diffusion transformer** that activates only ~2B parameters per forward pass — fast, high-quality image generation.
Fully open-source: weights, training code, and dataset. [[model]](https://huggingface.co/NucleusAI/Nucleus-Image) [[paper]](https://arxiv.org/abs/2604.12163) [[code]](https://github.com/NucleusAI/Nucleus-Image)
"""
        )
        prompt = gr.Textbox(
            label="Prompt",
            placeholder="Describe the image you want to generate...",
            lines=3,
            scale=4,
        )
        run_btn = gr.Button("Generate", variant="primary", scale=1)
        result = gr.Image(label="Result", show_label=False, format="png")
        with gr.Accordion("Advanced Settings", open=False):
            aspect_ratio = gr.Dropdown(
                label="Aspect Ratio",
                choices=list(ASPECT_RATIOS.keys()),
                value="16:9 (1344x768)",
            )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    label="Inference Steps", minimum=10, maximum=80, step=1, value=50
                )
                guidance_scale = gr.Slider(
                    label="Guidance Scale", minimum=1.0, maximum=15.0, step=0.5, value=4.0
                )
            with gr.Row():
                seed = gr.Slider(
                    label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        gr.Examples(examples=EXAMPLES, inputs=prompt, label="Example prompts")

    # Wire both the Generate button and Enter-in-textbox to the same handler.
    inputs = [prompt, aspect_ratio, num_inference_steps, guidance_scale, seed, randomize_seed]
    # The seed slider is also an output so it reflects the seed actually used.
    outputs = [result, seed]
    gr.on(
        [run_btn.click, prompt.submit],
        generate,
        inputs=inputs,
        outputs=outputs,
    )

if __name__ == "__main__":
    # queue() enables request queuing before launching the web server.
    demo.queue().launch()