import random

import gradio as gr
import numpy as np
import torch
from diffusers import DiffusionPipeline

# Run on CPU; float32 keeps the weights in a dtype the CPU backend supports.
device = "cpu"
dtype = torch.float32

# Load the model from the Hugging Face Hub (weights are cached locally,
# including inside Hugging Face Spaces).
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=dtype,
    low_cpu_mem_usage=True,
).to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


def infer(prompt, seed=42, randomize_seed=False, width=512, height=512, num_inference_steps=4):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Seed the generator explicitly so a given seed reproduces the same image.
    generator = torch.Generator(device=device).manual_seed(seed)
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0,  # the distilled schnell model runs without classifier-free guidance
    ).images[0]
    # Return the seed alongside the image so the UI can show which seed was used.
    return image, seed


examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """# FLUX.1 [schnell]
12B-parameter rectified flow transformer distilled from FLUX.1 [pro]
"""
        )

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=512)
            num_inference_steps = gr.Slider(
                label="Number of inference steps", minimum=1, maximum=50, step=1, value=4
            )

        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy",
        )

    # Trigger inference from both the Run button and pressing Enter in the prompt box.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()