Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,6 +1,8 @@
 import torch
 import spaces
 import gradio as gr
+import random
+import numpy as np
 from diffusers import ZImagePipeline

 # Load the pipeline once at startup
@@ -28,23 +30,21 @@ spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant

 pipe_no_lora.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
 spaces.aoti_blocks_load(pipe_no_lora.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
-
+MAX_SEED = np.iinfo(np.int32).max
 print("Pipeline loaded!")

 @spaces.GPU
-def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, progress=gr.Progress(track_tqdm=True)):
+def generate_image(prompt, height, width, num_inference_steps, seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
     """Generate an image from the given prompt."""
-
-    seed =
-
-    generator = torch.Generator("cuda").manual_seed(int(seed))
+    if randomize_seed:
+        seed = random.randint(0, MAX_SEED)
     image = pipe(
         prompt=prompt,
         height=int(height),
         width=int(width),
         num_inference_steps=int(num_inference_steps),
         guidance_scale=0.0,  # Guidance should be 0 for Turbo models
-        generator=
+        generator = torch.Generator(device=device).manual_seed(seed)
     ).images[0]

     image_no_lora = pipe_no_lora(
@@ -53,7 +53,7 @@ def generate_image(prompt, height, width, num_inference_steps, seed, randomize_s
         width=int(width),
         num_inference_steps=int(num_inference_steps),
         guidance_scale=0.0,  # Guidance should be 0 for Turbo models
-        generator=
+        generator = torch.Generator(device=device).manual_seed(seed)
     ).images[0]

     return (image_no_lora,image), seed
@@ -124,7 +124,7 @@ with gr.Blocks() as demo:
         )
         randomize_seed = gr.Checkbox(
             label="Randomize Seed",
-            value=
+            value=True,
         )

         generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
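What the new lines do: MAX_SEED is the largest 32-bit signed integer, the seed is re-drawn whenever "Randomize Seed" is checked, and a generator seeded with that value is built for each of the two pipeline calls, so the LoRA and no-LoRA runs start from the same noise and stay comparable. A minimal sketch of the pattern in isolation, assuming `device` is defined elsewhere in app.py (it does not appear in these hunks) and using a hypothetical pick_seed helper:

import random
import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # 2**31 - 1, the largest 32-bit signed integer

# Assumption: app.py defines `device` somewhere outside the hunks shown above.
device = "cuda" if torch.cuda.is_available() else "cpu"

def pick_seed(seed, randomize_seed):
    # Return a fresh random seed when requested, otherwise the user-supplied one.
    return random.randint(0, MAX_SEED) if randomize_seed else int(seed)

seed = pick_seed(seed=42, randomize_seed=True)
# Seeding a generator with the same value for each pipeline call makes both
# runs draw the same initial noise, so their outputs can be compared directly.
generator = torch.Generator(device=device).manual_seed(seed)

Returning the resolved seed to the UI, as the function already does with `return (image_no_lora,image), seed`, lets a user reproduce a randomized result later.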
|