updating with zero gpu
app.py CHANGED

@@ -6,6 +6,7 @@ import io
 import gradio as gr
 from PIL import Image
 import os
+import spaces  # Import spaces for Zero GPU
 
 # Configuration
 repo_id = "diffusers/FLUX.2-dev-bnb-4bit"

@@ -44,7 +45,7 @@ try:
         repo_id,
         text_encoder=None,
         torch_dtype=torch_dtype,
-        device_map="
+        device_map="cuda"
     )
     if not torch.cuda.is_available():
         pipe = pipe.to(device)
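This hunk is the load-time half of the Zero GPU migration: component placement becomes an explicit device_map="cuda". For context, here is a minimal sketch of the loading block being edited. The diff shows only the keyword arguments, so the DiffusionPipeline auto class and the dtype/device fallback wiring around them are assumptions rather than the Space's exact code, and the sketch presumes a diffusers version that accepts a plain device string for device_map, as the commit itself does:

import torch
from diffusers import DiffusionPipeline

repo_id = "diffusers/FLUX.2-dev-bnb-4bit"
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.bfloat16 if device == "cuda" else torch.float32

try:
    pipe = DiffusionPipeline.from_pretrained(
        repo_id,
        text_encoder=None,        # text encoding is handled outside the pipeline
        torch_dtype=torch_dtype,
        device_map="cuda",        # place all components directly on the GPU
    )
    if not torch.cuda.is_available():
        pipe = pipe.to(device)    # CPU fallback kept by the commit for local runs
except Exception as e:
    print(f"Error loading pipeline: {e}")
    raise

Loading onto CUDA at import time is the pattern the Zero GPU docs recommend: the weights are staged for the GPU up front, and actual hardware is attached only while a @spaces.GPU-decorated call is running.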
@@ -53,6 +54,13 @@ except Exception as e:
     print(f"Error loading pipeline: {e}")
     raise
 
+def get_duration(num_inference_steps: int, input_image: Image.Image = None):
+    """Calculate dynamic GPU duration based on inference steps and input image."""
+    num_images = 0 if input_image is None else 1
+    step_duration = 1 + 0.7 * num_images
+    return max(65, num_inference_steps * step_duration + 10)
+
+@spaces.GPU(duration=get_duration)  # Dynamic GPU allocation
 def generate_image(
     prompt: str,
     input_image: Image.Image = None,
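This is the heart of the commit: generate_image now runs under @spaces.GPU, which checks a GPU out of the Zero GPU pool only for the duration of the call, and duration is a callable rather than a constant. Zero GPU invokes that callable with the same arguments as the decorated function and reserves the returned number of seconds. With the formula above, a 28-step text-to-image call reserves max(65, 28 * 1.0 + 10) = 65 seconds, while a 50-step image-to-image call reserves max(65, 50 * 1.7 + 10) = 95 seconds. A standalone sketch of the pattern, with illustrative names (only spaces.GPU itself is the real API):

import spaces

def get_duration(prompt: str, steps: int = 28, has_image: bool = False) -> float:
    # 1 s/step for text-to-image, 1.7 s/step for image-to-image,
    # plus 10 s of overhead, floored at 65 s (the commit's formula).
    step_duration = 1 + 0.7 * (1 if has_image else 0)
    return max(65, steps * step_duration + 10)

@spaces.GPU(duration=get_duration)  # GPU is attached only while run() executes
def run(prompt: str, steps: int = 28, has_image: bool = False):
    ...  # GPU work goes here

Because spaces forwards the decorated function's arguments, the duration function's signature should mirror generate_image's parameter order; it is worth double-checking that get_duration(num_inference_steps, input_image) actually receives the step count and image rather than, say, the prompt.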
@@ -74,9 +82,6 @@ def generate_image(
     if not prompt or prompt.strip() == "":
         raise gr.Error("Please enter a prompt!")
 
-    if not torch.cuda.is_available():
-        raise gr.Error("This Space requires a GPU to run. Please try a different Space or upgrade your hardware.")
-
     progress(0, desc="Encoding prompt...")
 
     try:

@@ -132,7 +137,7 @@ with gr.Blocks(
 
         Supports both **text-to-image** and **image-to-image** generation.
 
-
+        ⚡ **Powered by Hugging Face Zero GPU** - Automatic GPU allocation on demand!
         """
     )
 

@@ -197,6 +202,7 @@ with gr.Blocks(
         - Start with 28 steps for a good balance of quality and speed
         - Higher guidance scale follows your prompt more strictly
         - Use the same seed to reproduce results
+        - First generation may take longer as the model loads
         """
     )
 

@@ -261,4 +267,4 @@ with gr.Blocks(
 )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch()
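Taken together, the commit moves the Space to the standard Zero GPU shape: import spaces first, decorate the inference function, queue the demo. A minimal end-to-end skeleton of that shape, with the UI and pipeline details elided (component names here are illustrative, not the Space's actual code):

import spaces   # import before any CUDA work, per Zero GPU convention
import gradio as gr

def get_duration(prompt: str, steps: int = 28) -> float:
    return max(65, steps + 10)

@spaces.GPU(duration=get_duration)  # GPU attached only for this call
def generate_image(prompt: str, steps: int = 28):
    ...  # run the diffusion pipeline here
    return None

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    steps = gr.Slider(1, 50, value=28, step=1, label="Steps")
    out = gr.Image(label="Result")
    gr.Button("Generate").click(generate_image, inputs=[prompt, steps], outputs=out)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()  # reject new requests once 20 are waiting

Keeping demo.queue() is what lets Gradio serialize requests while each one holds a timed GPU reservation; max_size=20 simply caps how many callers can wait at once.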