Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,26 +2,27 @@ import gradio as gr
|
|
| 2 |
from diffusers import AutoPipelineForText2Image
|
| 3 |
import torch
|
| 4 |
|
| 5 |
-
# Load sd-turbo
|
| 6 |
pipe = AutoPipelineForText2Image.from_pretrained(
|
| 7 |
"stabilityai/sd-turbo",
|
| 8 |
-
torch_dtype=torch.float32 # Use float32 for CPU
|
| 9 |
).to("cuda" if torch.cuda.is_available() else "cpu")
|
| 10 |
|
| 11 |
-
#
|
| 12 |
-
pipe("a
|
| 13 |
|
|
|
|
| 14 |
def generate_image(prompt):
|
| 15 |
image = pipe(prompt).images[0]
|
| 16 |
return image
|
| 17 |
|
| 18 |
-
#
|
| 19 |
iface = gr.Interface(
|
| 20 |
fn=generate_image,
|
| 21 |
inputs="text",
|
| 22 |
outputs="image",
|
| 23 |
-
title="Text
|
| 24 |
-
description="
|
| 25 |
)
|
| 26 |
|
| 27 |
-
iface.launch(
|
|
|
|
| 2 |
from diffusers import AutoPipelineForText2Image
|
| 3 |
import torch
|
| 4 |
|
| 5 |
# Select the device and a matching dtype: half precision on GPU for speed
# and memory, float32 on CPU (float16 is unsupported/slow on most CPUs).
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the sd-turbo text-to-image pipeline once at module import.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sd-turbo",
    torch_dtype=dtype,
).to(device)

# Warm-up call to pay the one-time initialization cost up front.
# sd-turbo is distilled for single-step, guidance-free sampling
# (num_inference_steps=1, guidance_scale=0.0 per the model card),
# so the warm-up should not run the 50-step default.
pipe("a test prompt", num_inference_steps=1, guidance_scale=0.0)
|
| 13 |
|
| 14 |
def generate_image(prompt, num_inference_steps=1, guidance_scale=0.0):
    """Generate an image from a text prompt with the global sd-turbo pipeline.

    Args:
        prompt: Text description of the image to generate.
        num_inference_steps: Denoising steps. sd-turbo is distilled for
            single-step generation, so 1 is the recommended default.
        guidance_scale: Classifier-free guidance weight. sd-turbo was
            trained without guidance, so 0.0 is recommended; the previous
            implementation fell back to the pipeline defaults, which are
            both slower and off-spec for this model.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    result = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )
    return result.images[0]
|
| 18 |
|
| 19 |
# Build the Gradio UI: a single text box in, a single image out.
# launch() takes no enable_queue argument here (removed in newer Gradio).
iface = gr.Interface(
    generate_image,
    "text",
    "image",
    title="Text-to-Image with SD-Turbo",
    description="Fast free text-to-image generation using stabilityai/sd-turbo",
)

iface.launch()
|