Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -18,7 +18,7 @@ pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
|
|
| 18 |
pipe = pipe.to(device)
|
| 19 |
|
| 20 |
MAX_SEED = np.iinfo(np.int32).max
|
| 21 |
-
MAX_IMAGE_SIZE = 1024
|
| 22 |
|
| 23 |
@spaces.GPU(duration=65)
|
| 24 |
def infer(
|
|
@@ -28,8 +28,8 @@ def infer(
|
|
| 28 |
randomize_seed=False,
|
| 29 |
width=1024,
|
| 30 |
height=1024,
|
| 31 |
-
guidance_scale=4.5,
|
| 32 |
-
num_inference_steps=40,
|
| 33 |
progress=gr.Progress(track_tqdm=True),
|
| 34 |
):
|
| 35 |
if randomize_seed:
|
|
@@ -63,8 +63,7 @@ css = """
|
|
| 63 |
|
| 64 |
with gr.Blocks(css=css) as demo:
|
| 65 |
with gr.Column(elem_id="col-container"):
|
| 66 |
-
gr.Markdown(" # [Stable Diffusion 3.5 Large](https://huggingface.co/stabilityai/stable-diffusion-3.5-large)")
|
| 67 |
-
gr.Markdown("[Learn more](https://stability.ai/news/introducing-stable-diffusion-3-5) about the Stable Diffusion 3.5 series. Try on [Stability AI API](https://platform.stability.ai/docs/api-reference#tag/Generate/paths/~1v2beta~1stable-image~1generate~1sd3/post), or [download model](https://huggingface.co/stabilityai/stable-diffusion-3.5-large) to run locally with ComfyUI or diffusers.")
|
| 68 |
with gr.Row():
|
| 69 |
prompt = gr.Text(
|
| 70 |
label="Prompt",
|
|
@@ -119,7 +118,7 @@ with gr.Blocks(css=css) as demo:
|
|
| 119 |
minimum=0.0,
|
| 120 |
maximum=7.5,
|
| 121 |
step=0.1,
|
| 122 |
-
value=4.5,
|
| 123 |
)
|
| 124 |
|
| 125 |
num_inference_steps = gr.Slider(
|
|
@@ -127,7 +126,7 @@ with gr.Blocks(css=css) as demo:
|
|
| 127 |
minimum=1,
|
| 128 |
maximum=50,
|
| 129 |
step=1,
|
| 130 |
-
value=40,
|
| 131 |
)
|
| 132 |
|
| 133 |
gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")
|
|
|
|
| 18 |
pipe = pipe.to(device)
|
| 19 |
|
| 20 |
MAX_SEED = np.iinfo(np.int32).max
|
| 21 |
+
MAX_IMAGE_SIZE = 1440
|
| 22 |
|
| 23 |
@spaces.GPU(duration=65)
|
| 24 |
def infer(
|
|
|
|
| 28 |
randomize_seed=False,
|
| 29 |
width=1024,
|
| 30 |
height=1024,
|
| 31 |
+
guidance_scale=3,
|
| 32 |
+
num_inference_steps=20,
|
| 33 |
progress=gr.Progress(track_tqdm=True),
|
| 34 |
):
|
| 35 |
if randomize_seed:
|
|
|
|
| 63 |
|
| 64 |
with gr.Blocks(css=css) as demo:
|
| 65 |
with gr.Column(elem_id="col-container"):
|
| 66 |
+
gr.Markdown(" # [Stable Diffusion 3.5 Medium Turbo](https://huggingface.co/tensorart/stable-diffusion-3.5-medium-turbo)")
|
|
|
|
| 67 |
with gr.Row():
|
| 68 |
prompt = gr.Text(
|
| 69 |
label="Prompt",
|
|
|
|
| 118 |
minimum=0.0,
|
| 119 |
maximum=7.5,
|
| 120 |
step=0.1,
|
| 121 |
+
value=3,
|
| 122 |
)
|
| 123 |
|
| 124 |
num_inference_steps = gr.Slider(
|
|
|
|
| 126 |
minimum=1,
|
| 127 |
maximum=50,
|
| 128 |
step=1,
|
| 129 |
+
value=20,
|
| 130 |
)
|
| 131 |
|
| 132 |
gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")
|