more images, or breakage

- app.py +265 -31
- requirements.txt +1 -1

app.py CHANGED

@@ -1,13 +1,14 @@
-import spaces
+#import spaces
+import random
+
 import gradio as gr
 import numpy as np
 import PIL.Image
-from PIL import Image
-import random
-from diffusers import StableDiffusionXLPipeline
-from diffusers import EulerAncestralDiscreteScheduler
 import torch
 from compel import Compel, ReturnedEmbeddingsType
+from diffusers import (EulerAncestralDiscreteScheduler,
+                       StableDiffusionXLPipeline)
+from PIL import Image
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
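Note: `infer()` below relies on a `process_long_prompt` helper whose body falls outside this diff's hunks. Judging from the `Compel, ReturnedEmbeddingsType` import and the `conditioning[0:1]` / `conditioning[1:2]` slicing in the code this commit comments out, it batches the prompt and negative prompt through Compel's SDXL path. A minimal sketch of such a helper, assuming the module-level `pipe` is a loaded `StableDiffusionXLPipeline` (the real body is not shown in this diff):

```python
from compel import Compel, ReturnedEmbeddingsType

def process_long_prompt(prompt, negative_prompt=""):
    """Sketch only: build SDXL embeddings for prompts beyond the 77-token limit."""
    try:
        # SDXL needs both tokenizers/text encoders; only the second
        # encoder returns the pooled embedding (requires_pooled=[False, True]).
        compel = Compel(
            tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
            text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
            requires_pooled=[False, True],
        )
        # Batch positive and negative prompts: row 0 is the prompt, row 1
        # the negative prompt, matching the [0:1]/[1:2] slices in the diff.
        conditioning, pooled = compel([prompt, negative_prompt])
        return conditioning, pooled
    except Exception as e:
        print(f"Long prompt processing failed: {e}, falling back to standard processing")
        return None, None
```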
@@ -53,7 +54,7 @@ def process_long_prompt(prompt, negative_prompt=""):
         print(f"Long prompt processing failed: {e}, falling back to standard processing")
         return None, None
 
-@spaces.GPU
+#@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     # Changed: Remove the 60-word limit warning and add long prompt check
     use_long_prompt = len(prompt.split()) > 60 or len(prompt) > 300
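Note: commenting out `@spaces.GPU` (together with `import spaces` at the top) removes the ZeroGPU hook; on a ZeroGPU Space that decorator is what allocates a GPU for the duration of each call, so without it the function runs on whatever device the Space actually has. For reference, a sketch of the decorator as it is normally used (`generate` and its body are hypothetical; `duration` is optional):

```python
import spaces

@spaces.GPU(duration=120)  # request a ZeroGPU slice of up to 120 s per call
def generate(prompt: str):
    # `pipe` is assumed to be the module-level StableDiffusionXLPipeline.
    return pipe(prompt=prompt).images[0]
```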
@@ -66,15 +67,13 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
     try:
         # Added: Try long prompt processing first if prompt is long
         if use_long_prompt:
-            print("
+            print("Long prompts enabled.")
             conditioning, pooled = process_long_prompt(prompt, negative_prompt)
 
             if conditioning is not None:
                 output_image = pipe(
-                    prompt_embeds=conditioning[0:1],
-                    pooled_prompt_embeds=pooled[0:1],
-                    negative_prompt_embeds=conditioning[1:2],
-                    negative_pooled_prompt_embeds=pooled[1:2],
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
                     guidance_scale=guidance_scale,
                     num_inference_steps=num_inference_steps,
                     width=width,
@@ -82,18 +81,151 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
                     generator=generator
                 ).images[0]
                 seed = seed - 1
+
                 output_image2 = pipe(
-                    prompt_embeds=conditioning[0:1],
-                    pooled_prompt_embeds=pooled[0:1],
-                    negative_prompt_embeds=conditioning[1:2],
-                    negative_pooled_prompt_embeds=pooled[1:2],
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
                     guidance_scale=guidance_scale,
                     num_inference_steps=num_inference_steps,
                     width=width,
                     height=height,
                     generator=generator
                 ).images[0]
-                return output_image, output_image2
+                seed = seed - 1
+
+                output_image3 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image4 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image5 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image6 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image7 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image8 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image9 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+                seed = seed - 1
+
+                output_image0 = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    width=width,
+                    height=height,
+                    generator=generator
+                ).images[0]
+
+                outputs = [
+                    output_image,
+                    output_image2,
+                    output_image3,
+                    output_image4,
+                    output_image5,
+                    output_image6,
+                    output_image7,
+                    output_image8,
+                    output_image9,
+                    output_image0,
+                ]
+
+                return outputs
+
+
+
+
+
+
+
+
+                #output_image = pipe(
+                #    prompt_embeds=conditioning[0:1],
+                #    pooled_prompt_embeds=pooled[0:1],
+                #    negative_prompt_embeds=conditioning[1:2],
+                #    negative_pooled_prompt_embeds=pooled[1:2],
+                #    guidance_scale=guidance_scale,
+                #    num_inference_steps=num_inference_steps,
+                #    width=width,
+                #    height=height,
+                #    generator=generator
+                #).images[0]
+                #seed = seed - 1
+                #output_image2 = pipe(
+                #    prompt_embeds=conditioning[0:1],
+                #    pooled_prompt_embeds=pooled[0:1],
+                #    negative_prompt_embeds=conditioning[1:2],
+                #    negative_pooled_prompt_embeds=pooled[1:2],
+                #    guidance_scale=guidance_scale,
+                #    num_inference_steps=num_inference_steps,
+                #    width=width,
+                #    height=height,
+                #    generator=generator
+                #).images[0]
+                #return output_image, output_image2
 
         # Fall back to standard processing
         output_image = pipe(
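Note: the ten `pipe()` calls added above differ only in the name they assign to, so the same branch can be written as a loop. A compact equivalent sketch (`generate_batch` is a hypothetical name, not the committed code); it also makes visible that the interleaved `seed = seed - 1` statements are dead, since `generator` (created earlier in `infer`, outside these hunks) is never rebuilt from the decremented seed — the ten images differ only because the generator's internal state advances between calls:

```python
def generate_batch(pipe, prompt, negative_prompt, guidance_scale,
                   num_inference_steps, width, height, generator, n=10):
    # Same behavior as the ten sequential calls above: one shared
    # generator whose state advances from call to call.
    return [
        pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]
        for _ in range(n)
    ]
```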
@@ -115,7 +247,96 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
            height=height,
            generator=generator
        ).images[0]
-        return output_image, output_image2
+        output_image3 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        seed = seed - 1
+        output_image4 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        output_image5 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        seed = seed - 1
+        output_image6 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        output_image7 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        seed = seed - 1
+        output_image8 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        output_image9 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+        seed = seed - 1
+        output_image0 = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            width=width,
+            height=height,
+            generator=generator
+        ).images[0]
+
+        outputs = [
+            output_image,
+            output_image2,
+            output_image3,
+            output_image4,
+            output_image5,
+            output_image6,
+            output_image7,
+            output_image8,
+            output_image9,
+            output_image0,
+        ]
+        return outputs
 
    except RuntimeError as e:
        print(f"Error during generation: {e}")
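Note: the fallback branch repeats the same pattern, and its `seed = seed - 1` lines are likewise dead (here the decrements even land only after some of the calls). If distinct, reproducible per-image seeds are actually the intent, each call needs a freshly seeded generator. A sketch, assuming the module-level `device`; `generate_seeded` is a hypothetical helper:

```python
import torch

def generate_seeded(pipe, prompt, negative_prompt, guidance_scale,
                    num_inference_steps, width, height, seed, device, n=10):
    # Image i uses seed - i, so each of the n images is individually
    # reproducible from its own seed.
    return [
        pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=torch.Generator(device=device).manual_seed(seed - i),
        ).images[0]
        for i in range(n)
    ]
```

Diffusers pipelines also accept `num_images_per_prompt`, which would produce the batch in a single call at the cost of higher peak memory.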
@@ -127,7 +348,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 css = """
 #col-container {
     margin: 0 auto;
-    max-width:
+    max-width: 2000px;
 }
 """
 
@@ -139,32 +360,45 @@ with gr.Blocks(css=css) as demo:
             prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
-                max_lines=
+                max_lines=3,
                 placeholder="Enter your prompt (long prompts are automatically supported)",
+                value="Show me a glorious mountain range covered in colorful crystaline trees with a laser disco show from space.",
                 container=False,
             )
 
             run_button = gr.Button("Run", scale=0)
 
-
-
+        with gr.Row():
+            result = gr.Image(format="png", label="Result", show_label=False)
+            result2 = gr.Image(format="png", label="Result2", show_label=False)
+        with gr.Row():
+            result3 = gr.Image(format="png", label="Result3", show_label=False)
+            result4 = gr.Image(format="png", label="Result4", show_label=False)
+        with gr.Row():
+            result5 = gr.Image(format="png", label="Result5", show_label=False)
+            result6 = gr.Image(format="png", label="Result6", show_label=False)
+        with gr.Row():
+            result7 = gr.Image(format="png", label="Result7", show_label=False)
+            result8 = gr.Image(format="png", label="Result8", show_label=False)
+        with gr.Row():
+            result9 = gr.Image(format="png", label="Result9", show_label=False)
+            result0 = gr.Image(format="png", label="Result0", show_label=False)
 
 
         with gr.Accordion("Advanced Settings", open=False):
 
             negative_prompt = gr.Text(
                 label="Negative prompt",
-                max_lines=
+                max_lines=3,
                 placeholder="Enter a negative prompt",
-
-                value=" ((dull, unimaginative, lifeless, tedious, lack of emotions, dry, flat, static, stiff, uninspired), bad quality, bad art, ugly, overexposed, too bright, washed out, high exposure, vague details, ambiguous shapes, undefined, poorly render, rough, low resolution, artifact, compression artifacts, low poly, blocky, banding, color bleed, texture seams, oversaturation, fused fingers, mutated, malformed eyes, missing iris sclera, poorly drawn background, poor body language)"
+                value=" ((unimaginative, dry, flat, static, stiff, uninspired), bad quality overexposed, too bright, washed out, high exposure, low resolution, artifact, compression artifacts, low poly, blocky, banding, color bleed, texture seams, oversaturation, fused fingers, malformed eyes, missing iris sclera, poorly drawn background)"
             )
 
             seed = gr.Slider(
                 label="Seed",
                 minimum=0,
                 maximum=MAX_SEED,
-                step=
+                step=20,
                 value=0,
             )
 
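Note: ten fixed `gr.Image` slots paired into rows works, but it hard-codes the image count in three places (the components, `infer`'s return list, and the `click` outputs). Gradio's `gr.Gallery` renders a variable-length list of images in a single component; a sketch of that alternative (not what this commit does — `results` is a hypothetical name):

```python
# One Gallery instead of ten gr.Image slots; infer would then return
# the list of PIL images directly.
with gr.Row():
    results = gr.Gallery(
        label="Results",
        show_label=False,
        columns=2,      # two images per row, like the gr.Row pairs above
        format="png",
    )

run_button.click(
    fn=infer,
    inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps],
    outputs=[results],
)
```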
@@ -176,7 +410,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=
+                    value=2000,
                 )
 
                 height = gr.Slider(
@@ -184,22 +418,22 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=
+                    value=2000,
                 )
 
             with gr.Row():
                 guidance_scale = gr.Slider(
                     label="Guidance scale",
                     minimum=0.0,
-                    maximum=
+                    maximum=30.0,
                     step=0.1,
-                    value=
+                    value=7,
                 )
 
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=
+                    maximum=99,
                     step=1,
                     value=20,
                 )
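Note on the new slider defaults (the commit message's "or breakage"): with `minimum=256` and `step=32`, the reachable width/height values are 256 + 32·k, and 2000 − 256 = 1744 is not a multiple of 32, so the new default of 2000 sits off the slider's own grid; 2000×2000 is also well above SDXL's native 1024² training resolution. A quick check:

```python
# The width/height sliders can only land on 256 + 32*k.
grid = [256 + 32 * k for k in range(64)]          # 256 .. 2272
print(2000 in grid)                               # False
print([v for v in grid if abs(v - 2000) <= 16])   # [1984, 2016]
```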
@@ -207,7 +441,7 @@ with gr.Blocks(css=css) as demo:
     run_button.click(
         fn=infer,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs=[result, result2],
+        outputs=[result, result2, result3, result4, result5, result6, result7, result8, result9, result0],
 
     )
 
requirements.txt CHANGED

@@ -6,4 +6,4 @@ transformers
 xformers
 compel
 pydantic==2.10.6
-gradio==5.12.0
+gradio==5.12.0