# NOTE(review): the original paste carried Hugging Face Spaces viewer residue
# here (file size "4,629 Bytes", commit hashes, and a row of gutter line
# numbers). It was not part of the program and has been reduced to this note.
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
import transformers
import warnings
# Silence FutureWarning deprecation chatter and reduce transformers logging
# to errors only, so the console shows just the app's own output.
warnings.filterwarnings("ignore", category=FutureWarning)
transformers.logging.set_verbosity_error()
# Hub repo ids of the checkpoints offered in the UI (display name -> repo path).
# Keys double as the dropdown labels, so both sides use the canonical repo id.
model_paths = {
    "CompVis/stable-diffusion-v1-4": "CompVis/stable-diffusion-v1-4",
    "runwayml/stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5",
    "stabilityai/stable-diffusion-2-1": "stabilityai/stable-diffusion-2-1"
}

# Eagerly instantiate one pipeline per checkpoint up front so switching models
# in the UI is instant. fp16 weights + low_cpu_mem_usage keep the footprint down.
pipes = {
    name: StableDiffusionPipeline.from_pretrained(
        path, torch_dtype=torch.float16, low_cpu_mem_usage=True
    )
    for name, path in model_paths.items()
}
def generate_image(prompt, negative_prompt, model_name, steps, sampler, cfg_scale, width, height, seed):
    """Run one text-to-image generation on the selected pipeline.

    Parameters mirror the Gradio inputs wired in `generate_button.click`:
        prompt / negative_prompt: free-text conditioning strings.
        model_name: key into the module-level `pipes` dict.
        steps: number of denoising steps (slider; arrives as a float).
        sampler: one of "ddim", "ddpm", "pndm" from the sampler dropdown.
        cfg_scale: classifier-free guidance scale.
        width / height: output resolution in pixels (sliders; floats).
        seed: RNG seed; -1 means "random" (no fixed generator).

    Returns:
        The first generated PIL image.
    """
    # Local import so the module can be imported without pulling scheduler
    # classes until a generation is actually requested.
    from diffusers import DDIMScheduler, DDPMScheduler, PNDMScheduler

    pipe = pipes[model_name]

    # Bug fix: `sampler` was previously accepted but ignored, so the UI
    # dropdown had no effect. Swap in the matching scheduler, reusing the
    # current scheduler's config so model-specific settings are preserved.
    schedulers = {"ddim": DDIMScheduler, "ddpm": DDPMScheduler, "pndm": PNDMScheduler}
    if sampler in schedulers:
        pipe.scheduler = schedulers[sampler].from_config(pipe.scheduler.config)

    # gr.Number delivers a float; -1 is the sentinel for "random seed".
    # Bug fix: use a dedicated torch.Generator instead of torch.manual_seed,
    # which mutated the *global* RNG state as a side effect.
    generator = None
    if seed is not None and int(seed) != -1:
        generator = torch.Generator().manual_seed(int(seed))

    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),  # sliders deliver floats; the pipeline wants ints
        guidance_scale=cfg_scale,
        width=int(width),
        height=int(height),
        generator=generator,
    )
    return output.images[0]
# Page CSS: stretch the element with id "generate" to fill its column.
# NOTE(review): in the original file no component set elem_id="generate",
# so this rule may be inert — confirm the Generate button carries that id.
css = """
#generate {
height: 100%;
}
"""
# --- Gradio UI --------------------------------------------------------------
# NOTE(review): the Korean label text below was already mojibake
# (double-encoded UTF-8) in the original file; those byte sequences are
# preserved as-is rather than guessed at. Only the stripped indentation and
# the spurious line breaks *inside* string literals (paste artifacts that made
# the file syntactically invalid) have been repaired.
with gr.Blocks(css=css) as demo:
    with gr.Row():
        # Checkpoint selector; choices are the keys of `model_paths`.
        model_name = gr.Dropdown(
            label="Model (๋ชจ๋ธ ์ ํ)",
            choices=list(model_paths.keys()),
            value="runwayml/stable-diffusion-v1-5",
            interactive=True
        )
    with gr.Tab("Text-to-Image (ํ์คํธ์์ ์ด๋ฏธ์ง ์์ฑ)"):
        with gr.Row():
            with gr.Column(scale=6, min_width=600):
                prompt = gr.Textbox(
                    label="Prompt (ํ๋กฌํํธ)",
                    value="a high-resolution photograph of a samoyed cub wearing a shiny silver astronaut suit",
                    lines=3
                )
                negative_prompt = gr.Textbox(
                    label="Negative Prompt (๋ค๊ฑฐํฐ๋ธ ํ๋กฌํํธ)",
                    value="low resolution, blurry, cartoon, deformed, ugly, disfigured, malformed, bad anatomy, unrealistic",
                    lines=3
                )
            with gr.Column():
                # Bug fix: the page CSS targets `#generate`, but the button
                # had no elem_id, so the height rule never applied.
                generate_button = gr.Button("Generate (์์ฑ)", variant='primary', elem_id="generate")
        with gr.Row():
            with gr.Column(scale=3):
                with gr.Tab("Generation Settings (์์ฑ ์ค์ )"):
                    with gr.Row():
                        sampler = gr.Dropdown(
                            label="Sampler (์ํ๋ฌ)",
                            choices=["ddim", "ddpm", "pndm"],
                            value="ddpm"
                        )
                    with gr.Row():
                        steps = gr.Slider(
                            label="Steps (์ํ๋ง ๋จ๊ณ ์)",
                            minimum=1,
                            maximum=50,
                            value=20,
                            step=1
                        )
                    with gr.Row():
                        width = gr.Slider(
                            label="Width (๋๋น, ํฝ์)",
                            minimum=512,
                            maximum=1024,
                            value=512,
                            step=64
                        )
                        height = gr.Slider(
                            label="Height (๋์ด, ํฝ์)",
                            minimum=512,
                            maximum=1024,
                            value=512,
                            step=64
                        )
                    cfg_scale = gr.Slider(
                        label="CFG Scale (CFG ์ค์ผ์ผ, ์ ์ด ๊ฐ๋)",
                        minimum=1,
                        maximum=20,
                        value=7,
                        step=1
                    )
                    seed = gr.Number(
                        label="Seed (์๋, ๊ณ ์ ๋ฒํธ)",
                        value=-1
                    )
            with gr.Column(scale=2):
                output_image = gr.Image(
                    label="Output Image (์์ฑ๋ ์ด๋ฏธ์ง)"
                )
    # Wire the button to the generator; this input order must match
    # generate_image's parameter order exactly.
    generate_button.click(
        fn=generate_image,
        inputs=[
            prompt,
            negative_prompt,
            model_name,
            steps,
            sampler,
            cfg_scale,
            width,
            height,
            seed
        ],
        outputs=output_image
    )

demo.launch()