raidensreturn committed on
Commit
aef244e
·
verified ·
1 Parent(s): bdf0c51

Deploy Gradio app with multiple files

Browse files
Files changed (5) hide show
  1. app.py +166 -0
  2. config.py +30 -0
  3. models.py +68 -0
  4. requirements.txt +9 -0
  5. utils.py +18 -0
app.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from config import (
4
+ APP_DESCRIPTION,
5
+ APP_TITLE,
6
+ DEFAULT_GUIDANCE,
7
+ DEFAULT_HEIGHT,
8
+ DEFAULT_NEGATIVE_PROMPT,
9
+ DEFAULT_NUM_IMAGES,
10
+ DEFAULT_NUM_STEPS,
11
+ DEFAULT_PROMPT,
12
+ DEFAULT_SEED,
13
+ DEFAULT_WIDTH,
14
+ EXAMPLE_PROMPTS,
15
+ MAX_GUIDANCE,
16
+ MAX_HEIGHT,
17
+ MAX_NUM_IMAGES,
18
+ MAX_NUM_STEPS,
19
+ MAX_WIDTH,
20
+ MIN_GUIDANCE,
21
+ MIN_NUM_STEPS,
22
+ )
23
+ from models import run_generation
24
+ from utils import prepare_generator, sanitize_dimensions
25
+
26
+
27
def generate_images(
    prompt: str,
    negative_prompt: str,
    guidance_scale: float,
    num_inference_steps: int,
    width: int,
    height: int,
    num_images: int,
    seed: int,
):
    """Validate UI inputs and run one text-to-image generation.

    Args:
        prompt: User prompt; must be non-empty after stripping.
        negative_prompt: Optional negative prompt; blank means "none".
        guidance_scale: Classifier-free guidance scale from the slider.
        num_inference_steps: Denoising step count from the slider.
        width: Requested image width in pixels (sanitized below).
        height: Requested image height in pixels (sanitized below).
        num_images: Number of images to generate per prompt.
        seed: RNG seed; negative values request a random seed.

    Returns:
        Tuple of (list of generated images, seed actually used).

    Raises:
        gr.Error: If the prompt is missing or whitespace-only.
    """
    # Guard against both None (API callers) and whitespace-only prompts.
    if not prompt or not prompt.strip():
        raise gr.Error("Please provide a prompt to describe your image.")
    # Snap dimensions to multiples of 8 and clamp to the configured bounds.
    width, height = sanitize_dimensions(width, height, MAX_WIDTH, MAX_HEIGHT)
    generator, final_seed = prepare_generator(seed)
    # Pass None (not "") when the negative prompt is empty or whitespace-only,
    # so the pipeline skips negative conditioning entirely.
    cleaned_negative = negative_prompt.strip() if negative_prompt else ""
    images = run_generation(
        prompt=prompt.strip(),
        negative_prompt=cleaned_negative or None,
        guidance_scale=guidance_scale,
        # Sliders may deliver floats over the API; the pipeline expects ints.
        num_inference_steps=int(num_inference_steps),
        width=width,
        height=height,
        num_images=int(num_images),
        generator=generator,
    )
    return images, final_seed
52
+
53
+
54
# ---- UI layout and wiring ------------------------------------------------
with gr.Blocks(fill_width=True) as demo:
    # Header: title, description, and attribution link.
    gr.Markdown(
        f"""
# {APP_TITLE}

{APP_DESCRIPTION}

[Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
"""
    )

    # Prompt and negative prompt, side by side.
    with gr.Row():
        prompt_box = gr.Textbox(
            label="Prompt",
            value=DEFAULT_PROMPT,
            placeholder="Describe what you want to see...",
            lines=3,
        )
        negative_box = gr.Textbox(
            label="Negative Prompt",
            value=DEFAULT_NEGATIVE_PROMPT,
            placeholder="Specify what to avoid (optional)",
            lines=3,
        )

    # Advanced generation knobs, collapsed by default.
    with gr.Accordion("Generation Settings", open=False):
        with gr.Row():
            guidance = gr.Slider(
                minimum=MIN_GUIDANCE,
                maximum=MAX_GUIDANCE,
                step=0.1,
                value=DEFAULT_GUIDANCE,
                label="Guidance Scale",
            )
            steps = gr.Slider(
                minimum=MIN_NUM_STEPS,
                maximum=MAX_NUM_STEPS,
                step=1,
                value=DEFAULT_NUM_STEPS,
                label="Inference Steps",
            )
        with gr.Row():
            width = gr.Slider(
                minimum=256,
                maximum=MAX_WIDTH,
                step=8,
                value=DEFAULT_WIDTH,
                label="Width (px)",
            )
            height = gr.Slider(
                minimum=256,
                maximum=MAX_HEIGHT,
                step=8,
                value=DEFAULT_HEIGHT,
                label="Height (px)",
            )
        with gr.Row():
            batch_size = gr.Slider(
                minimum=1,
                maximum=MAX_NUM_IMAGES,
                step=1,
                value=DEFAULT_NUM_IMAGES,
                label="Images per prompt",
            )
            seed_input = gr.Number(
                value=DEFAULT_SEED,
                label="Seed (-1 for random)",
                precision=0,
            )

    run_btn = gr.Button("Generate", variant="primary")
    output_gallery = gr.Gallery(
        label="Generated Images",
        columns=2,
        height="auto",
        object_fit="contain",
        show_share_button=True,
    )
    # Echoes the seed actually used so random results can be reproduced.
    seed_out = gr.Number(
        label="Used Seed",
        value=DEFAULT_SEED,
        interactive=False,
        precision=0,
    )

    # Wire the button to the generation callback; also exposed as the
    # /generate endpoint of the Space's API.
    run_btn.click(
        fn=generate_images,
        inputs=[
            prompt_box,
            negative_box,
            guidance,
            steps,
            width,
            height,
            batch_size,
            seed_input,
        ],
        outputs=[output_gallery, seed_out],
        api_name="generate",
    )

    # Click-to-fill example prompts.
    gr.Examples(
        examples=[[p] for p in EXAMPLE_PROMPTS],
        inputs=[prompt_box],
        label="Prompt Ideas",
    )

    gr.ClearButton(
        components=[prompt_box, negative_box, output_gallery],
        value="Reset",
    )

demo.queue(max_size=40).launch()
config.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Display metadata rendered in the app header.
APP_TITLE = "Stable Diffusion 2.1 Turbocharged"
APP_DESCRIPTION = (
    "Generate stunning visuals using the `Comfy-Org/stable_diffusion_2.1_repackaged` weights with ZeroGPU AoT acceleration."
)

# Hugging Face Hub repo id the pipeline weights are loaded from (models.py).
MODEL_ID = "Comfy-Org/stable_diffusion_2.1_repackaged"

# Defaults pre-filled in the UI controls.
DEFAULT_PROMPT = "A futuristic city skyline at sunset, ultra detailed, cinematic lighting"
DEFAULT_NEGATIVE_PROMPT = "blurry, low quality, distorted, watermark"
DEFAULT_GUIDANCE = 7.5
DEFAULT_NUM_STEPS = 30
DEFAULT_WIDTH = 768
DEFAULT_HEIGHT = 512
DEFAULT_NUM_IMAGES = 2
DEFAULT_SEED = -1  # negative means "pick a random seed per run"

# Bounds enforced by the UI sliders (width/height are also clamped in
# utils.sanitize_dimensions, which snaps them to multiples of 8).
MIN_GUIDANCE = 1.0
MAX_GUIDANCE = 15.0
MIN_NUM_STEPS = 10
MAX_NUM_STEPS = 50
MAX_WIDTH = 1024
MAX_HEIGHT = 1024
MAX_NUM_IMAGES = 4

# Example prompts offered as click-to-fill suggestions in the UI.
EXAMPLE_PROMPTS = [
    "A watercolor painting of a cozy reading nook with plants",
    "An astronaut riding a horse on Mars in vibrant comic style",
    "Dark fantasy castle on a cliff, volumetric fog, dramatic lighting",
    "Macro photograph of a dew-covered flower, ultra sharp",
]
models.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import torch
3
+ from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
4
+
5
+ from config import MODEL_ID, DEFAULT_GUIDANCE, DEFAULT_HEIGHT, DEFAULT_PROMPT, DEFAULT_WIDTH
6
+
7
+
8
def _load_pipeline() -> StableDiffusionPipeline:
    """Build and configure the Stable Diffusion pipeline.

    Loads MODEL_ID in fp16 when CUDA is available (fp32 otherwise), swaps
    the scheduler for DPMSolverMultistep, and moves the model to the GPU.

    Returns:
        The ready-to-use StableDiffusionPipeline.
    """
    dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=dtype,
        # Safety checker disabled deliberately for this Space.
        safety_checker=None,
        requires_safety_checker=False,
    )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
        pipe.set_progress_bar_config(disable=True)
        try:
            # xformers is NOT listed in requirements.txt, so this call would
            # raise at startup if left unguarded. Treat it as a best-effort
            # optimization and fall back to the default attention if missing.
            pipe.enable_xformers_memory_efficient_attention()
        except Exception:
            pass  # best-effort: default attention is functionally equivalent
    return pipe
22
+
23
+
24
# Load the pipeline once at import time so all requests share the weights.
pipe = _load_pipeline()


@spaces.GPU(duration=1500)
def compile_unet():
    """Ahead-of-time compile the UNet for ZeroGPU.

    Runs a short throwaway generation (5 steps) under ``spaces.aoti_capture``
    to record the exact args/kwargs the pipeline passes to the UNet, then
    exports and compiles the UNet with that captured call signature.

    Returns:
        The compiled artifact produced by ``spaces.aoti_compile``.
    """
    with spaces.aoti_capture(pipe.unet) as call:
        pipe(
            prompt=DEFAULT_PROMPT,
            negative_prompt=None,
            guidance_scale=DEFAULT_GUIDANCE,
            num_inference_steps=5,
            width=DEFAULT_WIDTH,
            height=DEFAULT_HEIGHT,
            num_images_per_prompt=1,
        )
    # NOTE(review): the export captures the default resolution/batch; other
    # request sizes presumably fall back to the eager UNet or recompile —
    # confirm against the spaces AoT docs.
    exported = torch.export.export(pipe.unet, args=call.args, kwargs=call.kwargs)
    return spaces.aoti_compile(exported)


# Compile at startup and swap the compiled UNet into the live pipeline.
compiled_unet = compile_unet()
spaces.aoti_apply(compiled_unet, pipe.unet)
45
+
46
+
47
@spaces.GPU(duration=90)
def run_generation(
    prompt: str,
    negative_prompt: str | None,
    guidance_scale: float,
    num_inference_steps: int,
    width: int,
    height: int,
    num_images: int,
    generator: torch.Generator,
):
    """Execute one text-to-image generation on the shared pipeline.

    Runs inside a ZeroGPU slot (90 s budget). All sampling parameters are
    forwarded to the pipeline unchanged.

    Returns:
        The list of generated PIL images.
    """
    call_kwargs = dict(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
        generator=generator,
    )
    return pipe(**call_kwargs).images
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ git+https://github.com/huggingface/diffusers
4
+ transformers
5
+ accelerate
6
+ safetensors
7
+ numpy
8
+ spaces
9
+ Pillow
utils.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ from typing import Tuple
3
+
4
+ import torch
5
+
6
+
7
def prepare_generator(seed: int) -> Tuple[torch.Generator, int]:
    """Build a seeded torch.Generator for reproducible sampling.

    A negative (or missing) seed requests randomness: a fresh seed is drawn
    uniformly from [0, 2**31 - 1] so it can be shown back to the user.

    Args:
        seed: Requested seed; None or a negative value means "random".

    Returns:
        Tuple of (seeded generator, the seed actually used).
    """
    if seed is None or seed < 0:
        seed = random.randint(0, 2**31 - 1)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.Generator(device=device).manual_seed(seed), seed
13
+
14
+
15
def sanitize_dimensions(width: int, height: int, max_width: int, max_height: int) -> Tuple[int, int]:
    """Normalize requested image dimensions for the diffusion pipeline.

    Each dimension is rounded DOWN to the nearest multiple of 8, then
    clamped into [256, max_*].

    Returns:
        Tuple of (sanitized width, sanitized height).
    """

    def _snap(value: int, upper: int) -> int:
        # Floor to a multiple of 8 first, then clamp to the allowed range.
        aligned = (int(value) // 8) * 8
        return max(256, min(upper, aligned))

    return _snap(width, max_width), _snap(height, max_height)