raidensreturn committed on
Commit
4dfe4bb
·
verified ·
1 Parent(s): 866e2b7

Deploy Gradio app with multiple files

Browse files
Files changed (2) hide show
  1. app.py +144 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gradio Space entry point: load a Stable Diffusion 2.1 UNCLIP checkpoint once
# at import time and expose it through the functions defined below.
import os
import random
from typing import Optional

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

# Hugging Face Hub repo to load. NOTE(review): this is a ComfyUI-style
# "repackaged" checkpoint — confirm it is published in diffusers format,
# otherwise DiffusionPipeline.from_pretrained will fail to resolve it.
MODEL_ID = "Comfy-Org/stable_diffusion_2.1_unclip_repackaged"
# Half precision keeps VRAM usage manageable on a single GPU.
DTYPE = torch.float16
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Fail fast at startup instead of erroring on the first generation request.
# NOTE(review): on ZeroGPU hardware, CUDA visibility at import time depends on
# the `spaces` runtime — verify this guard does not abort a ZeroGPU Space.
if DEVICE != "cuda":
    raise EnvironmentError("This Space requires a GPU runtime to run Stable Diffusion 2.1 UNCLIP.")

# Load the pipeline once at module import so every request reuses the weights.
pipe = DiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=DTYPE,
    safety_checker=None,  # safety checker deliberately disabled for this demo
    use_safetensors=True,
)
# Enable memory-efficient attention only when diffusers/xformers support it.
if hasattr(pipe, "enable_xformers_memory_efficient_attention"):
    pipe.enable_xformers_memory_efficient_attention()
pipe.to(DEVICE)
# Per-step progress bars would only clutter the Space logs.
pipe.set_progress_bar_config(disable=True)
27
+
28
+
29
@spaces.GPU(duration=1500)
def compile_transformer():
    """Ahead-of-time compile the pipeline's denoiser for faster inference.

    Runs one short capture pass through the pipeline to record the real
    call arguments of the denoiser, exports it with ``torch.export``, and
    returns the AoT-compiled artifact for ``spaces.aoti_apply``.

    Returns:
        The compiled-model handle produced by ``spaces.aoti_compile``.
    """
    # BUG FIX: Stable Diffusion 2.1 is a UNet-based pipeline — it exposes
    # `pipe.unet`, not `pipe.transformer` (that attribute exists only on
    # DiT/transformer pipelines such as SD3 or Flux). Capturing
    # `pipe.transformer` raised AttributeError before any compile happened.
    with spaces.aoti_capture(pipe.unet) as call:
        # Few steps / small resolution: we only need representative inputs.
        pipe(
            prompt="high quality photo of a futuristic city skyline at sunset",
            negative_prompt="low quality, blurry",
            num_inference_steps=4,
            guidance_scale=5.0,
            width=512,
            height=512,
        )
    # Export the denoiser with the exact args/kwargs observed during capture.
    exported = torch.export.export(
        pipe.unet,
        args=call.args,
        kwargs=call.kwargs,
    )
    return spaces.aoti_compile(exported)
49
+
50
+
51
# Compile once at startup, then patch the compiled denoiser into the pipeline.
compiled_transformer = compile_transformer()
# BUG FIX: apply to `pipe.unet` — SD 2.1 pipelines have no `transformer`
# attribute, so the original `pipe.transformer` raised AttributeError here.
spaces.aoti_apply(compiled_transformer, pipe.unet)
53
+
54
+
55
@spaces.GPU(duration=60)
def generate_image(
    prompt: str,
    negative_prompt: str,
    guidance_scale: float,
    num_inference_steps: int,
    width: int,
    height: int,
    seed: int,
):
    """Run Stable Diffusion 2.1 UNCLIP to create an image.

    Args:
        prompt (str): Text prompt describing the desired image.
        negative_prompt (str): Undesired attributes to avoid.
        guidance_scale (float): CFG guidance strength.
        num_inference_steps (int): Number of denoising steps.
        width (int): Output image width in pixels.
        height (int): Output image height in pixels.
        seed (int): Random seed for reproducibility.

    Returns:
        PIL.Image.Image: The generated image (first image of the batch).
        BUG FIX: the previous ``-> torch.Tensor`` annotation was wrong —
        ``pipe(...).images`` holds PIL images, not tensors.
    """
    # An all-whitespace negative prompt means "no negative prompt".
    cleaned_negative = negative_prompt.strip() or None
    # BUG FIX: Gradio sliders deliver floats even with step=1, and
    # torch.Generator.manual_seed rejects non-int seeds — cast explicitly.
    generator = torch.Generator(device=DEVICE).manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        negative_prompt=cleaned_negative,
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
        width=int(width),
        height=int(height),
        generator=generator,
    )
    return result.images[0]
93
+
94
+
95
# Build the web UI: a prompt/settings column on the left, output on the right.
with gr.Blocks(title="Stable Diffusion 2.1 UNCLIP Tester") as demo:
    # Header and attribution.
    gr.Markdown(
        """
        # Stable Diffusion 2.1 UNCLIP (Comfy-Org)
        [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)

        Experiment with prompts using the repackaged SD 2.1 UNCLIP model.
        """
    )

    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Prompt",
                value="A hyper-detailed matte painting of a floating city above the clouds, cinematic lighting",
                lines=3,
                placeholder="Describe what you want to generate...",
            )
            negative_box = gr.Textbox(
                label="Negative Prompt",
                value="low quality, blurry, distorted, watermark",
                lines=3,
                placeholder="Describe what to avoid...",
            )
            # Sampling controls, two per row.
            with gr.Row():
                cfg_slider = gr.Slider(1.0, 15.0, value=7.5, step=0.1, label="Guidance Scale")
                steps_slider = gr.Slider(10, 60, value=30, step=1, label="Inference Steps")
            with gr.Row():
                width_slider = gr.Slider(512, 1024, value=768, step=64, label="Width")
                height_slider = gr.Slider(512, 1024, value=768, step=64, label="Height")
            seed_slider = gr.Slider(0, 2_147_483_647, value=42, step=1, label="Seed")
            randomize_btn = gr.Button("Randomize Seed", variant="secondary")
            run_btn = gr.Button("Generate", variant="primary")

        with gr.Column():
            result_image = gr.Image(label="Generated Image", show_download_button=True)

    # Event wiring: seed randomizer and the main generation call.
    randomize_btn.click(
        fn=lambda: random.randint(0, 2_147_483_647),
        inputs=None,
        outputs=seed_slider,
    )
    run_btn.click(
        fn=generate_image,
        inputs=[prompt_box, negative_box, cfg_slider, steps_slider, width_slider, height_slider, seed_slider],
        outputs=result_image,
    )

demo.queue()
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ torch
3
+ torchvision
4
+ torchaudio
5
+ accelerate
6
+ safetensors
7
+ Pillow
8
+ git+https://github.com/huggingface/diffusers
9
+ xformers