fffiloni ciaochaos committed on
Commit
e54e757
·
0 Parent(s):

Duplicate from ioclab/brightness-controlnet

Browse files

Co-authored-by: Troy Ni <ciaochaos@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ *.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+ .idea
3
+
4
+ venv
5
+ .venv
6
+
7
+ gradio_cached_examples
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Brightness ControlNet
3
+ emoji: 💻
4
+ colorFrom: red
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: 3.27.0
8
+ app_file: app.py
9
+ pinned: false
10
+ tags:
11
+ - jax-diffusers-event
12
+ duplicated_from: ioclab/brightness-controlnet
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from PIL import Image
import gradio as gr
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
import gc  # fix: grouped with the other imports at the top (PEP 8) instead of after executable code

# Allow TF32 matmuls: faster fp16/fp32 math on Ampere+ GPUs at negligible
# precision cost for image generation.
torch.backends.cuda.matmul.allow_tf32 = True

# Brightness-conditioned ControlNet weights, loaded once at import time so
# every request reuses the same model (fp16; safetensors avoids pickle).
controlnet = ControlNetModel.from_pretrained(
    "ioclab/control_v1p_sd15_brightness",
    torch_dtype=torch.float16,
    use_safetensors=True,
)

# Stable Diffusion 1.5 pipeline driven by the ControlNet above.
# NOTE: safety_checker=None disables the NSFW filter for this demo.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
)

# UniPC needs fewer sampling steps than the pipeline's default scheduler.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# VRAM savers: memory-efficient attention, CPU offload of idle submodules,
# and sliced attention computation.
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
pipe.enable_attention_slicing()
23
def infer(
    prompt,
    negative_prompt,
    conditioning_image,
    num_inference_steps=30,
    size=768,
    guidance_scale=7.0,
    seed=1234,
):
    """Generate one image with the brightness ControlNet pipeline.

    Args:
        prompt: text prompt for the diffusion model.
        negative_prompt: text describing what to avoid.
        conditioning_image: numpy array from the Gradio image widget; it is
            converted to a grayscale PIL image before conditioning.
        num_inference_steps: number of denoising steps.
        size: output height and width in pixels (square image).
        guidance_scale: classifier-free guidance strength.
        seed: RNG seed; -1 means "pick a random seed".

    Returns:
        The first generated PIL image.
    """
    # Gradio delivers a numpy array; the pipeline expects a grayscale
    # ('L' mode) PIL image as the brightness condition.
    raw_condition = Image.fromarray(conditioning_image)
    grayscale_condition = raw_condition.convert('L')

    # seed == -1 requests randomness: draw a fresh seed from the generator
    # itself, otherwise use the caller-supplied seed deterministically.
    rng = torch.Generator()
    chosen_seed = rng.seed() if seed == -1 else seed
    generator = rng.manual_seed(chosen_seed)

    result = pipe(
        prompt,
        grayscale_condition,
        height=size,
        width=size,
        num_inference_steps=num_inference_steps,
        generator=generator,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        controlnet_conditioning_scale=1.0,
    ).images[0]

    # Drop intermediates and nudge the GC to keep host memory flat between
    # requests on the shared Space.
    del grayscale_condition, raw_condition
    gc.collect()

    return result
59
+
60
# Gradio front end: prompt inputs, conditioning image, advanced sliders,
# a results pane, and cached examples — all wired to infer().
with gr.Blocks() as demo:
    gr.Markdown(
        """
    # ControlNet on Brightness

    This is a demo on ControlNet based on brightness.
    """)

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt")
            conditioning_image = gr.Image(label="Conditioning Image")
            with gr.Accordion('Advanced options', open=False):
                with gr.Row():
                    # Positional args: minimum, maximum, initial value.
                    num_inference_steps = gr.Slider(10, 40, 20, step=1, label="Steps")
                    size = gr.Slider(256, 768, 512, step=128, label="Size")
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label='Guidance Scale',
                        minimum=0.1,
                        maximum=30.0,
                        value=7.0,
                        step=0.1,
                    )
                    # value=-1 means "random seed" (see infer()).
                    seed = gr.Slider(
                        label='Seed',
                        value=-1,
                        minimum=-1,
                        maximum=2147483647,
                        step=1,
                        # randomize=True
                    )
            submit_btn = gr.Button(value="Submit", variant="primary")
        with gr.Column(min_width=300):
            output = gr.Image(label="Result")

    # Run inference when the button is clicked.
    submit_btn.click(
        fn=infer,
        inputs=[prompt, negative_prompt, conditioning_image,
                num_inference_steps, size, guidance_scale, seed],
        outputs=output,
    )

    # Pre-cached example prompt / conditioning-image pairs.
    gr.Examples(
        examples=[
            ["a village in the mountains", "monochrome", "./conditioning_images/conditioning_image_1.jpg"],
            ["three people walking in an alleyway with hats and pants", "monochrome", "./conditioning_images/conditioning_image_2.jpg"],
            ["an anime character, natural skin", "monochrome, blue skin, grayscale", "./conditioning_images/conditioning_image_3.jpg"],
            ["a man in a black suit", "monochrome", "./conditioning_images/conditioning_image_4.jpg"],
            ["the forbidden city in beijing at sunset with a reflection in the water", "monochrome", "./conditioning_images/conditioning_image_5.jpg"],
            ["a man in a white shirt holding his hand out in front of", "monochrome", "./conditioning_images/conditioning_image_6.jpg"],
        ],
        inputs=[prompt, negative_prompt, conditioning_image],
        outputs=output,
        fn=infer,
        cache_examples=True,
    )

    gr.Markdown(
        """
    * [Dataset](https://huggingface.co/datasets/ioclab/grayscale_image_aesthetic_3M)
    * [Diffusers model](https://huggingface.co/ioclab/control_v1p_sd15_brightness), [Web UI model](https://huggingface.co/ioclab/ioc-controlnet)
    * [Training Report](https://api.wandb.ai/links/ciaochaos/oot5cui2), [Doc(Chinese)](https://aigc.ioclab.com/sd-showcase/brightness-controlnet.html)
    """)

demo.launch()
conditioning_images/conditioning_image_1.jpg ADDED
conditioning_images/conditioning_image_1_prompt.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ a painting of a village in the mountains
conditioning_images/conditioning_image_1_raw.jpg ADDED
conditioning_images/conditioning_image_2.jpg ADDED
conditioning_images/conditioning_image_2_prompt.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ three people walking in an alleyway with hats and pants
conditioning_images/conditioning_image_2_raw.jpg ADDED
conditioning_images/conditioning_image_3.jpg ADDED
conditioning_images/conditioning_image_4.jpg ADDED
conditioning_images/conditioning_image_5.jpg ADDED
conditioning_images/conditioning_image_6.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+
2
+ accelerate
3
+ diffusers
4
+ transformers
5
+ torch
6
+ xformers
7
+ safetensors