tejani committed on
Commit
9ff9d53
·
verified ·
1 Parent(s): db20358

Upload 4 files

Browse files
catvton_quantization_helper.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+ from pathlib import Path
3
+ import pickle
4
+
5
+ from tqdm.notebook import tqdm
6
+ from transformers import set_seed
7
+ import numpy as np
8
+ import openvino as ov
9
+ from PIL import Image
10
+ import torch
11
+ import nncf
12
+
13
+ from ov_catvton_helper import (
14
+ MODEL_DIR,
15
+ VAE_ENCODER_PATH,
16
+ VAE_DECODER_PATH,
17
+ UNET_PATH,
18
+ DENSEPOSE_PROCESSOR_PATH,
19
+ SCHP_PROCESSOR_ATR,
20
+ SCHP_PROCESSOR_LIP,
21
+ )
22
+
23
# Fix all RNG seeds (python/numpy/torch) for reproducible calibration runs.
set_seed(42)
# Diffusion sampling settings used while collecting calibration data.
NUM_INFERENCE_STEPS = 50
GUIDANCE_SCALE = 2.5
GENERATOR = torch.Generator(device="cpu").manual_seed(42)

# Output paths for the optimized OpenVINO IRs: INT4 weight compression for
# the smaller models, INT8 quantization for the UNet.
VAE_ENCODER_INT4_PATH = MODEL_DIR / "vae_encoder_int4.xml"
VAE_DECODER_INT4_PATH = MODEL_DIR / "vae_decoder_int4.xml"
UNET_INT8_PATH = MODEL_DIR / "unet_int8.xml"
DENSEPOSE_PROCESSOR_INT4_PATH = MODEL_DIR / "densepose_processor_int4.xml"
SCHP_PROCESSOR_ATR_INT4 = MODEL_DIR / "schp_processor_atr_int4.xml"
SCHP_PROCESSOR_LIP_INT4 = MODEL_DIR / "schp_processor_lip_int4.xml"
34
+
35
+
36
class CompiledModelDecorator(ov.CompiledModel):
    """Compiled-model wrapper that records call inputs as calibration data.

    Every ``__call__`` forwards to the wrapped compiled model; with
    probability ``keep_prob`` the positional input is appended to
    ``data_cache`` so it can later be fed to NNCF quantization.
    """

    def __init__(
        self,
        compiled_model: ov.CompiledModel,
        data_cache: list[Any] | None = None,  # fixed: annotation previously `list[Any]` yet defaulted to None
        keep_prob: float = 1.0,
    ):
        super().__init__(compiled_model)
        # Fresh list per instance — never share a mutable default.
        self.data_cache = data_cache if data_cache is not None else []
        self.keep_prob = keep_prob

    def __call__(self, *args, **kwargs):
        if np.random.rand() <= self.keep_prob:
            # NOTE(review): `append(*args)` assumes exactly one positional
            # argument; more than one would raise TypeError. Kept as-is since
            # call sites pass a single input.
            self.data_cache.append(*args)
        return super().__call__(*args, **kwargs)
51
+
52
+
53
def collect_calibration_data(pipeline, automasker, mask_processor, dataset, subset_size):
    """Collect (and cache on disk) UNet inputs for NNCF quantization.

    Temporarily wraps the pipeline's compiled UNet in CompiledModelDecorator
    so every UNet call records its input, runs the try-on pipeline over
    ``dataset`` until at least ``subset_size`` samples are collected, then
    pickles the result to ``calibration_data/<subset_size>.pkl``. On later
    calls the cached pickle is loaded instead of re-running the pipeline.

    ``dataset`` yields (person_image_path, cloth_image_path) Path pairs.
    Returns the list of collected UNet inputs.
    """
    calibration_dataset_filepath = Path("calibration_data") / f"{subset_size}.pkl"
    calibration_dataset_filepath.parent.mkdir(exist_ok=True, parents=True)

    if not calibration_dataset_filepath.exists():
        # Swap in the recording decorator; restored after collection below.
        original_unet = pipeline.unet.unet
        pipeline.unet.unet = CompiledModelDecorator(original_unet)

        calibration_dataset = []
        pbar = tqdm(total=subset_size, desc="Collecting calibration dataset")
        for data in dataset:
            person_image_path, cloth_image_path = data
            person_image = Image.open(person_image_path)
            cloth_image = Image.open(cloth_image_path)
            # Heuristic: infer garment type from the image path; anything
            # without "upper" in the path is treated as "overall".
            cloth_type = "upper" if "upper" in person_image_path.as_posix() else "overall"
            mask = automasker(person_image, cloth_type)["mask"]
            mask = mask_processor.blur(mask, blur_factor=9)

            pipeline(
                image=person_image,
                condition_image=cloth_image,
                mask=mask,
                num_inference_steps=NUM_INFERENCE_STEPS,
                guidance_scale=GUIDANCE_SCALE,
                generator=GENERATOR,
            )
            collected_subset_size = len(pipeline.unet.unet.data_cache)
            # One pipeline call performs NUM_INFERENCE_STEPS UNet invocations,
            # hence the bar advances by that amount per image pair.
            pbar.update(NUM_INFERENCE_STEPS)
            if collected_subset_size >= subset_size:
                break

        calibration_dataset = pipeline.unet.unet.data_cache
        # Restore the original compiled UNet.
        pipeline.unet.unet = original_unet

        with open(calibration_dataset_filepath, "wb") as f:
            pickle.dump(calibration_dataset, f)
    else:
        # NOTE(review): pickle.load is only safe because this file is produced
        # locally by the branch above — never point it at untrusted data.
        with open(calibration_dataset_filepath, "rb") as f:
            calibration_dataset = pickle.load(f)

    return calibration_dataset
94
+
95
+
96
def compress_model(core, model_path, save_path, group_size=128, ratio=0.8):
    """Read an OpenVINO IR, INT4-compress its weights, and save the result.

    A no-op when ``save_path`` already exists, so repeated notebook runs
    skip finished models.
    """
    if save_path.exists():
        return
    print(f"{model_path.stem} compression started")
    print(f"Compression parameters:\n\tmode = {nncf.CompressWeightsMode.INT4_SYM}\n\tratio = {ratio}\n\tgroup_size = {group_size}")
    source_model = core.read_model(model_path)
    # Symmetric INT4 weight compression; `ratio` controls the share of
    # layers compressed to 4 bit (the rest stay at 8 bit).
    compressed = nncf.compress_weights(
        source_model,
        mode=nncf.CompressWeightsMode.INT4_SYM,
        ratio=ratio,
        group_size=group_size,
    )
    ov.save_model(compressed, save_path)
    print(f"{model_path.stem} compression finished")
    print(f"Compressed {model_path.stem} can be found in {save_path}")
110
+
111
+
112
def compress_models(core, group_size=128, ratio=0.8):
    """INT4-compress every convertible model except the UNet (quantized elsewhere)."""
    conversion_pairs = (
        (VAE_ENCODER_PATH, VAE_ENCODER_INT4_PATH),
        (VAE_DECODER_PATH, VAE_DECODER_INT4_PATH),
        (DENSEPOSE_PROCESSOR_PATH, DENSEPOSE_PROCESSOR_INT4_PATH),
        (SCHP_PROCESSOR_ATR, SCHP_PROCESSOR_ATR_INT4),
        (SCHP_PROCESSOR_LIP, SCHP_PROCESSOR_LIP_INT4),
    )
    for source_path, target_path in conversion_pairs:
        compress_model(core, source_path, target_path, group_size, ratio)
118
+
119
+
120
def compare_models_size():
    """Print the FP16 → optimized size ratio for every converted model.

    Compares the on-disk ``.bin`` weight files of each FP16 IR with its
    INT4/INT8 counterpart. Pairs where either file is missing are skipped
    (previously a missing *optimized* file raised FileNotFoundError because
    only the FP16 path was checked).
    """
    fp16_model_paths = [
        VAE_ENCODER_PATH,
        VAE_DECODER_PATH,
        UNET_PATH,
        DENSEPOSE_PROCESSOR_PATH,
        SCHP_PROCESSOR_ATR,
        SCHP_PROCESSOR_LIP,
    ]
    optimized_models = [
        VAE_ENCODER_INT4_PATH,
        VAE_DECODER_INT4_PATH,
        UNET_INT8_PATH,
        DENSEPOSE_PROCESSOR_INT4_PATH,
        SCHP_PROCESSOR_ATR_INT4,
        SCHP_PROCESSOR_LIP_INT4,
    ]

    for fp16_path, optimized_path in zip(fp16_model_paths, optimized_models):
        # Skip pairs that were not converted/compressed in this session.
        if not fp16_path.exists() or not optimized_path.with_suffix(".bin").exists():
            continue
        fp16_ir_model_size = fp16_path.with_suffix(".bin").stat().st_size
        optimized_model_size = optimized_path.with_suffix(".bin").stat().st_size
        print(f"{fp16_path.stem} compression rate: {fp16_ir_model_size / optimized_model_size:.3f}")
catvton_workflow.json ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "last_node_id": 23,
3
+ "last_link_id": 27,
4
+ "nodes": [
5
+ {
6
+ "id": 12,
7
+ "type": "LoadAutoMasker",
8
+ "pos": [
9
+ 97,
10
+ 118
11
+ ],
12
+ "size": {
13
+ "0": 436.1082458496094,
14
+ "1": 58
15
+ },
16
+ "flags": {},
17
+ "order": 0,
18
+ "mode": 0,
19
+ "outputs": [
20
+ {
21
+ "name": "pipe",
22
+ "type": "MODEL",
23
+ "links": [
24
+ 11
25
+ ],
26
+ "shape": 3,
27
+ "slot_index": 0
28
+ }
29
+ ],
30
+ "properties": {
31
+ "Node name for S&R": "LoadAutoMasker"
32
+ },
33
+ "widgets_values": [
34
+ "zhengchong/CatVTON"
35
+ ]
36
+ },
37
+ {
38
+ "id": 14,
39
+ "type": "PreviewImage",
40
+ "pos": [
41
+ 1028,
42
+ 115
43
+ ],
44
+ "size": {
45
+ "0": 160.99398803710938,
46
+ "1": 246
47
+ },
48
+ "flags": {},
49
+ "order": 7,
50
+ "mode": 0,
51
+ "inputs": [
52
+ {
53
+ "name": "images",
54
+ "type": "IMAGE",
55
+ "link": 17
56
+ }
57
+ ],
58
+ "title": "Masked Target",
59
+ "properties": {
60
+ "Node name for S&R": "PreviewImage"
61
+ }
62
+ },
63
+ {
64
+ "id": 18,
65
+ "type": "PreviewImage",
66
+ "pos": [
67
+ 879,
68
+ 469
69
+ ],
70
+ "size": {
71
+ "0": 313.9939880371094,
72
+ "1": 341.0123291015625
73
+ },
74
+ "flags": {},
75
+ "order": 8,
76
+ "mode": 0,
77
+ "inputs": [
78
+ {
79
+ "name": "images",
80
+ "type": "IMAGE",
81
+ "link": 27
82
+ }
83
+ ],
84
+ "properties": {
85
+ "Node name for S&R": "PreviewImage"
86
+ }
87
+ },
88
+ {
89
+ "id": 11,
90
+ "type": "LoadImage",
91
+ "pos": [
92
+ 319.77685748644126,
93
+ 463.34710718580516
94
+ ],
95
+ "size": {
96
+ "0": 210,
97
+ "1": 347.0123291015625
98
+ },
99
+ "flags": {},
100
+ "order": 1,
101
+ "mode": 0,
102
+ "outputs": [
103
+ {
104
+ "name": "IMAGE",
105
+ "type": "IMAGE",
106
+ "links": [
107
+ 15
108
+ ],
109
+ "shape": 3,
110
+ "slot_index": 0
111
+ },
112
+ {
113
+ "name": "MASK",
114
+ "type": "MASK",
115
+ "links": null,
116
+ "shape": 3
117
+ }
118
+ ],
119
+ "title": "Reference Garment",
120
+ "properties": {
121
+ "Node name for S&R": "LoadImage"
122
+ },
123
+ "widgets_values": [
124
+ "1300-28.jpg",
125
+ "image"
126
+ ],
127
+ "shape": 2
128
+ },
129
+ {
130
+ "id": 16,
131
+ "type": "CatVTON",
132
+ "pos": [
133
+ 599,
134
+ 471
135
+ ],
136
+ "size": {
137
+ "0": 242.99398803710938,
138
+ "1": 338.0123291015625
139
+ },
140
+ "flags": {},
141
+ "order": 6,
142
+ "mode": 0,
143
+ "inputs": [
144
+ {
145
+ "name": "pipe",
146
+ "type": "MODEL",
147
+ "link": 20
148
+ },
149
+ {
150
+ "name": "target_image",
151
+ "type": "IMAGE",
152
+ "link": 14
153
+ },
154
+ {
155
+ "name": "refer_image",
156
+ "type": "IMAGE",
157
+ "link": 15
158
+ },
159
+ {
160
+ "name": "mask_image",
161
+ "type": "IMAGE",
162
+ "link": 19
163
+ }
164
+ ],
165
+ "outputs": [
166
+ {
167
+ "name": "image",
168
+ "type": "IMAGE",
169
+ "links": [
170
+ 27
171
+ ],
172
+ "shape": 3,
173
+ "slot_index": 0
174
+ }
175
+ ],
176
+ "properties": {
177
+ "Node name for S&R": "CatVTON"
178
+ },
179
+ "widgets_values": [
180
+ 42,
181
+ "fixed",
182
+ 50,
183
+ 2.5
184
+ ]
185
+ },
186
+ {
187
+ "id": 10,
188
+ "type": "LoadImage",
189
+ "pos": [
190
+ 93.77685748644114,
191
+ 465.34710718580516
192
+ ],
193
+ "size": {
194
+ "0": 210,
195
+ "1": 345.0123291015625
196
+ },
197
+ "flags": {
198
+ "pinned": false
199
+ },
200
+ "order": 2,
201
+ "mode": 0,
202
+ "outputs": [
203
+ {
204
+ "name": "IMAGE",
205
+ "type": "IMAGE",
206
+ "links": [
207
+ 10,
208
+ 14
209
+ ],
210
+ "shape": 3,
211
+ "slot_index": 0
212
+ },
213
+ {
214
+ "name": "MASK",
215
+ "type": "MASK",
216
+ "links": null,
217
+ "shape": 3,
218
+ "slot_index": 1
219
+ }
220
+ ],
221
+ "title": "Target Person",
222
+ "properties": {
223
+ "Node name for S&R": "LoadImage"
224
+ },
225
+ "widgets_values": [
226
+ "1300.jpg",
227
+ "image"
228
+ ],
229
+ "shape": 2
230
+ },
231
+ {
232
+ "id": 15,
233
+ "type": "PreviewImage",
234
+ "pos": [
235
+ 845,
236
+ 116
237
+ ],
238
+ "size": {
239
+ "0": 160.1082305908203,
240
+ "1": 246
241
+ },
242
+ "flags": {},
243
+ "order": 5,
244
+ "mode": 0,
245
+ "inputs": [
246
+ {
247
+ "name": "images",
248
+ "type": "IMAGE",
249
+ "link": 18
250
+ }
251
+ ],
252
+ "title": "Binary Mask",
253
+ "properties": {
254
+ "Node name for S&R": "PreviewImage"
255
+ }
256
+ },
257
+ {
258
+ "id": 13,
259
+ "type": "AutoMasker",
260
+ "pos": [
261
+ 593,
262
+ 116
263
+ ],
264
+ "size": [
265
+ 227.49812396694324,
266
+ 240.48341801652845
267
+ ],
268
+ "flags": {},
269
+ "order": 4,
270
+ "mode": 0,
271
+ "inputs": [
272
+ {
273
+ "name": "pipe",
274
+ "type": "MODEL",
275
+ "link": 11
276
+ },
277
+ {
278
+ "name": "target_image",
279
+ "type": "IMAGE",
280
+ "link": 10
281
+ }
282
+ ],
283
+ "outputs": [
284
+ {
285
+ "name": "image",
286
+ "type": "IMAGE",
287
+ "links": [
288
+ 18,
289
+ 19
290
+ ],
291
+ "shape": 3,
292
+ "slot_index": 0
293
+ },
294
+ {
295
+ "name": "image_masked",
296
+ "type": "IMAGE",
297
+ "links": [
298
+ 17
299
+ ],
300
+ "shape": 3,
301
+ "slot_index": 1
302
+ }
303
+ ],
304
+ "properties": {
305
+ "Node name for S&R": "AutoMasker"
306
+ },
307
+ "widgets_values": [
308
+ "overall"
309
+ ]
310
+ },
311
+ {
312
+ "id": 17,
313
+ "type": "LoadCatVTONPipeline",
314
+ "pos": [
315
+ 101,
316
+ 223
317
+ ],
318
+ "size": {
319
+ "0": 431.00823974609375,
320
+ "1": 106
321
+ },
322
+ "flags": {},
323
+ "order": 3,
324
+ "mode": 0,
325
+ "outputs": [
326
+ {
327
+ "name": "pipe",
328
+ "type": "MODEL",
329
+ "links": [
330
+ 20
331
+ ],
332
+ "shape": 3,
333
+ "slot_index": 0
334
+ }
335
+ ],
336
+ "properties": {
337
+ "Node name for S&R": "LoadCatVTONPipeline"
338
+ },
339
+ "widgets_values": [
340
+ "runwayml/stable-diffusion-inpainting",
341
+ "zhengchong/CatVTON",
342
+ "bf16"
343
+ ]
344
+ }
345
+ ],
346
+ "links": [
347
+ [
348
+ 10,
349
+ 10,
350
+ 0,
351
+ 13,
352
+ 1,
353
+ "IMAGE"
354
+ ],
355
+ [
356
+ 11,
357
+ 12,
358
+ 0,
359
+ 13,
360
+ 0,
361
+ "MODEL"
362
+ ],
363
+ [
364
+ 14,
365
+ 10,
366
+ 0,
367
+ 16,
368
+ 1,
369
+ "IMAGE"
370
+ ],
371
+ [
372
+ 15,
373
+ 11,
374
+ 0,
375
+ 16,
376
+ 2,
377
+ "IMAGE"
378
+ ],
379
+ [
380
+ 17,
381
+ 13,
382
+ 1,
383
+ 14,
384
+ 0,
385
+ "IMAGE"
386
+ ],
387
+ [
388
+ 18,
389
+ 13,
390
+ 0,
391
+ 15,
392
+ 0,
393
+ "IMAGE"
394
+ ],
395
+ [
396
+ 19,
397
+ 13,
398
+ 0,
399
+ 16,
400
+ 3,
401
+ "IMAGE"
402
+ ],
403
+ [
404
+ 20,
405
+ 17,
406
+ 0,
407
+ 16,
408
+ 0,
409
+ "MODEL"
410
+ ],
411
+ [
412
+ 27,
413
+ 16,
414
+ 0,
415
+ 18,
416
+ 0,
417
+ "IMAGE"
418
+ ]
419
+ ],
420
+ "groups": [
421
+ {
422
+ "title": "Model Loading",
423
+ "bounding": [
424
+ 80,
425
+ 38,
426
+ 480,
427
+ 333
428
+ ],
429
+ "color": "#b06634",
430
+ "font_size": 24
431
+ },
432
+ {
433
+ "title": "Auto Mask Generating",
434
+ "bounding": [
435
+ 579,
436
+ 37,
437
+ 630,
438
+ 339
439
+ ],
440
+ "color": "#8AA",
441
+ "font_size": 24
442
+ },
443
+ {
444
+ "title": "Inputs Image",
445
+ "bounding": [
446
+ 80,
447
+ 384,
448
+ 483,
449
+ 443
450
+ ],
451
+ "color": "#3f789e",
452
+ "font_size": 24
453
+ },
454
+ {
455
+ "title": "TryOn by CatVTON",
456
+ "bounding": [
457
+ 580,
458
+ 387,
459
+ 629,
460
+ 441
461
+ ],
462
+ "color": "#b58b2a",
463
+ "font_size": 24
464
+ }
465
+ ],
466
+ "config": {},
467
+ "extra": {
468
+ "ds": {
469
+ "scale": 1.2100000000000002,
470
+ "offset": [
471
+ 206.77460330578393,
472
+ 291.45046628099226
473
+ ]
474
+ }
475
+ },
476
+ "version": 0.4
477
+ }
gradio_helper.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ from datetime import datetime
4
+
5
+ import gradio as gr
6
+ import numpy as np
7
+ import torch
8
+ from PIL import Image
9
+
10
+ from model.cloth_masker import vis_mask
11
+ from utils import init_weight_dtype, resize_and_crop, resize_and_padding
12
+
13
+
14
def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` equally sized PIL images into one grid image."""
    assert len(imgs) == rows * cols

    cell_w, cell_h = imgs[0].size
    canvas = Image.new("RGB", size=(cols * cell_w, rows * cell_h))

    # Fill the grid row-major: index -> (row, col) cell coordinates.
    for idx, img in enumerate(imgs):
        row, col = divmod(idx, cols)
        canvas.paste(img, box=(col * cell_w, row * cell_h))
    return canvas
23
+
24
+
25
+ HEADER = """
26
+ <h1 style="text-align: center;"> 🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models </h1>
27
+ """
28
+
29
+
30
def make_demo(pipeline, mask_processor, automasker, output_dir):
    """Build and return the CatVTON Gradio demo (gr.Blocks).

    The inner ``submit_function`` runs one try-on: it takes the person image
    (with an optionally hand-drawn mask layer), the garment image and the
    sampling options, saves a composite result under ``output_dir`` and
    returns the image to display according to ``show_type``.
    """

    def submit_function(person_image, cloth_image, cloth_type, num_inference_steps, guidance_scale, seed, show_type):
        # Fixed working resolution used by the pipeline.
        width = 768
        height = 1024
        # gr.ImageEditor payload: background file path + first drawn layer
        # (the user-painted mask), both as file paths.
        person_image, mask = person_image["background"], person_image["layers"][0]
        mask = Image.open(mask).convert("L")
        # A single unique gray value means nothing was drawn -> fall back to
        # the automatic masker below.
        if len(np.unique(np.array(mask))) == 1:
            mask = None
        else:
            # Binarize the hand-drawn mask.
            mask = np.array(mask)
            mask[mask > 0] = 255
            mask = Image.fromarray(mask)

        tmp_folder = output_dir
        date_str = datetime.now().strftime("%Y%m%d%H%M%S")
        # Results are grouped by day: <output_dir>/YYYYMMDD/HHMMSS.png
        result_save_path = os.path.join(tmp_folder, date_str[:8], date_str[8:] + ".png")
        if not os.path.exists(os.path.join(tmp_folder, date_str[:8])):
            os.makedirs(os.path.join(tmp_folder, date_str[:8]))

        # seed == -1 means "random": let the pipeline pick its own generator.
        generator = None
        if seed != -1:
            generator = torch.Generator(device="cpu").manual_seed(seed)

        person_image = Image.open(person_image).convert("RGB")
        cloth_image = Image.open(cloth_image).convert("RGB")
        person_image = resize_and_crop(person_image, (width, height))
        cloth_image = resize_and_padding(cloth_image, (width, height))

        # Process mask: hand-drawn mask wins; otherwise generate one from the
        # selected cloth type, then soften edges with a blur.
        if mask is not None:
            mask = resize_and_crop(mask, (width, height))
        else:
            mask = automasker(person_image, cloth_type)["mask"]
        mask = mask_processor.blur(mask, blur_factor=9)

        # Inference
        result_image = pipeline(
            image=person_image,
            condition_image=cloth_image,
            mask=mask,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator,
        )[0]

        # Post-process: always save person | masked person | cloth | result.
        masked_person = vis_mask(person_image, mask)
        save_result_image = image_grid([person_image, masked_person, cloth_image, result_image], 1, 4)
        save_result_image.save(result_save_path)
        if show_type == "result only":
            return result_image
        else:
            # Stack the chosen condition images into a thin column on the left
            # of the result (5 px gutter).
            width, height = person_image.size
            if show_type == "input & result":
                condition_width = width // 2
                conditions = image_grid([person_image, cloth_image], 2, 1)
            else:
                condition_width = width // 3
                conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
            conditions = conditions.resize((condition_width, height), Image.NEAREST)
            new_result_image = Image.new("RGB", (width + condition_width + 5, height))
            new_result_image.paste(conditions, (0, 0))
            new_result_image.paste(result_image, (condition_width + 5, 0))
            return new_result_image

    with gr.Blocks(title="CatVTON") as demo:
        gr.Markdown(HEADER)
        with gr.Row():
            # Left column: inputs and options.
            with gr.Column(scale=1, min_width=350):
                with gr.Row():
                    person_image = gr.ImageEditor(interactive=True, label="Person Image", type="filepath")

                with gr.Row():
                    with gr.Column(scale=1, min_width=230):
                        cloth_image = gr.Image(interactive=True, label="Condition Image", type="filepath")
                    with gr.Column(scale=1, min_width=120):
                        gr.Markdown(
                            '<span style="color: #808080; font-size: small;">Two ways to provide Mask:<br>1. Upload the person image and use the `🖌️` above to draw the Mask (higher priority)<br>2. Select the `Try-On Cloth Type` to generate automatically </span>'
                        )
                        cloth_type = gr.Radio(
                            label="Try-On Cloth Type",
                            choices=["upper", "lower", "overall"],
                            value="upper",
                        )

                submit = gr.Button("Submit")

                gr.Markdown(
                    '<span style="color: #808080; font-size: small;">Advanced options can adjust details:<br>1. `Inference Step` may enhance details;<br>2. `CFG` is highly correlated with saturation;<br>3. `Random seed` may improve pseudo-shadow.</span>'
                )
                with gr.Accordion("Advanced Options", open=False):
                    num_inference_steps = gr.Slider(label="Inference Step", minimum=10, maximum=100, step=5, value=50)
                    # Guidance scale (CFG)
                    guidance_scale = gr.Slider(label="CFG Strenth", minimum=0.0, maximum=7.5, step=0.5, value=2.5)
                    # Random Seed
                    seed = gr.Slider(label="Seed", minimum=-1, maximum=10000, step=1, value=42)
                    show_type = gr.Radio(
                        label="Show Type",
                        choices=["result only", "input & result", "input & mask & result"],
                        value="input & mask & result",
                    )

            # Right column: result display and example galleries.
            with gr.Column(scale=2, min_width=500):
                result_image = gr.Image(interactive=False, label="Result")
                with gr.Row():
                    # Photo Examples — NOTE(review): os.listdir here assumes the
                    # CatVTON repo's example folders exist relative to the CWD.
                    root_path = "CatVTON/resource/demo/example"
                    with gr.Column():
                        men_exm = gr.Examples(
                            examples=[os.path.join(root_path, "person", "men", _) for _ in os.listdir(os.path.join(root_path, "person", "men"))],
                            examples_per_page=4,
                            inputs=person_image,
                            label="Person Examples ①",
                        )
                        women_exm = gr.Examples(
                            examples=[os.path.join(root_path, "person", "women", _) for _ in os.listdir(os.path.join(root_path, "person", "women"))],
                            examples_per_page=4,
                            inputs=person_image,
                            label="Person Examples ②",
                        )
                        gr.Markdown(
                            '<span style="color: #808080; font-size: small;">*Person examples come from the demos of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a>. </span>'
                        )
                    with gr.Column():
                        condition_upper_exm = gr.Examples(
                            examples=[os.path.join(root_path, "condition", "upper", _) for _ in os.listdir(os.path.join(root_path, "condition", "upper"))],
                            examples_per_page=4,
                            inputs=cloth_image,
                            label="Condition Upper Examples",
                        )
                        condition_overall_exm = gr.Examples(
                            examples=[os.path.join(root_path, "condition", "overall", _) for _ in os.listdir(os.path.join(root_path, "condition", "overall"))],
                            examples_per_page=4,
                            inputs=cloth_image,
                            label="Condition Overall Examples",
                        )
                        condition_person_exm = gr.Examples(
                            examples=[os.path.join(root_path, "condition", "person", _) for _ in os.listdir(os.path.join(root_path, "condition", "person"))],
                            examples_per_page=4,
                            inputs=cloth_image,
                            label="Condition Reference Person Examples",
                        )
                        gr.Markdown('<span style="color: #808080; font-size: small;">*Condition examples come from the Internet. </span>')

        submit.click(
            submit_function,
            [
                person_image,
                cloth_image,
                cloth_type,
                num_inference_steps,
                guidance_scale,
                seed,
                show_type,
            ],
            result_image,
        )

    return demo
ov_catvton_helper.py ADDED
@@ -0,0 +1,244 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gc
2
+ import os
3
+ from collections import namedtuple
4
+ from pathlib import Path
5
+ import warnings
6
+
7
+ from diffusers.image_processor import VaeImageProcessor
8
+ from diffusers.models.autoencoders.vae import DiagonalGaussianDistribution
9
+ from huggingface_hub import snapshot_download
10
+ import yaml
11
+ import openvino as ov
12
+ import torch
13
+
14
+ from model.cloth_masker import AutoMasker
15
+ from model.pipeline import CatVTONPipeline
16
+
17
# Target directory and OpenVINO IR (.xml) path for each converted sub-model.
MODEL_DIR = Path("models")
VAE_ENCODER_PATH = MODEL_DIR / "vae_encoder.xml"
VAE_DECODER_PATH = MODEL_DIR / "vae_decoder.xml"
UNET_PATH = MODEL_DIR / "unet.xml"
DENSEPOSE_PROCESSOR_PATH = MODEL_DIR / "densepose_processor.xml"
SCHP_PROCESSOR_ATR = MODEL_DIR / "schp_processor_atr.xml"
SCHP_PROCESSOR_LIP = MODEL_DIR / "schp_processor_lip.xml"
24
+
25
+
26
def convert(model: torch.nn.Module, xml_path: str, example_input):
    """Trace ``model`` with ``example_input`` and save it as an OpenVINO IR.

    A no-op when the target ``xml_path`` already exists, so re-running a
    notebook cell never re-traces an already converted model.
    """
    target = Path(xml_path)
    if target.exists():
        return

    target.parent.mkdir(parents=True, exist_ok=True)
    model.eval()
    with torch.no_grad():
        ov_model = ov.convert_model(model, example_input=example_input)
    ov.save_model(ov_model, target)

    # Cleanup: drop TorchScript tracing state so repeated conversions in one
    # process do not accumulate memory.
    torch._C._jit_clear_class_registry()
    torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
    torch.jit._state._clear_class_state()
39
+
40
+
41
class VaeEncoder(torch.nn.Module):
    """Export-friendly wrapper exposing only the VAE encode path.

    Returns the raw latent-distribution parameters in a dict so the traced
    OpenVINO model has a single, stably named output.
    """

    def __init__(self, vae):
        super().__init__()
        self.vae = vae

    def forward(self, x):
        latent_dist = self.vae.encode(x)["latent_dist"]
        return {"latent_parameters": latent_dist.parameters}
48
+
49
+
50
class VaeDecoder(torch.nn.Module):
    """Export-friendly wrapper exposing only the VAE decode path."""

    def __init__(self, vae):
        super().__init__()
        self.vae = vae

    def forward(self, latents):
        decoded = self.vae.decode(latents)
        return decoded
57
+
58
+
59
class UNetWrapper(torch.nn.Module):
    """Wraps the diffusers UNet for tracing with a fixed keyword signature.

    ``return_dict`` is accepted for signature compatibility but always forced
    to False so the traced model yields a plain tuple output.
    """

    def __init__(self, unet):
        super().__init__()
        self.unet = unet

    def forward(self, sample=None, timestep=None, encoder_hidden_states=None, return_dict=None):
        return self.unet(
            sample=sample,
            timestep=timestep,
            encoder_hidden_states=encoder_hidden_states,
            return_dict=False,
        )
67
+
68
+
69
def download_models():
    """Download CatVTON weights and build the torch pipeline and auto-masker.

    Downloads the ``zhengchong/CatVTON`` snapshot into MODEL_DIR, constructs
    the CPU CatVTONPipeline on top of the Stable Diffusion inpainting base
    checkpoint, patches the bundled DensePose config to run on CPU, and
    returns ``(pipeline, mask_processor, automasker)``.
    """
    resume_path = "zhengchong/CatVTON"
    base_model_path = "booksforcharlie/stable-diffusion-inpainting"
    repo_path = snapshot_download(repo_id=resume_path, local_dir=MODEL_DIR)

    pipeline = CatVTONPipeline(base_ckpt=base_model_path, attn_ckpt=repo_path, attn_ckpt_version="mix", use_tf32=True, device="cpu")

    # fix default config to use cpu: rewrite DEVICE in the DensePose yaml
    # that ships with the downloaded snapshot.
    with open(f"{repo_path}/DensePose/densepose_rcnn_R_50_FPN_s1x.yaml", "r") as fp:
        data = yaml.safe_load(fp)

    data["MODEL"].update({"DEVICE": "cpu"})

    with open(f"{repo_path}/DensePose/densepose_rcnn_R_50_FPN_s1x.yaml", "w") as fp:
        yaml.safe_dump(data, fp)

    # Mask post-processing: grayscale + binarize, no normalization.
    mask_processor = VaeImageProcessor(vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
    automasker = AutoMasker(
        densepose_ckpt=os.path.join(repo_path, "DensePose"),
        schp_ckpt=os.path.join(repo_path, "SCHP"),
        device="cpu",
    )
    return pipeline, mask_processor, automasker
92
+
93
+
94
def convert_pipeline_models(pipeline):
    """Convert the pipeline's VAE encoder/decoder and UNet to OpenVINO IR.

    Torch sub-models are deleted after conversion to keep peak memory down.
    """
    # 1024x768 RGB input; decoder latents are spatially 8x smaller (128x96).
    convert(VaeEncoder(pipeline.vae), VAE_ENCODER_PATH, torch.zeros(1, 3, 1024, 768))
    convert(VaeDecoder(pipeline.vae), VAE_DECODER_PATH, torch.zeros(1, 4, 128, 96))
    del pipeline.vae

    # Example UNet input: batch 2, 9 channels, 256x96 spatial — assumed to be
    # the CFG-duplicated, person/garment-concatenated inpainting latent;
    # TODO(review): confirm the layout against CatVTONPipeline.
    inpainting_latent_model_input = torch.rand(2, 9, 256, 96)
    timestep = torch.tensor(0)
    # Empty placeholder tensor — presumably CatVTON's UNet ignores text
    # conditioning; verify before reusing elsewhere.
    encoder_hidden_states = torch.Tensor(0)
    example_input = (inpainting_latent_model_input, timestep, encoder_hidden_states)

    convert(UNetWrapper(pipeline.unet), UNET_PATH, example_input)
    del pipeline.unet
    gc.collect()
107
+
108
+
109
def convert_automasker_models(automasker):
    """Convert DensePose and both SCHP segmentation models to OpenVINO IR.

    Torch sub-models are deleted after conversion to free memory.
    """
    from detectron2.export import TracingAdapter  # it's detectron2 from CatVTON repo

    def inference(model, inputs):
        # use do_postprocess=False so it returns ROI mask
        inst = model.inference(inputs, do_postprocess=False)[0]
        return [{"instances": inst}]

    tracing_input = [{"image": torch.rand([3, 800, 800], dtype=torch.float32)}]
    # TracingAdapter emits many benign tracer warnings during export.
    warnings.filterwarnings("ignore")
    traceable_model = TracingAdapter(automasker.densepose_processor.predictor.model, tracing_input, inference)

    convert(traceable_model, DENSEPOSE_PROCESSOR_PATH, tracing_input[0]["image"])
    del automasker.densepose_processor.predictor.model

    # SCHP input resolutions differ per checkpoint (ATR: 512x512, LIP: 473x473).
    convert(automasker.schp_processor_atr.model, SCHP_PROCESSOR_ATR, torch.rand([1, 3, 512, 512], dtype=torch.float32))
    convert(automasker.schp_processor_lip.model, SCHP_PROCESSOR_LIP, torch.rand([1, 3, 473, 473], dtype=torch.float32))
    del automasker.schp_processor_atr.model
    del automasker.schp_processor_lip.model
    gc.collect()
129
+
130
+
131
class VAEWrapper(torch.nn.Module):
    """Drop-in replacement for the diffusers VAE backed by compiled OV models.

    Mimics the surface the pipeline uses: ``encode``, ``decode``, ``device``,
    ``dtype`` and ``config.scaling_factor``.
    """

    def __init__(self, vae_encoder, vae_decoder, scaling_factor):
        super().__init__()
        self.vae_encoder = vae_encoder  # fixed typo: attribute was `vae_enocder`
        self.vae_decoder = vae_decoder
        self.device = "cpu"
        self.dtype = torch.float32
        self.config = namedtuple("VAEConfig", ["scaling_factor"])(scaling_factor)

    def encode(self, pixel_values):
        """Run the compiled encoder and rebuild a diffusers-style result
        carrying a DiagonalGaussianDistribution as ``latent_dist``."""
        ov_outputs = self.vae_encoder(pixel_values).to_dict()

        model_outputs = {}
        for key, value in ov_outputs.items():
            # OV output keys are ports; use their first tensor name.
            model_outputs[next(iter(key.names))] = torch.from_numpy(value)

        result = namedtuple("VAE", "latent_dist")(DiagonalGaussianDistribution(parameters=model_outputs.pop("latent_parameters")))

        return result

    def decode(self, latents):
        """Run the compiled decoder and expose the image as ``.sample``."""
        outs = self.vae_decoder(latents)
        outs = namedtuple("VAE", "sample")(torch.from_numpy(outs[0]))
        return outs
155
+
156
+
157
class ConvUnetWrapper(torch.nn.Module):
    """Adapts a compiled OV UNet to the callable interface the pipeline expects."""

    def __init__(self, unet):
        super().__init__()
        self.unet = unet

    def forward(self, sample, timestep, encoder_hidden_states=None, **kwargs):
        # NOTE: encoder_hidden_states and extra kwargs are accepted but not
        # forwarded — the converted CatVTON UNet consumes only sample/timestep.
        feed = {
            "sample": sample,
            "timestep": timestep,
        }
        raw_outputs = self.unet(feed)
        return [torch.from_numpy(raw_outputs[0])]
171
+
172
+
173
class ConvDenseposeProcessorWrapper(torch.nn.Module):
    """Adapts compiled OV DensePose outputs back into detectron2 ``Instances``.

    The traced model returns flat arrays; this wrapper reassembles them into
    the ``[{"instances": Instances}]`` structure the AutoMasker expects.
    """

    def __init__(self, densepose_processor):
        super().__init__()
        self.densepose_processor = densepose_processor

    def forward(self, sample, **kwargs):
        from detectron2.structures import Instances, Boxes  # it's detectron2 from CatVTON repo

        outputs = self.densepose_processor(sample[0]["image"])
        # Flat output layout: boxes, classes, (optional mask), scores, input size.
        boxes = outputs[0]
        classes = outputs[1]
        has_mask = len(outputs) >= 5
        scores = outputs[2 if not has_mask else 3]
        # removed leftover debug `print(scores)` that spammed stdout per call
        model_input_size = (
            int(outputs[3 if not has_mask else 4][0]),
            int(outputs[3 if not has_mask else 4][1]),
        )
        # Threshold 0 keeps every non-negative-score detection.
        filtered_detections = scores >= 0
        boxes = Boxes(boxes[filtered_detections])
        scores = scores[filtered_detections]
        classes = classes[filtered_detections]
        out_dict = {"pred_boxes": boxes, "scores": scores, "pred_classes": classes}

        instances = Instances(model_input_size, **out_dict)

        return [{"instances": instances}]
200
+
201
+
202
class ConvSchpProcessorWrapper(torch.nn.Module):
    """Adapts a compiled OV SCHP model: numpy output -> torch tensor."""

    def __init__(self, schp_processor):
        super().__init__()
        self.schp_processor = schp_processor

    def forward(self, image):
        raw = self.schp_processor(image)
        return torch.from_numpy(raw[0])
211
+
212
+
213
def get_compiled_pipeline(pipeline, core, device, vae_encoder_path, vae_decoder_path, unet_path, vae_scaling_factor):
    """Swap the pipeline's torch VAE and UNet for compiled OpenVINO versions.

    Returns the same pipeline object, mutated in place.
    """
    unet_compiled = core.compile_model(unet_path, device.value)
    encoder_compiled = core.compile_model(vae_encoder_path, device.value)
    decoder_compiled = core.compile_model(vae_decoder_path, device.value)

    pipeline.vae = VAEWrapper(encoder_compiled, decoder_compiled, vae_scaling_factor)
    pipeline.unet = ConvUnetWrapper(unet_compiled)

    return pipeline
222
+
223
+
224
def get_compiled_automasker(automasker, core, device, densepose_processor_path, schp_processor_atr_path, schp_processor_lip_path):
    """Swap the automasker's torch sub-models for compiled OpenVINO versions.

    Returns the same automasker object, mutated in place.
    """
    densepose_compiled = core.compile_model(densepose_processor_path, device.value)
    atr_compiled = core.compile_model(schp_processor_atr_path, device.value)
    lip_compiled = core.compile_model(schp_processor_lip_path, device.value)

    automasker.densepose_processor.predictor.model = ConvDenseposeProcessorWrapper(densepose_compiled)
    automasker.schp_processor_atr.model = ConvSchpProcessorWrapper(atr_compiled)
    automasker.schp_processor_lip.model = ConvSchpProcessorWrapper(lip_compiled)

    return automasker
234
+
235
+
236
def get_pipeline_selection_option(is_optimized_pipe_available=False):
    """Return an ipywidgets checkbox for choosing quantized vs. FP16 models.

    The checkbox is enabled (and pre-checked) only when the optimized IRs
    are available.
    """
    import ipywidgets as widgets

    return widgets.Checkbox(
        value=is_optimized_pipe_available,
        description="Use quantized models",
        disabled=not is_optimized_pipe_available,
    )