JingYang2017 committed on
Commit
69506a5
·
verified ·
1 Parent(s): 0839328

Upload zero123_plus.py

Browse files
Files changed (1) hide show
  1. zero123_plus.py +444 -0
zero123_plus.py ADDED
@@ -0,0 +1,444 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional
2
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
3
+ from diffusers.schedulers import KarrasDiffusionSchedulers
4
+
5
+ import numpy
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.utils.checkpoint
10
+ import torch.distributed
11
+ import transformers
12
+ from collections import OrderedDict
13
+ from PIL import Image
14
+ from torchvision import transforms
15
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
16
+ import torch.nn.functional as F
17
+ import diffusers
18
+ from diffusers import (
19
+ AutoencoderKL,
20
+ DDPMScheduler,
21
+ DiffusionPipeline,
22
+ EulerAncestralDiscreteScheduler,
23
+ UNet2DConditionModel,
24
+ ImagePipelineOutput
25
+ )
26
+ from diffusers.image_processor import VaeImageProcessor
27
+ from diffusers.models.attention_processor import Attention, AttnProcessor, XFormersAttnProcessor, AttnProcessor2_0
28
+ from diffusers.utils.import_utils import is_xformers_available
29
+
30
+
31
def to_rgb_image(maybe_rgba: Image.Image):
    """Return an RGB version of a PIL image.

    RGB images are returned unchanged.  RGBA images are composited onto a
    neutral mid-gray (127) background using their alpha channel.  Any other
    mode raises ValueError.
    """
    mode = maybe_rgba.mode
    if mode == 'RGB':
        return maybe_rgba
    if mode == 'RGBA':
        width, height = maybe_rgba.size
        # randint(127, 128) always yields 127 — a constant gray canvas.
        # (The call is kept, rather than numpy.full, so the global NumPy RNG
        # state advances exactly as before.)
        canvas = numpy.random.randint(127, 128, size=[height, width, 3], dtype=numpy.uint8)
        background = Image.fromarray(canvas, 'RGB')
        background.paste(maybe_rgba, mask=maybe_rgba.getchannel('A'))
        return background
    raise ValueError("Unsupported image type.", mode)
42
+
43
+
44
class ReferenceOnlyAttnProc(torch.nn.Module):
    """Attention-processor wrapper implementing reference-only conditioning.

    Wraps an ordinary attention processor.  When ``enabled``, the layer's
    encoder hidden states are either written to ('w'), read-and-popped from
    ('r'), or merged with ('m') a shared ``ref_dict`` keyed by this
    processor's ``name``, before delegating to the wrapped processor.
    """
    def __init__(
        self,
        chained_proc,
        enabled=False,
        name=None
    ) -> None:
        super().__init__()
        self.enabled = enabled
        self.chained_proc = chained_proc
        self.name = name

    def __call__(
        self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None,
        mode="w", ref_dict: dict = None, is_cfg_guidance = False
    ) -> Any:
        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        res0 = None
        if self.enabled and is_cfg_guidance:
            # The first (unconditional) sample bypasses reference mixing.
            res0 = self.chained_proc(attn, hidden_states[:1], encoder_hidden_states[:1], attention_mask)
            hidden_states = hidden_states[1:]
            encoder_hidden_states = encoder_hidden_states[1:]
        if self.enabled:
            if mode == 'w':
                # Write pass: stash this layer's states for later read passes.
                ref_dict[self.name] = encoder_hidden_states
            elif mode in ('r', 'm'):
                # Read ('r' pops, 'm' keeps) and concatenate along the token axis.
                stored = ref_dict.pop(self.name) if mode == 'r' else ref_dict[self.name]
                encoder_hidden_states = torch.cat([encoder_hidden_states, stored], dim=1)
            else:
                assert False, mode
        res = self.chained_proc(attn, hidden_states, encoder_hidden_states, attention_mask)
        if res0 is not None:
            res = torch.cat([res0, res])
        return res
79
+
80
+
81
class RefOnlyNoisedUNet(torch.nn.Module):
    """UNet wrapper implementing Zero123++-style reference-only conditioning.

    Every attention processor of the wrapped ``unet`` is replaced by a
    ``ReferenceOnlyAttnProc`` (active only on self-attention, "attn1").
    Each forward pass first noises the condition latent to the current
    timestep and runs it through the UNet in "write" mode to capture
    per-layer reference states, then denoises ``sample`` in "read" mode so
    each self-attention layer also attends to those captured states.
    """
    def __init__(self, unet: UNet2DConditionModel, train_sched: DDPMScheduler, val_sched: EulerAncestralDiscreteScheduler) -> None:
        super().__init__()
        self.unet = unet
        self.train_sched = train_sched
        self.val_sched = val_sched

        unet_lora_attn_procs = dict()
        for name, _ in unet.attn_processors.items():
            # Pick the fastest attention backend available on this install.
            if torch.__version__ >= '2.0':
                default_attn_proc = AttnProcessor2_0()
            elif is_xformers_available():
                default_attn_proc = XFormersAttnProcessor()
            else:
                default_attn_proc = AttnProcessor()
            # Reference mixing is enabled only for self-attention layers.
            unet_lora_attn_procs[name] = ReferenceOnlyAttnProc(
                default_attn_proc, enabled=name.endswith("attn1.processor"), name=name
            )
        unet.set_attn_processor(unet_lora_attn_procs)

    def __getattr__(self, name: str):
        # Transparently proxy unknown attributes (config, dtype, ...) to the
        # wrapped UNet.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.unet, name)

    def forward_cond(self, noisy_cond_lat, timestep, encoder_hidden_states, class_labels, ref_dict, is_cfg_guidance, **kwargs):
        # "Write" pass: run the noised condition latent through the UNet so
        # each ReferenceOnlyAttnProc records its states into ref_dict.
        if is_cfg_guidance:
            # Drop the unconditional entry; it is handled separately by the
            # attention processors during the read pass.
            encoder_hidden_states = encoder_hidden_states[1:]
            class_labels = class_labels[1:]
        self.unet(
            noisy_cond_lat, timestep,
            encoder_hidden_states=encoder_hidden_states,
            class_labels=class_labels,
            cross_attention_kwargs=dict(mode="w", ref_dict=ref_dict),
            **kwargs
        )

    def forward(
        self, sample, timestep, encoder_hidden_states, class_labels=None,
        *args, cross_attention_kwargs,
        down_block_res_samples=None, mid_block_res_sample=None,
        **kwargs
    ):
        cond_lat = cross_attention_kwargs['cond_lat']
        is_cfg_guidance = cross_attention_kwargs.get('is_cfg_guidance', False)
        noise = torch.randn_like(cond_lat)
        # Noise the condition latent to the current timestep with whichever
        # scheduler matches the train/eval mode.
        if self.training:
            noisy_cond_lat = self.train_sched.add_noise(cond_lat, noise, timestep)
            noisy_cond_lat = self.train_sched.scale_model_input(noisy_cond_lat, timestep)
        else:
            noisy_cond_lat = self.val_sched.add_noise(cond_lat, noise, timestep.reshape(-1))
            noisy_cond_lat = self.val_sched.scale_model_input(noisy_cond_lat, timestep.reshape(-1))
        ref_dict = {}
        # Write pass fills ref_dict with per-layer reference states.
        self.forward_cond(
            noisy_cond_lat, timestep,
            encoder_hidden_states, class_labels,
            ref_dict, is_cfg_guidance, **kwargs
        )
        weight_dtype = self.unet.dtype
        # Read pass: denoise `sample` while attending to the captured states.
        # ControlNet residuals (if any) are cast to the UNet's dtype.
        return self.unet(
            sample, timestep,
            encoder_hidden_states, *args,
            class_labels=class_labels,
            cross_attention_kwargs=dict(mode="r", ref_dict=ref_dict, is_cfg_guidance=is_cfg_guidance),
            down_block_additional_residuals=[
                sample.to(dtype=weight_dtype) for sample in down_block_res_samples
            ] if down_block_res_samples is not None else None,
            mid_block_additional_residual=(
                mid_block_res_sample.to(dtype=weight_dtype)
                if mid_block_res_sample is not None else None
            ),
            **kwargs
        )
155
+
156
+
157
def scale_latents(latents):
    """Map raw VAE latents into the shifted/scaled space used by Zero123++."""
    return (latents - 0.22) * 0.75
160
+
161
+
162
def unscale_latents(latents):
    """Inverse of ``scale_latents``: back to raw VAE latent space."""
    return latents / 0.75 + 0.22
165
+
166
+
167
def scale_image(image):
    """Rescale a decoded image by the Zero123++ factor 0.5/0.8 (= 0.625)."""
    return image * 0.5 / 0.8
170
+
171
+
172
def unscale_image(image):
    """Inverse of ``scale_image``: multiply by 0.8/0.5 (= 1.6)."""
    return image / 0.5 * 0.8
175
+
176
+
177
class DepthControlUNet(torch.nn.Module):
    """Adds a depth ControlNet on top of a ``RefOnlyNoisedUNet``.

    The ControlNet consumes the ``control_depth`` entry of
    ``cross_attention_kwargs`` and its residuals are fed into the wrapped
    UNet's forward pass.
    """
    def __init__(self, unet: RefOnlyNoisedUNet, controlnet: Optional[diffusers.ControlNetModel] = None, conditioning_scale=1.0) -> None:
        super().__init__()
        self.unet = unet
        # Derive a fresh ControlNet from the inner UNet unless one is given.
        self.controlnet = (
            diffusers.ControlNetModel.from_unet(unet.unet)
            if controlnet is None else controlnet
        )
        # The ControlNet itself needs no reference mixing — plain attention.
        proc_cls = XFormersAttnProcessor if is_xformers_available() else AttnProcessor2_0
        self.controlnet.set_attn_processor(proc_cls())
        self.conditioning_scale = conditioning_scale

    def __getattr__(self, name: str):
        # Proxy unknown attributes to the wrapped UNet.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.unet, name)

    def forward(self, sample, timestep, encoder_hidden_states, class_labels=None, *args, cross_attention_kwargs: dict, **kwargs):
        # Copy so the caller's dict is not mutated when the depth map is popped.
        cross_attention_kwargs = dict(cross_attention_kwargs)
        control_depth = cross_attention_kwargs.pop('control_depth')
        down_residuals, mid_residual = self.controlnet(
            sample,
            timestep,
            encoder_hidden_states=encoder_hidden_states,
            controlnet_cond=control_depth,
            conditioning_scale=self.conditioning_scale,
            return_dict=False,
        )
        return self.unet(
            sample,
            timestep,
            encoder_hidden_states=encoder_hidden_states,
            down_block_res_samples=down_residuals,
            mid_block_res_sample=mid_residual,
            cross_attention_kwargs=cross_attention_kwargs
        )
216
+
217
+
218
class ModuleListDict(torch.nn.Module):
    """Dict-like module container with deterministic (sorted-key) registration.

    Stores the given modules in a ``ModuleList`` ordered by sorted key so the
    resulting state dict layout is reproducible, while still allowing lookup
    by the original key.
    """
    def __init__(self, procs: dict) -> None:
        super().__init__()
        ordered = sorted(procs.keys())
        self.keys = ordered
        self.values = torch.nn.ModuleList([procs[k] for k in ordered])

    def __getitem__(self, key):
        position = self.keys.index(key)
        return self.values[position]
226
+
227
+
228
class SuperNet(torch.nn.Module):
    """Container exposing attention-processor weights for saving/loading.

    Wraps the modules of a flat ``state_dict`` in a ``ModuleList`` and
    installs state-dict hooks translating between the positional
    ``layers.N`` naming of the ModuleList and the original keys (as used by
    ``unet.attn_processors``).
    """
    def __init__(self, state_dict: Dict[str, torch.Tensor]):
        super().__init__()
        # Sort keys so the index <-> key mapping is deterministic.
        state_dict = OrderedDict((k, state_dict[k]) for k in sorted(state_dict.keys()))
        self.layers = torch.nn.ModuleList(state_dict.values())
        self.mapping = dict(enumerate(state_dict.keys()))  # index -> original key
        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}  # original key -> index

        # .processor for unet, .self_attn for text encoder
        self.split_keys = [".processor", ".self_attn"]

        # we add a hook to state_dict() and load_state_dict() so that the
        # naming fits with `unet.attn_processors`
        def map_to(module, state_dict, *args, **kwargs):
            # On save: rename "layers.N.<rest>" back to "<original key>.<rest>".
            new_state_dict = {}
            for key, value in state_dict.items():
                num = int(key.split(".")[1]) # 0 is always "layers"
                new_key = key.replace(f"layers.{num}", module.mapping[num])
                new_state_dict[new_key] = value

            return new_state_dict

        def remap_key(key, state_dict):
            # Find the prefix of `key` that corresponds to one original key.
            for k in self.split_keys:
                if k in key:
                    return key.split(k)[0] + k
            return key.split('.')[0]

        def map_from(module, state_dict, *args, **kwargs):
            # On load: rename original keys to the positional "layers.N" form,
            # mutating the incoming state_dict in place.
            all_keys = list(state_dict.keys())
            for key in all_keys:
                replace_key = remap_key(key, state_dict)
                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
                state_dict[new_key] = state_dict[key]
                del state_dict[key]

        self._register_state_dict_hook(map_to)
        self._register_load_state_dict_pre_hook(map_from, with_module=True)
266
+
267
+
268
class Zero123PlusPipeline(diffusers.StableDiffusionPipeline):
    """Zero123++ pipeline: synthesize novel views of an object from a single
    RGB(A) input image.

    The input image conditions generation through two paths:
      * its VAE latent (``cond_lat``), consumed via reference-only attention
        inside ``RefOnlyNoisedUNet``;
      * its CLIP vision embedding, blended into the prompt hidden states
        scaled by the per-token ``ramping_coefficients`` from the config.
    """
    tokenizer: transformers.CLIPTokenizer
    text_encoder: transformers.CLIPTextModel
    vision_encoder: transformers.CLIPVisionModelWithProjection

    feature_extractor_clip: transformers.CLIPImageProcessor
    unet: UNet2DConditionModel
    scheduler: diffusers.schedulers.KarrasDiffusionSchedulers

    vae: AutoencoderKL
    ramping: nn.Linear

    feature_extractor_vae: transformers.CLIPImageProcessor

    # Normalizes a PIL depth image to [-1, 1] for the ControlNet branch.
    depth_transforms_multi = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        vision_encoder: transformers.CLIPVisionModelWithProjection,
        feature_extractor_clip: CLIPImageProcessor,
        feature_extractor_vae: CLIPImageProcessor,
        ramping_coefficients: Optional[list] = None,
        safety_checker=None,
    ):
        # Deliberately skip StableDiffusionPipeline.__init__: this pipeline
        # registers a different module set (no safety checker, two feature
        # extractors, an extra vision encoder).
        DiffusionPipeline.__init__(self)

        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
            unet=unet, scheduler=scheduler, safety_checker=None,
            vision_encoder=vision_encoder,
            feature_extractor_clip=feature_extractor_clip,
            feature_extractor_vae=feature_extractor_vae
        )
        self.register_to_config(ramping_coefficients=ramping_coefficients)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def prepare(self):
        """Wrap the raw UNet in ``RefOnlyNoisedUNet`` (idempotent)."""
        train_sched = DDPMScheduler.from_config(self.scheduler.config)
        # Only wrap once: after wrapping, self.unet is no longer a plain
        # UNet2DConditionModel, so repeated calls are no-ops.
        if isinstance(self.unet, UNet2DConditionModel):
            self.unet = RefOnlyNoisedUNet(self.unet, train_sched, self.scheduler).eval()

    def add_controlnet(self, controlnet: Optional[diffusers.ControlNetModel] = None, conditioning_scale=1.0):
        """Attach a depth ControlNet; returns a ``SuperNet`` exposing its weights."""
        self.prepare()
        self.unet = DepthControlUNet(self.unet, controlnet, conditioning_scale)
        return SuperNet(OrderedDict([('controlnet', self.unet.controlnet)]))

    def encode_condition_image(self, image: torch.Tensor):
        """Encode a preprocessed condition image into a sampled VAE latent."""
        image = self.vae.encode(image).latent_dist.sample()
        return image

    @torch.no_grad()
    def __call__(
        self,
        image: Image.Image = None,
        prompt = "",
        *args,
        num_images_per_prompt: Optional[int] = 1,
        guidance_scale=4.0,
        depth_image: Image.Image = None,
        output_type: Optional[str] = "pil",
        width=640,
        height=960,
        num_inference_steps=28,
        return_dict=True,
        **kwargs
    ):
        """Generate novel views conditioned on ``image``.

        Args:
            image: Input PIL image (RGB or RGBA); required.
            prompt: Optional text prompt blended with the vision embedding.
            num_images_per_prompt: Number of samples per prompt.
            guidance_scale: CFG scale; > 1 enables classifier-free guidance.
            depth_image: Optional depth map; used only when a ControlNet is attached.
            output_type: "pil", "np", "pt", or "latent".
            width, height: Output resolution of the tiled view grid.
            num_inference_steps: Denoising steps.
            return_dict: When False, return a plain ``(images,)`` tuple.

        Returns:
            ``ImagePipelineOutput`` (or a tuple when ``return_dict`` is False).

        Raises:
            ValueError: If ``image`` is None (embedding input is unsupported).
        """
        self.prepare()
        if image is None:
            raise ValueError("Inputting embeddings not supported for this pipeline. Please pass an image.")
        assert not isinstance(image, torch.Tensor)
        image = to_rgb_image(image)
        # Two preprocessing paths: one sized for the VAE, one for CLIP.
        image_1 = self.feature_extractor_vae(images=image, return_tensors="pt").pixel_values
        image_2 = self.feature_extractor_clip(images=image, return_tensors="pt").pixel_values
        if depth_image is not None and hasattr(self.unet, "controlnet"):
            depth_image = to_rgb_image(depth_image)
            depth_image = self.depth_transforms_multi(depth_image).to(
                device=self.unet.controlnet.device, dtype=self.unet.controlnet.dtype
            )
        image = image_1.to(device=self.vae.device, dtype=self.vae.dtype)
        image_2 = image_2.to(device=self.vae.device, dtype=self.vae.dtype)
        cond_lat = self.encode_condition_image(image)
        if guidance_scale > 1:
            # Unconditional branch for CFG: the latent of an all-zero image.
            negative_lat = self.encode_condition_image(torch.zeros_like(image))
            cond_lat = torch.cat([negative_lat, cond_lat])
        encoded = self.vision_encoder(image_2, output_hidden_states=False)
        global_embeds = encoded.image_embeds
        global_embeds = global_embeds.unsqueeze(-2)

        # diffusers renamed _encode_prompt -> encode_prompt; support both.
        if hasattr(self, "encode_prompt"):
            encoder_hidden_states = self.encode_prompt(
                prompt,
                self.device,
                1,
                False
            )[0]
        else:
            encoder_hidden_states = self._encode_prompt(
                prompt,
                self.device,
                1,
                False
            )
        # Blend the CLIP image embedding into the prompt states, scaled by
        # the per-token ramping coefficients learned for this checkpoint.
        ramp = global_embeds.new_tensor(self.config.ramping_coefficients).unsqueeze(-1)
        encoder_hidden_states = encoder_hidden_states + global_embeds * ramp

        if num_images_per_prompt > 1:
            # Repeat the condition latent to match the expanded batch.
            bs_embed, *lat_shape = cond_lat.shape
            assert len(lat_shape) == 3
            cond_lat = cond_lat.repeat(1, num_images_per_prompt, 1, 1)
            cond_lat = cond_lat.view(bs_embed * num_images_per_prompt, *lat_shape)

        cak = dict(cond_lat=cond_lat)
        if hasattr(self.unet, "controlnet"):
            cak['control_depth'] = depth_image
        latents: torch.Tensor = super().__call__(
            None,
            *args,
            cross_attention_kwargs=cak,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            prompt_embeds=encoder_hidden_states,
            num_inference_steps=num_inference_steps,
            output_type='latent',
            width=width,
            height=height,
            **kwargs
        ).images

        # Undo the Zero123++ latent shift/scale before decoding.
        latents = unscale_latents(latents)
        if output_type != "latent":
            image = unscale_image(self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0])
        else:
            image = latents

        image = self.image_processor.postprocess(image, output_type=output_type)
        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
419
+
420
# Smoke-test entry point: load the pretrained Zero123++ v1.2 checkpoint,
# run it once on a sample image, and save/show the result.
if __name__ == '__main__':
    import PIL
    # Load weights with this module as the custom pipeline implementation.
    pipeline = Zero123PlusPipeline.from_pretrained(
        "sudo-ai/zero123plus-v1.2", custom_pipeline="zero123_plus",
        torch_dtype=torch.float16
    )
    # Swap in an Euler ancestral scheduler with 'trailing' timestep spacing.
    pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(
        pipeline.scheduler.config, timestep_spacing='trailing'
    )
    pipeline.to('cuda:0')

    # Download an example image.
    cond = PIL.Image.open('../data/1_rgba_rgba.png')

    # Snapshot the UNet to disk before running inference.
    torch.save(pipeline.unet,'zero123_plus_unt.pt')
    # Run the pipeline!
    # NOTE(review): a single inference step is only a smoke test — see the
    # step-count guidance below for real outputs.
    result = pipeline(cond, num_inference_steps=1).images[0]
    # for general real and synthetic images of general objects
    # usually it is enough to have around 28 inference steps
    # for images with delicate details like faces (real or anime)
    # you may need 75-100 steps for the details to construct

    result.show()
    result.save("output_v3.png")
444
+