| cc = torch.cat(c_crossattn, 1) | |
| out = self.diffusion_model(xc, t, context=cc) | |
| elif self.conditioning_key == 'hybrid-adm': | |
| assert c_adm is not None | |
| xc = torch.cat([x] + c_concat, dim=1) | |
| cc = torch.cat(c_crossattn, 1) | |
| out = self.diffusion_model(xc, t, context=cc, y=c_adm) | |
| elif self.conditioning_key == 'crossattn-adm': | |
| assert c_adm is not None | |
| cc = torch.cat(c_crossattn, 1) | |
| out = self.diffusion_model(x, t, context=cc, y=c_adm) | |
| elif self.conditioning_key == 'adm': | |
| cc = c_crossattn[0] | |
| out = self.diffusion_model(x, t, y=cc) | |
| else: | |
| raise NotImplementedError() | |
| return out | |
| class LatentUpscaleDiffusion(LatentDiffusion): | |
| def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs): | |
| super().__init__(*args, **kwargs) | |
| # assumes that neither the cond_stage nor the low_scale_model contain trainable params | |
| assert not self.cond_stage_trainable | |
| self.instantiate_low_stage(low_scale_config) | |
| self.low_scale_key = low_scale_key | |
| self.noise_level_key = noise_level_key | |
| def instantiate_low_stage(self, config): | |
| model = instantiate_from_config(config) | |
| self.low_scale_model = model.eval() | |
| self.low_scale_model.train = disabled_train | |
| for param in self.low_scale_model.parameters(): | |
| param.requires_grad = False | |
    @torch.no_grad()
    def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
        """Fetch latents plus upscaling conditioning from *batch*.

        Builds the conditioning dict for the upscaler: the encoded low-res image
        (``c_concat``), the usual cross-attention conditioning (``c_crossattn``),
        and the low-scale model's noise level (``c_adm``).

        Args:
            batch: input batch; must contain ``self.low_scale_key`` in b/h/w/c layout
                (presumably an image tensor — the rearrange below assumes 4-D).
            k: first-stage key forwarded to the superclass (ignored in log mode,
                where ``self.first_stage_key`` is used instead).
            cond_key: unused here; kept for signature compatibility with the parent.
            bs: optional batch-size cap (``[:bs]`` slicing; ``None`` keeps all).
            log_mode: when True, additionally return reconstructions for logging.

        Returns:
            ``(z, all_conds)`` normally, or
            ``(z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level)`` in log mode.
        """
        if not log_mode:
            z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
        else:
            # Log mode also needs the raw input, its first-stage reconstruction,
            # and the original (un-encoded) conditioning.
            z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
                                                  force_c_encode=True, return_original_cond=True, bs=bs)
        x_low = batch[self.low_scale_key][:bs]
        # b h w c -> b c h w: channels-first for the low-scale model.
        x_low = rearrange(x_low, 'b h w c -> b c h w')
        x_low = x_low.to(memory_format=torch.contiguous_format).float()
        # Low-scale model returns the encoded LR image and a per-sample noise level.
        zx, noise_level = self.low_scale_model(x_low)
        if self.noise_level_key is not None:
            # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
            raise NotImplementedError('TODO')
        all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
        if log_mode:
            # TODO: maybe disable if too expensive
            x_low_rec = self.low_scale_model.decode(zx)
            return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
        return z, all_conds
| @torch.no_grad() | |
| def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, | |
| plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, | |
| unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, | |
| **kwargs): | |
| ema_scope = self.ema_scope if use_ema_scope else nullcontext | |
| use_ddim = ddim_steps is not None | |
| log = dict() | |
| z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, | |
| log_mode=True) | |
| N = min(x.shape[0], N) | |
| n_row = min(x.shape[0], n_row) | |
| log["inputs"] = x | |
| log["reconstruction"] = xrec | |
| log["x_lr"] = x_low | |
| log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec | |
| if self.model.conditioning_key is not None: | |
| if hasattr(self.cond_stage_model, "decode"): | |
| xc = self.cond_stage_model.decode(c) | |
| log["conditioning"] = xc | |
| elif self.cond_stage_key in ["caption", "txt"]: | |
| xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[ |