text
stringlengths 1
93.6k
|
|---|
visualize_cond_img(selected_path)
|
c = Image.open(selected_path)
|
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
|
c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]], antialias=True)
|
c_up = rearrange(c_up, '1 c h w -> 1 h w c')
|
c = rearrange(c, '1 c h w -> 1 h w c')
|
c = 2. * c - 1.
|
c = c.to(torch.device("cuda"))
|
example["LR_image"] = c
|
example["image"] = c_up
|
return example
|
def visualize_cond_img(path):
    """Render the conditioning image stored at *path* inline in the notebook."""
    img = ipyimg(filename=path)
    display(img)
|
def run(model, selected_path, task, custom_steps, resize_enabled=False, classifier_ckpt=None, global_step=None):
    """Run diffusion sampling for a single conditioning image.

    Builds the conditioning example for ``task``/``selected_path``, enables
    patch-wise (split-input) decoding on the model when the image is large
    enough, then draws samples via ``make_convolutional_sample`` and returns
    its log dict.

    NOTE(review): ``classifier_ckpt`` and ``global_step`` are accepted for
    interface compatibility but are not used in this body.
    """
    example = get_cond(task, selected_path)

    # Fixed sampling configuration for this runner.
    save_intermediate_vid = False
    n_runs = 1
    masked = False
    guider = None           # optional score corrector — disabled here
    ckwargs = None          # corrector kwargs — unused while guider is None
    mode = 'ddim'
    ddim_use_x0_pred = False
    temperature = 1.
    eta = 1.
    make_progrow = True
    custom_shape = None

    # example["image"] is laid out (batch, h, w, c) by the cond loader.
    height, width = example["image"].shape[1:3]
    # Decode large images patch-wise to bound memory use.
    split_input = min(height, width) >= 128
    if split_input:
        ks = 128
        stride = 64
        vqf = 4
        model.split_input_params = {
            "ks": (ks, ks),
            "stride": (stride, stride),
            "vqf": vqf,
            "patch_distributed_vq": True,
            "tie_braker": False,
            "clip_max_weight": 0.5,
            "clip_min_weight": 0.01,
            "clip_max_tie_weight": 0.5,
            "clip_min_tie_weight": 0.01,
        }
    elif hasattr(model, "split_input_params"):
        # Drop stale patch config left over from a previous (larger) image.
        delattr(model, "split_input_params")

    invert_mask = False
    x_T = None
    for _ in range(n_runs):
        if custom_shape is not None:
            # Draw one noise sample and tile it across the custom batch dim.
            x_T = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
            x_T = repeat(x_T, '1 c h w -> b c h w', b=custom_shape[0])

        logs = make_convolutional_sample(
            example, model,
            mode=mode, custom_steps=custom_steps,
            eta=eta, swap_mode=False, masked=masked,
            invert_mask=invert_mask, quantize_x0=False,
            custom_schedule=None, decode_interval=10,
            resize_enabled=resize_enabled, custom_shape=custom_shape,
            temperature=temperature, noise_dropout=0.,
            corrector=guider, corrector_kwargs=ckwargs, x_T=x_T,
            save_intermediate_vid=save_intermediate_vid,
            make_progrow=make_progrow, ddim_use_x0_pred=ddim_use_x0_pred,
        )
    return logs
|
@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
                    mask=None, x0=None, quantize_x0=False, img_callback=None,
                    temperature=1., noise_dropout=0., score_corrector=None,
                    corrector_kwargs=None, x_T=None, log_every_t=None
                    ):
    """Draw DDIM samples from *model* conditioned on *cond*.

    ``shape`` carries the batch size in its first entry; the remaining
    entries are the per-sample latent shape handed to the sampler.
    Returns the ``(samples, intermediates)`` pair from ``DDIMSampler.sample``.

    NOTE(review): ``img_callback``, ``noise_dropout`` and ``log_every_t`` are
    accepted but not forwarded to the sampler in this body.
    """
    sampler = DDIMSampler(model)
    batch_size = shape[0]      # leading dim of `shape` is the batch size
    sample_shape = shape[1:]   # per-sample shape without the batch dim
    print(f"Sampling with eta = {eta}; steps: {steps}")
    samples, intermediates = sampler.sample(
        steps, batch_size=batch_size, shape=sample_shape, conditioning=cond,
        callback=callback, normals_sequence=normals_sequence,
        quantize_x0=quantize_x0, eta=eta,
        mask=mask, x0=x0, temperature=temperature, verbose=False,
        score_corrector=score_corrector,
        corrector_kwargs=corrector_kwargs, x_T=x_T)
    return samples, intermediates
|
@torch.no_grad()
|
def make_convolutional_sample(batch, model, mode="vanilla", custom_steps=None, eta=1.0, swap_mode=False, masked=False,
|
invert_mask=True, quantize_x0=False, custom_schedule=None, decode_interval=1000,
|
resize_enabled=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.