# NOTE(review): the four lines below are dataset-export residue (a column
# header: "text", "stringlengths", "1", "93.6k"), not Python — commented out
# so the module parses; confirm they can be removed entirely.
# text
# stringlengths
# 1
# 93.6k
def _log_cont_noise(t, beta_0, beta_T, T):
    """
    Continuous-time approximation of the log cumulative noise level.

    Chosen so that _log_cont_noise(t, beta_0, beta_T, T) is approximately
    np.log(Alpha_bar[-1].numpy()) for the matching discrete linear schedule.

    Parameters:
    t:       (possibly fractional) diffusion step index
    beta_0:  first beta of the linear schedule
    beta_T:  last beta of the linear schedule
    T:       number of discrete diffusion steps

    Returns:
    the approximate log of the cumulative alpha-bar at step t
    """
    # Increment between consecutive betas in the linear schedule.
    step = (beta_T - beta_0) / (T - 1)
    # Offset mapping the product of (1 - beta_i) onto a gamma-function ratio.
    c = (1.0 - beta_0) / step
    n = t + 1
    log_gamma_ratio = _log_gamma(c + 1) - _log_gamma(c - n + 1)
    return n * np.log(step) + log_gamma_ratio
# Standard DDPM generation
def STD_sampling(net, size, diffusion_hyperparams):
    """
    Run the full ancestral (DDPM) reverse process and return a sample.

    Parameters:
    net (torch network):            the trained noise-prediction model
    size (tuple):                   shape of the tensor to generate; the code
                                    asserts a 4-element shape
    diffusion_hyperparams (dict):   dictionary returned by
                                    calc_diffusion_hyperparams; note, the
                                    tensors need to be cuda tensors

    Returns:
    the generated sample as a torch.Tensor of shape ``size``
    """
    hyper = diffusion_hyperparams
    T = hyper["T"]
    Alpha = hyper["Alpha"]
    Alpha_bar = hyper["Alpha_bar"]
    Beta = hyper["Beta"]  # unused; lookup kept for parity with the original
    Sigma = hyper["Sigma"]
    assert len(Alpha_bar) == T
    assert len(size) == 4

    # Start from pure Gaussian noise x_T.
    x = std_normal(size)
    with torch.no_grad():
        # Walk the chain backwards: x_T -> x_{T-1} -> ... -> x_0.
        for t in reversed(range(T)):
            diffusion_steps = t * map_gpu(torch.ones(size[0]))
            epsilon_theta = net(x, diffusion_steps)
            # Posterior mean of x_{t-1} given x_t and the predicted noise.
            x = (x - (1 - Alpha[t]) / torch.sqrt(1 - Alpha_bar[t]) * epsilon_theta) / torch.sqrt(Alpha[t])
            if t > 0:
                # Inject fresh noise on every step except the final one.
                x = x + Sigma[t] * std_normal(size)
    return x
# DDIM
def DDIM_sampling(net, size, diffusion_hyperparams):
    """
    Run the full reverse sampling process and return a sample.

    NOTE(review): despite the name, this body is byte-identical to
    STD_sampling (stochastic DDPM ancestral sampling) — it does not implement
    the deterministic DDIM update rule; confirm whether that is intended.

    Parameters:
    net (torch network):            the trained noise-prediction model
    size (tuple):                   shape of the tensor to generate; the code
                                    asserts a 4-element shape
    diffusion_hyperparams (dict):   dictionary returned by
                                    calc_diffusion_hyperparams; note, the
                                    tensors need to be cuda tensors

    Returns:
    the generated sample as a torch.Tensor of shape ``size``
    """
    hyper = diffusion_hyperparams
    T = hyper["T"]
    Alpha = hyper["Alpha"]
    Alpha_bar = hyper["Alpha_bar"]
    Beta = hyper["Beta"]  # unused; lookup kept for parity with the original
    Sigma = hyper["Sigma"]
    assert len(Alpha_bar) == T
    assert len(size) == 4

    # Start from pure Gaussian noise x_T.
    x = std_normal(size)
    with torch.no_grad():
        # Walk the chain backwards: x_T -> x_{T-1} -> ... -> x_0.
        for t in reversed(range(T)):
            diffusion_steps = t * map_gpu(torch.ones(size[0]))
            epsilon_theta = net(x, diffusion_steps)
            # Posterior mean of x_{t-1} given x_t and the predicted noise.
            x = (x - (1 - Alpha[t]) / torch.sqrt(1 - Alpha_bar[t]) * epsilon_theta) / torch.sqrt(Alpha[t])
            if t > 0:
                # Inject fresh noise on every step except the final one.
                x = x + Sigma[t] * std_normal(size)
    return x
# STEP
def STEP_sampling(net, size, diffusion_hyperparams, user_defined_steps, kappa):
"""
Perform the complete sampling step according to https://arxiv.org/pdf/2010.02502.pdf
official repo: https://github.com/ermongroup/ddim
Parameters:
net (torch network): the model
size (tuple): size of tensor to be generated,
usually is (number of audios to generate, channels=1, length of audio)
diffusion_hyperparams (dict): dictionary of diffusion hyperparameters returned by calc_diffusion_hyperparams
note, the tensors need to be cuda tensors
user_defined_steps (int list): User defined steps (sorted)
kappa (float): factor multipled over sigma, between 0 and 1
Returns:
the generated images in torch.tensor, shape=size
"""
_dh = diffusion_hyperparams
T, Alpha, Alpha_bar, _ = _dh["T"], _dh["Alpha"], _dh["Alpha_bar"], _dh["Sigma"]
assert len(Alpha_bar) == T
assert len(size) == 4
assert 0.0 <= kappa <= 1.0