code stringlengths 17 6.64M |
|---|
class SpatialTransformer(nn.Module):
    """Transformer block for image-like data.

    Projects the input channels to an inner embedding dimension, flattens the
    spatial grid into a token sequence of shape (b, h*w, c), applies ``depth``
    standard transformer blocks (optionally cross-attending to ``context``),
    then reshapes back to an image and adds a residual connection.
    """

    def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0.0, context_dim=None):
        super().__init__()
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels)
        # 1x1 convolutions act as per-pixel linear projections in/out of the inner dim.
        self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
             for _ in range(depth)]
        )
        # Zero-initialized output projection: the module starts out as an identity
        # (residual path only), which stabilizes early training.
        self.proj_out = zero_module(nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0))

    def forward(self, x, context=None):
        b, c, h, w = x.shape
        residual = x
        out = self.proj_in(self.norm(x))
        out = rearrange(out, 'b c h w -> b (h w) c')
        for blk in self.transformer_blocks:
            out = blk(out, context=context)
        out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w)
        return self.proj_out(out) + residual
|
class AbstractDistribution:
    """Minimal interface for distributions that can be sampled and queried
    for a mode; concrete subclasses must override both methods."""

    def sample(self):
        """Draw one sample from the distribution."""
        raise NotImplementedError()

    def mode(self):
        """Return the distribution's mode."""
        raise NotImplementedError()
|
class DiracDistribution(AbstractDistribution):
    """Degenerate (Dirac delta) distribution: all mass on one fixed value,
    so sampling and the mode both return that value."""

    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value
|
class DiagonalGaussianDistribution(object):
    """Gaussian with diagonal covariance, parameterized by a tensor that
    stacks mean and log-variance along channel dim 1.

    When ``deterministic`` is True the variance is forced to zero, so
    ``sample()`` always returns the mean.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp log-variance for numerical stability of the exp() calls below.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)

    def sample(self):
        # Reparameterized draw: mean + std * eps, with eps ~ N(0, I).
        noise = torch.randn(self.mean.shape).to(device=self.parameters.device)
        return self.mean + self.std * noise

    def kl(self, other=None):
        """KL divergence to ``other`` (or to a standard normal when ``other``
        is None), summed over all non-batch dimensions."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(
                torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3])

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of ``sample`` under this Gaussian,
        summed over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims)

    def mode(self):
        return self.mean
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """Compute KL(N1 || N2) between two Gaussians given means/log-variances.

    Shapes broadcast, so batches can be compared against scalars. At least
    one argument must be a Tensor; scalar log-variances are promoted to
    tensors with the reference tensor's device/dtype.

    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    """
    tensor = next((t for t in (mean1, logvar1, mean2, logvar2) if isinstance(t, torch.Tensor)), None)
    assert tensor is not None, 'at least one argument must be a Tensor'
    logvar1, logvar2 = (
        lv if isinstance(lv, torch.Tensor) else torch.tensor(lv).to(tensor)
        for lv in (logvar1, logvar2)
    )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
|
class LitEma(nn.Module):
    """Exponential moving average (EMA) of a model's trainable parameters.

    Shadow copies are registered as buffers on this module; buffer names are
    the parameter names with '.' removed (buffer names may not contain dots),
    tracked via the `m_name2s_name` mapping.
    """
    def __init__(self, model, decay=0.9999, use_num_upates=True):
        # NOTE(review): 'use_num_upates' looks like a typo for
        # 'use_num_updates'; kept as-is for backward compatibility.
        super().__init__()
        if ((decay < 0.0) or (decay > 1.0)):
            raise ValueError('Decay must be between 0 and 1')
        self.m_name2s_name = {}  # parameter name -> shadow buffer name
        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
        # num_updates >= 0 enables the decay warm-up schedule; -1 disables it.
        self.register_buffer('num_updates', (torch.tensor(0, dtype=torch.int) if use_num_upates else torch.tensor((- 1), dtype=torch.int)))
        for (name, p) in model.named_parameters():
            if p.requires_grad:
                # Buffer names cannot contain '.', so strip them.
                s_name = name.replace('.', '')
                self.m_name2s_name.update({name: s_name})
                self.register_buffer(s_name, p.clone().detach().data)
        self.collected_params = []
    def forward(self, model):
        """Update the shadow parameters toward `model`'s current parameters."""
        decay = self.decay
        if (self.num_updates >= 0):
            self.num_updates += 1
            # Warm-up: effective decay ramps from ~0.1 up toward `self.decay`.
            decay = min(self.decay, ((1 + self.num_updates) / (10 + self.num_updates)))
        one_minus_decay = (1.0 - decay)
        with torch.no_grad():
            m_param = dict(model.named_parameters())
            shadow_params = dict(self.named_buffers())
            for key in m_param:
                if m_param[key].requires_grad:
                    sname = self.m_name2s_name[key]
                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
                    # shadow <- shadow - (1 - decay) * (shadow - param)
                    shadow_params[sname].sub_((one_minus_decay * (shadow_params[sname] - m_param[key])))
                else:
                    assert (not (key in self.m_name2s_name))
    def copy_to(self, model):
        """Copy the EMA (shadow) values into `model`'s parameters in place."""
        m_param = dict(model.named_parameters())
        shadow_params = dict(self.named_buffers())
        for key in m_param:
            if m_param[key].requires_grad:
                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
            else:
                assert (not (key in self.m_name2s_name))
    def store(self, parameters):
        """Save the current parameters for restoring later.

        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                temporarily stored.
        """
        self.collected_params = [param.clone() for param in parameters]
    def restore(self, parameters):
        """Restore the parameters stored with the `store` method.

        Useful to validate the model with EMA parameters without affecting the
        original optimization process. Store the parameters before the
        `copy_to` method. After validation (or model saving), use this to
        restore the former parameters.

        Args:
            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
                updated with the stored parameters.
        """
        for (c_param, param) in zip(self.collected_params, parameters):
            param.data.copy_(c_param.data)
|
class LPIPSWithDiscriminator(nn.Module):
    """KL-autoencoder training loss: pixel + LPIPS perceptual reconstruction
    terms, a KL regularizer on the posterior, and an adversarial term from a
    patch discriminator.

    `forward` serves two optimizers: optimizer_idx 0 computes the
    generator/autoencoder loss, optimizer_idx 1 the discriminator loss.
    The adversarial term is enabled only after `disc_start` global steps.
    """
    def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_loss='hinge'):
        super().__init__()
        assert (disc_loss in ['hinge', 'vanilla'])
        self.kl_weight = kl_weight
        self.pixel_weight = pixelloss_weight
        # LPIPS network is kept in eval mode and used purely as a metric.
        self.perceptual_loss = LPIPS().eval()
        self.perceptual_weight = perceptual_weight
        # Learnable global log-variance that scales the reconstruction NLL.
        self.logvar = nn.Parameter((torch.ones(size=()) * logvar_init))
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm).apply(weights_init)
        self.discriminator_iter_start = disc_start
        self.disc_loss = (hinge_d_loss if (disc_loss == 'hinge') else vanilla_d_loss)
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Balance the adversarial term against the reconstruction term via
        the ratio of their gradient norms at the decoder's last layer.

        Falls back to `self.last_layer[0]` when no layer is passed in
        (assumes the owning module set that attribute — TODO confirm).
        """
        if (last_layer is not None):
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        # Small epsilon guards against division by a vanishing gradient norm.
        d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
        d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
        d_weight = (d_weight * self.discriminator_weight)
        return d_weight
    def forward(self, inputs, reconstructions, posteriors, optimizer_idx, global_step, last_layer=None, cond=None, split='train', weights=None):
        """Return (loss, log_dict) for the generator (optimizer_idx == 0) or
        the discriminator (optimizer_idx == 1)."""
        rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
        if (self.perceptual_weight > 0):
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
        # NLL under a Gaussian with the learnable global log-variance.
        nll_loss = ((rec_loss / torch.exp(self.logvar)) + self.logvar)
        weighted_nll_loss = nll_loss
        if (weights is not None):
            weighted_nll_loss = (weights * nll_loss)
        # Normalize by batch size (shape[0]), not by element count.
        weighted_nll_loss = (torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0])
        nll_loss = (torch.sum(nll_loss) / nll_loss.shape[0])
        kl_loss = posteriors.kl()
        kl_loss = (torch.sum(kl_loss) / kl_loss.shape[0])
        if (optimizer_idx == 0):
            # Generator/autoencoder update.
            if (cond is None):
                assert (not self.disc_conditional)
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = (- torch.mean(logits_fake))
            if (self.disc_factor > 0.0):
                try:
                    d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
                except RuntimeError:
                    # autograd.grad fails when no graph exists (e.g. eval mode).
                    assert (not self.training)
                    d_weight = torch.tensor(0.0)
            else:
                d_weight = torch.tensor(0.0)
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = ((weighted_nll_loss + (self.kl_weight * kl_loss)) + ((d_weight * disc_factor) * g_loss))
            log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/logvar'.format(split): self.logvar.detach(), '{}/kl_loss'.format(split): kl_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
            return (loss, log)
        if (optimizer_idx == 1):
            # Discriminator update on detached real/fake pairs.
            if (cond is None):
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
            log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
            return (d_loss, log)
|
def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
    """Hinge discriminator loss with per-example weights.

    `weights` has one entry per batch element; per-example hinge terms are
    averaged over channel/spatial dims, then combined as a weighted mean
    over the batch.
    """
    assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
    real_term = torch.mean(F.relu(1.0 - logits_real), dim=[1, 2, 3])
    fake_term = torch.mean(F.relu(1.0 + logits_fake), dim=[1, 2, 3])
    weighted_real = (weights * real_term).sum() / weights.sum()
    weighted_fake = (weights * fake_term).sum() / weights.sum()
    return 0.5 * (weighted_real + weighted_fake)
|
def adopt_weight(weight, global_step, threshold=0, value=0.0):
    """Return `weight` once `global_step` has reached `threshold`,
    otherwise the placeholder `value` (used to delay a loss term)."""
    return weight if global_step >= threshold else value
|
def measure_perplexity(predicted_indices, n_embed):
    """Codebook usage statistics for a batch of predicted code indices.

    Returns (perplexity, cluster_use): the exponentiated entropy of the
    empirical code distribution, and the number of distinct codes used.
    """
    one_hot = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
    avg_probs = one_hot.mean(0)
    # Small epsilon keeps log() finite for unused codes.
    entropy = -(avg_probs * torch.log(avg_probs + 1e-10)).sum()
    perplexity = entropy.exp()
    cluster_use = torch.sum(avg_probs > 0)
    return perplexity, cluster_use
|
def l1(x, y):
    """Elementwise absolute-difference (L1) distance between `x` and `y`."""
    return (x - y).abs()
|
def l2(x, y):
    """Elementwise squared-difference (L2) distance between `x` and `y`."""
    return (x - y) ** 2
|
class VQLPIPSWithDiscriminator(nn.Module):
    """VQ-autoencoder training loss: pixel + LPIPS perceptual reconstruction
    terms, the codebook commitment loss, and an adversarial term from a
    patch discriminator.

    `forward` serves two optimizers: optimizer_idx 0 computes the
    generator/autoencoder loss, optimizer_idx 1 the discriminator loss.
    """
    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_ndf=64, disc_loss='hinge', n_classes=None, perceptual_loss='lpips', pixel_loss='l1'):
        super().__init__()
        assert (disc_loss in ['hinge', 'vanilla'])
        # NOTE(review): 'clips'/'dists' pass this assert but are rejected by
        # the ValueError below — only 'lpips' is actually implemented here.
        assert (perceptual_loss in ['lpips', 'clips', 'dists'])
        assert (pixel_loss in ['l1', 'l2'])
        self.codebook_weight = codebook_weight
        self.pixel_weight = pixelloss_weight
        if (perceptual_loss == 'lpips'):
            print(f'{self.__class__.__name__}: Running with LPIPS.')
            # LPIPS network is kept in eval mode and used purely as a metric.
            self.perceptual_loss = LPIPS().eval()
        else:
            raise ValueError(f'Unknown perceptual loss: >> {perceptual_loss} <<')
        self.perceptual_weight = perceptual_weight
        if (pixel_loss == 'l1'):
            self.pixel_loss = l1
        else:
            self.pixel_loss = l2
        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
        self.discriminator_iter_start = disc_start
        if (disc_loss == 'hinge'):
            self.disc_loss = hinge_d_loss
        elif (disc_loss == 'vanilla'):
            self.disc_loss = vanilla_d_loss
        else:
            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
        print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.')
        self.disc_factor = disc_factor
        self.discriminator_weight = disc_weight
        self.disc_conditional = disc_conditional
        # Needed only to compute codebook perplexity in `forward`.
        self.n_classes = n_classes
    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
        """Balance the adversarial term against the reconstruction term via
        the ratio of their gradient norms at the decoder's last layer.

        Falls back to `self.last_layer[0]` when no layer is passed in
        (assumes the owning module set that attribute — TODO confirm).
        """
        if (last_layer is not None):
            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
        else:
            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
        # Small epsilon guards against division by a vanishing gradient norm.
        d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
        d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
        d_weight = (d_weight * self.discriminator_weight)
        return d_weight
    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None, cond=None, split='train', predicted_indices=None):
        """Return (loss, log_dict) for the generator (optimizer_idx == 0) or
        the discriminator (optimizer_idx == 1)."""
        if (not exists(codebook_loss)):
            codebook_loss = torch.tensor([0.0]).to(inputs.device)
        rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous())
        if (self.perceptual_weight > 0):
            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
            rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
        else:
            p_loss = torch.tensor([0.0])
        nll_loss = rec_loss
        nll_loss = torch.mean(nll_loss)
        if (optimizer_idx == 0):
            # Generator/autoencoder update.
            if (cond is None):
                assert (not self.disc_conditional)
                logits_fake = self.discriminator(reconstructions.contiguous())
            else:
                assert self.disc_conditional
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
            g_loss = (- torch.mean(logits_fake))
            try:
                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
            except RuntimeError:
                # autograd.grad fails when no graph exists (e.g. eval mode).
                assert (not self.training)
                d_weight = torch.tensor(0.0)
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            loss = ((nll_loss + ((d_weight * disc_factor) * g_loss)) + (self.codebook_weight * codebook_loss.mean()))
            log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/quant_loss'.format(split): codebook_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/p_loss'.format(split): p_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
            if (predicted_indices is not None):
                assert (self.n_classes is not None)
                with torch.no_grad():
                    (perplexity, cluster_usage) = measure_perplexity(predicted_indices, self.n_classes)
                log[f'{split}/perplexity'] = perplexity
                log[f'{split}/cluster_usage'] = cluster_usage
            return (loss, log)
        if (optimizer_idx == 1):
            # Discriminator update on detached real/fake pairs.
            if (cond is None):
                logits_real = self.discriminator(inputs.contiguous().detach())
                logits_fake = self.discriminator(reconstructions.contiguous().detach())
            else:
                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
            d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
            log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
            return (d_loss, log)
|
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of caption strings as images for logging.

    Args:
        wh: (width, height) of each rendered image.
        xc: list of strings, one per batch element.
        size: font size in points.

    Returns:
        Float tensor of shape (b, 3, h, w) with values scaled to [-1, 1].
    """
    b = len(xc)
    # The font is loop-invariant: load it once instead of per sample, and
    # fall back to PIL's builtin bitmap font if the bundled TTF is missing.
    try:
        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
    except OSError:
        font = ImageFont.load_default()
    # Characters per line, scaled to the image width (40 chars at 256px).
    nc = int(40 * (wh[0] / 256))
    txts = list()
    for bi in range(b):
        txt = Image.new('RGB', wh, color='white')
        draw = ImageDraw.Draw(txt)
        # Hard-wrap the caption every `nc` characters.
        lines = '\n'.join(xc[bi][start:(start + nc)] for start in range(0, len(xc[bi]), nc))
        try:
            draw.text((0, 0), lines, fill='black', font=font)
        except UnicodeEncodeError:
            print('Cant encode string for logging. Skipping.')
        # HWC uint8 [0,255] -> CHW float [-1,1].
        txt = ((np.array(txt).transpose(2, 0, 1) / 127.5) - 1.0)
        txts.append(txt)
    return torch.tensor(np.stack(txts))
|
def ismap(x):
    """True for 4-D tensors with more than 3 channels (feature maps rather
    than displayable images)."""
    return isinstance(x, torch.Tensor) and (len(x.shape) == 4) and (x.shape[1] > 3)
|
def isimage(x):
    """True for 4-D tensors with exactly 1 or 3 channels (grayscale/RGB)."""
    if not isinstance(x, torch.Tensor):
        return False
    return (x.ndim == 4) and (x.shape[1] in (1, 3))
|
def exists(x):
    """Return True when `x` is not None (falsy values like 0 still count)."""
    return x is not None
|
def default(val, d):
    """Return `val` when it is not None; otherwise return the fallback `d`,
    calling it first when it is a function (lazy default)."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
|
def mean_flat(tensor):
    """Take the mean over all non-batch dimensions.

    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    """
    non_batch_dims = tuple(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
|
def count_params(model, verbose=False):
    """Total number of elements across all parameters of `model`.

    When `verbose`, also prints the count in millions of parameters.
    """
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f'{model.__class__.__name__} has {(total_params * 1e-06):.2f} M params.')
    return total_params
|
def instantiate_from_config(config):
    """Build an object from a config mapping holding a `target` import path
    and optional `params` kwargs.

    The sentinel strings '__is_first_stage__' and '__is_unconditional__'
    deliberately instantiate nothing and yield None.
    """
    if 'target' not in config:
        if config == '__is_first_stage__':
            return None
        if config == '__is_unconditional__':
            return None
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', dict()))
|
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute.

    When `reload` is True the containing module is re-imported first.
    """
    module_name, obj_name = string.rsplit('.', 1)
    if reload:
        importlib.reload(importlib.import_module(module_name))
    return getattr(importlib.import_module(module_name, package=None), obj_name)
|
def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False):
if idx_to_fn:
res = func(data, worker_id=idx)
else:
res = func(data)
Q.put([idx, res])
Q.put('Done')
|
def parallel_data_prefetch(func: callable, data, n_proc, target_data_type='ndarray', cpu_intensive=True, use_worker_id=False):
    """Apply `func` to `data` split across `n_proc` workers and gather results.

    Uses processes when `cpu_intensive` (true parallelism) and threads
    otherwise (sufficient for I/O-bound work). Results are reassembled in
    worker order and concatenated/flattened according to `target_data_type`
    ('ndarray', 'list', or anything else for the raw per-worker list).

    NOTE(review): assumes `np.array_split`/the manual slicing yields exactly
    `n_proc` non-empty parts; for n_proc > len(data) the `arguments[i]`
    lookup can raise IndexError — verify callers keep n_proc <= len(data).
    """
    if (isinstance(data, np.ndarray) and (target_data_type == 'list')):
        raise ValueError('list expected but function got ndarray.')
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.')
            data = list(data.values())
        if (target_data_type == 'ndarray'):
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(f'The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}.')
    # Processes give real CPU parallelism; threads avoid fork overhead for I/O.
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    if (target_data_type == 'ndarray'):
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate(np.array_split(data, n_proc))]
    else:
        # Ceil-divide so every element lands in exactly one chunk.
        step = (int(((len(data) / n_proc) + 1)) if ((len(data) % n_proc) != 0) else int((len(data) / n_proc)))
        arguments = [[func, Q, part, i, use_worker_id] for (i, part) in enumerate([data[i:(i + step)] for i in range(0, len(data), step)])]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    print(f'Start prefetching...')
    import time
    start = time.time()
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        # Each worker enqueues its result then a 'Done' sentinel; stop once
        # all n_proc sentinels have been seen.
        k = 0
        while (k < n_proc):
            res = Q.get()
            if (res == 'Done'):
                k += 1
            else:
                # res is [worker_idx, result]: slot results by worker index.
                gather_res[res[0]] = res[1]
    except Exception as e:
        print('Exception: ', e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f'Prefetching complete. [{(time.time() - start)} sec.]')
    if (target_data_type == 'ndarray'):
        if (not isinstance(gather_res[0], np.ndarray)):
            return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
        return np.concatenate(gather_res, axis=0)
    elif (target_data_type == 'list'):
        out = []
        for r in gather_res:
            out.extend(r)
        return out
    else:
        return gather_res
|
def get_parser(**parser_kwargs):
    """Build the training command-line parser.

    Extra keyword arguments are forwarded to `argparse.ArgumentParser`.
    """
    def str2bool(v):
        # Accept real bools plus the common textual spellings.
        if isinstance(v, bool):
            return v
        lowered = v.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument('-n', '--name', type=str, const=True, default='', nargs='?',
                        help='postfix for logdir')
    parser.add_argument('-r', '--resume', type=str, const=True, default='', nargs='?',
                        help='resume from logdir or checkpoint in logdir')
    parser.add_argument('-b', '--base', nargs='*', metavar='base_config.yaml', default=list(),
                        help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.')
    parser.add_argument('-t', '--train', type=str2bool, const=True, default=False, nargs='?',
                        help='train')
    parser.add_argument('--no-test', type=str2bool, const=True, default=False, nargs='?',
                        help='disable test')
    parser.add_argument('-p', '--project',
                        help='name of new or path to existing project')
    parser.add_argument('-d', '--debug', type=str2bool, nargs='?', const=True, default=False,
                        help='enable post-mortem debugging')
    parser.add_argument('-s', '--seed', type=int, default=23,
                        help='seed for seed_everything')
    parser.add_argument('-f', '--postfix', type=str, default='',
                        help='post-postfix for default name')
    parser.add_argument('-l', '--logdir', type=str, default='logs',
                        help='directory for logging dat shit')
    parser.add_argument('--scale_lr', type=str2bool, nargs='?', const=True, default=True,
                        help='scale base-lr by ngpu * batch_size * n_accumulate')
    return parser
|
def nondefault_trainer_args(opt):
    """Names of pytorch-lightning Trainer CLI options whose value in `opt`
    differs from the Trainer defaults (sorted for stable output)."""
    parser = Trainer.add_argparse_args(argparse.ArgumentParser())
    defaults = parser.parse_args([])
    changed = (k for k in vars(defaults) if getattr(opt, k) != getattr(defaults, k))
    return sorted(changed)
|
class WrappedDataset(Dataset):
    """Adapter turning any object that supports __len__ and __getitem__ into
    a pytorch Dataset."""

    def __init__(self, dataset):
        self.data = dataset

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
|
def worker_init_fn(_):
    """DataLoader worker init: give each worker a distinct numpy seed, and
    for iterable datasets also a disjoint shard of the sample ids."""
    worker_info = torch.utils.data.get_worker_info()
    dataset = worker_info.dataset
    worker_id = worker_info.id
    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        # Shard the record ids evenly so workers don't yield duplicates.
        split_size = (dataset.num_records // worker_info.num_workers)
        dataset.sample_ids = dataset.valid_ids[(worker_id * split_size):((worker_id + 1) * split_size)]
        # NOTE(review): seeds from a randomly chosen entry of numpy's RNG
        # state vector, offset by worker id — unusual; confirm this is the
        # intended per-worker seeding scheme.
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed((np.random.get_state()[1][current_id] + worker_id))
    else:
        # Map-style datasets: seed from the first RNG-state entry + worker id.
        return np.random.seed((np.random.get_state()[1][0] + worker_id))
|
class DataModuleFromConfig(pl.LightningDataModule):
    """LightningDataModule that instantiates its train/val/test/predict
    datasets from config dicts via `instantiate_from_config`.

    A dataloader method is attached only for the splits whose config was
    provided, so Lightning skips the others.
    """
    def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, shuffle_val_dataloader=False):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # Default worker count: twice the batch size.
        self.num_workers = (num_workers if (num_workers is not None) else (batch_size * 2))
        self.use_worker_init_fn = use_worker_init_fn
        if (train is not None):
            self.dataset_configs['train'] = train
            self.train_dataloader = self._train_dataloader
        if (validation is not None):
            self.dataset_configs['validation'] = validation
            self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader)
        if (test is not None):
            self.dataset_configs['test'] = test
            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
        if (predict is not None):
            self.dataset_configs['predict'] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap
    def prepare_data(self):
        # Instantiate once for any download/setup side effects; instances
        # built here are discarded (setup() builds the ones that are kept).
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)
    def setup(self, stage=None):
        self.datasets = dict(((k, instantiate_from_config(self.dataset_configs[k])) for k in self.dataset_configs))
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])
    def _train_dataloader(self):
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if (is_iterable_dataset or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        # Iterable datasets cannot be shuffled by the DataLoader.
        return DataLoader(self.datasets['train'], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=(False if is_iterable_dataset else True), worker_init_fn=init_fn)
    def _val_dataloader(self, shuffle=False):
        if (isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets['validation'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
    def _test_dataloader(self, shuffle=False):
        # NOTE(review): inspects the 'train' dataset here, not 'test' —
        # looks copy-pasted; confirm whether 'test' was intended.
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if (is_iterable_dataset or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        # do not shuffle dataloader for iterable dataset
        shuffle = (shuffle and (not is_iterable_dataset))
        return DataLoader(self.datasets['test'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
    def _predict_dataloader(self, shuffle=False):
        if (isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets['predict'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn)
|
class SetupCallback(Callback):
    """Lightning callback that creates the log/checkpoint/config directories
    on rank 0 and dumps the resolved configs at training start; also saves a
    'last.ckpt' on keyboard interrupt."""
    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume
        self.now = now
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config
        self.lightning_config = lightning_config
    def on_keyboard_interrupt(self, trainer, pl_module):
        """Save a last checkpoint when training is interrupted (rank 0 only)."""
        if (trainer.global_rank == 0):
            print('Summoning checkpoint.')
            ckpt_path = os.path.join(self.ckptdir, 'last.ckpt')
            trainer.save_checkpoint(ckpt_path)
    def on_pretrain_routine_start(self, trainer, pl_module):
        """Create run directories and persist configs (rank 0); on non-zero
        ranks, move an orphaned logdir aside so ranks don't collide."""
        if (trainer.global_rank == 0):
            os.makedirs(self.logdir, exist_ok=True)
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)
            if ('callbacks' in self.lightning_config):
                if ('metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']):
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
            print('Project config')
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, '{}-project.yaml'.format(self.now)))
            print('Lightning config')
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({'lightning': self.lightning_config}), os.path.join(self.cfgdir, '{}-lightning.yaml'.format(self.now)))
        elif ((not self.resume) and os.path.exists(self.logdir)):
            # Non-rank-0 workers move a pre-existing (non-resumed) logdir
            # into 'child_runs' to keep it out of rank 0's way.
            (dst, name) = os.path.split(self.logdir)
            dst = os.path.join(dst, 'child_runs', name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            try:
                os.rename(self.logdir, dst)
            except FileNotFoundError:
                pass
|
class ImageLogger(Callback):
    """Lightning callback that periodically asks the module for sample images
    (`pl_module.log_images`) and writes them to disk plus the logger backend.

    Logging happens every `batch_frequency` steps and, when
    `increase_log_steps` is set, additionally at powers of two early on.
    """
    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True, rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, log_images_kwargs=None):
        super().__init__()
        self.rescale = rescale
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # Dispatch table: logger class -> image-logging method.
        # NOTE(review): TestTubeLogger is the only backend handled here.
        self.logger_log_images = {pl.loggers.TestTubeLogger: self._testtube}
        # Extra early log points at powers of two up to batch_freq.
        self.log_steps = [(2 ** n) for n in range((int(np.log2(self.batch_freq)) + 1))]
        if (not increase_log_steps):
            self.log_steps = [self.batch_freq]
        self.clamp = clamp
        self.disabled = disabled
        self.log_on_batch_idx = log_on_batch_idx
        self.log_images_kwargs = (log_images_kwargs if log_images_kwargs else {})
        self.log_first_step = log_first_step
    @rank_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        """Push image grids to the TestTube (tensorboard) experiment."""
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = ((grid + 1.0) / 2.0)  # [-1,1] -> [0,1]
            tag = f'{split}/{k}'
            pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step)
    @rank_zero_only
    def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
        """Save each image batch as a PNG grid under save_dir/images/<split>."""
        root = os.path.join(save_dir, 'images', split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            if self.rescale:
                grid = ((grid + 1.0) / 2.0)  # [-1,1] -> [0,1]
            # CHW -> HWC for PIL.
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze((- 1))
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = '{}_gs-{:06}_e-{:06}_b-{:06}.png'.format(k, global_step, current_epoch, batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)
    def log_img(self, pl_module, batch, batch_idx, split='train'):
        """Sample images from the module (in eval mode, no grad) and forward
        them to disk and the logger backend."""
        check_idx = (batch_idx if self.log_on_batch_idx else pl_module.global_step)
        if (self.check_frequency(check_idx) and hasattr(pl_module, 'log_images') and callable(pl_module.log_images) and (self.max_images > 0)):
            logger = type(pl_module.logger)
            is_train = pl_module.training
            if is_train:
                pl_module.eval()
            with torch.no_grad():
                images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
            for k in images:
                # Cap the number of logged images per key.
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], (- 1.0), 1.0)
            self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx)
            # Fall back to a no-op for logger backends without a handler.
            logger_log_images = self.logger_log_images.get(logger, (lambda *args, **kwargs: None))
            logger_log_images(pl_module, images, pl_module.global_step, split)
            if is_train:
                pl_module.train()
    def check_frequency(self, check_idx):
        """True when `check_idx` is a scheduled log step; consumes the next
        early (power-of-two) log step when one matches."""
        if ((((check_idx % self.batch_freq) == 0) or (check_idx in self.log_steps)) and ((check_idx > 0) or self.log_first_step)):
            try:
                self.log_steps.pop(0)
            except IndexError as e:
                print(e)
                pass
            return True
        return False
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if ((not self.disabled) and ((pl_module.global_step > 0) or self.log_first_step)):
            self.log_img(pl_module, batch, batch_idx, split='train')
    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if ((not self.disabled) and (pl_module.global_step > 0)):
            self.log_img(pl_module, batch, batch_idx, split='val')
        if hasattr(pl_module, 'calibrate_grad_norm'):
            if ((pl_module.calibrate_grad_norm and ((batch_idx % 25) == 0)) and (batch_idx > 0)):
                # NOTE(review): log_gradients is not defined in this file's
                # visible scope — presumably inherited/defined elsewhere; verify.
                self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
|
class CUDACallback(Callback):
    """Lightning callback that reports per-epoch wall time and peak CUDA
    memory, averaged across ranks when a distributed plugin is available."""
    def on_train_epoch_start(self, trainer, pl_module):
        # Reset peak-memory tracking so the epoch's maximum is measured cleanly.
        torch.cuda.reset_peak_memory_stats(trainer.root_gpu)
        torch.cuda.synchronize(trainer.root_gpu)
        self.start_time = time.time()
    def on_train_epoch_end(self, trainer, pl_module, outputs):
        torch.cuda.synchronize(trainer.root_gpu)
        max_memory = (torch.cuda.max_memory_allocated(trainer.root_gpu) / (2 ** 20))  # bytes -> MiB
        epoch_time = (time.time() - self.start_time)
        try:
            # Average the metrics across ranks; plugins without reduce()
            # (or single-process runs) fall through silently.
            max_memory = trainer.training_type_plugin.reduce(max_memory)
            epoch_time = trainer.training_type_plugin.reduce(epoch_time)
            rank_zero_info(f'Average Epoch time: {epoch_time:.2f} seconds')
            rank_zero_info(f'Average Peak memory {max_memory:.2f}MiB')
        except AttributeError:
            pass
|
def download_models(mode):
    """Download the config and checkpoint for `mode` and return their local
    paths. Only 'superresolution' is supported."""
    if (mode == 'superresolution'):
        url_conf = 'https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1'
        url_ckpt = 'https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1'
        path_conf = 'logs/diffusion/superresolution_bsr/configs/project.yaml'
        path_ckpt = 'logs/diffusion/superresolution_bsr/checkpoints/last.ckpt'
        download_url(url_conf, path_conf)
        download_url(url_ckpt, path_ckpt)
        # NOTE(review): the downloader appears to save under a '?dl=1'
        # filename derived from the URL, hence the suffix appended to the
        # returned paths — confirm against the download_url implementation.
        path_conf = (path_conf + '/?dl=1')
        path_ckpt = (path_ckpt + '/?dl=1')
        return (path_conf, path_ckpt)
    else:
        raise NotImplementedError
|
def load_model_from_config(config, ckpt):
    """Instantiate the model described by `config` and load weights from the
    Lightning checkpoint at `ckpt`.

    Returns ({'model': model}, global_step). The state dict is loaded
    non-strictly; the model is moved to GPU and put in eval mode.
    """
    print(f'Loading model from {ckpt}')
    pl_sd = torch.load(ckpt, map_location='cpu')
    global_step = pl_sd['global_step']
    sd = pl_sd['state_dict']
    model = instantiate_from_config(config.model)
    # Non-strict load: (m, u) hold missing/unexpected keys, ignored here.
    (m, u) = model.load_state_dict(sd, strict=False)
    model.cuda()
    model.eval()
    return ({'model': model}, global_step)
|
def get_model(mode):
    """Download and build the model for `mode`; the global step is discarded."""
    path_conf, path_ckpt = download_models(mode)
    config = OmegaConf.load(path_conf)
    model, _step = load_model_from_config(config, path_ckpt)
    return model
|
def get_custom_cond(mode):
    """Interactively collect a custom conditioning example for `mode` and
    store it under data/example_conditioning/<mode>/.

    Raises NotImplementedError for unsupported modes.
    """
    dest = 'data/example_conditioning'
    if mode == 'superresolution':
        uploaded_img = files.upload()
        filename = next(iter(uploaded_img))
        # rsplit keeps dots inside the base name intact.
        name, filetype = filename.rsplit('.', 1)
        # Move the uploaded file (saved under its own name) into place.
        os.rename(filename, f'{dest}/{mode}/custom_{name}.{filetype}')
    elif mode == 'text_conditional':
        w = widgets.Text(value='A cake with cream!', disabled=True)
        display(w)
        with open(f'{dest}/{mode}/custom_{w.value[:20]}.txt', 'w') as f:
            f.write(w.value)
    elif mode == 'class_conditional':
        w = widgets.IntSlider(min=0, max=1000)
        display(w)
        with open(f'{dest}/{mode}/custom.txt', 'w') as f:
            # IntSlider.value is an int; file.write requires str.
            f.write(str(w.value))
    else:
        raise NotImplementedError(f'cond not implemented for mode{mode}')
|
def get_cond_options(mode):
    """Return (directory, sorted filenames) of example conditionings for `mode`."""
    path = os.path.join('data/example_conditioning', mode)
    # The identity comprehension around sorted() was redundant.
    onlyfiles = sorted(os.listdir(path))
    return (path, onlyfiles)
|
def select_cond_path(mode):
    """Show a radio-button chooser over the conditioning files for `mode` and
    return the path of the currently selected file."""
    directory = os.path.join('data/example_conditioning', mode)
    options = sorted(os.listdir(directory))
    selected = widgets.RadioButtons(options=options, description='Select conditioning:', disabled=False)
    display(selected)
    return os.path.join(directory, selected.value)
|
def get_cond(mode, selected_path):
    """Build the conditioning dict for `mode` from the image at `selected_path`.

    For 'superresolution': {'LR_image': low-res image scaled to [-1, 1] on CUDA,
    'image': a 4x-upsampled copy in [0, 1]}, both in 1HWC layout.
    """
    example = dict()
    if mode == 'superresolution':
        up_f = 4
        visualize_cond_img(selected_path)
        lr = torch.unsqueeze(torchvision.transforms.ToTensor()(Image.open(selected_path)), 0)
        upsampled = torchvision.transforms.functional.resize(
            lr, size=[up_f * lr.shape[2], up_f * lr.shape[3]], antialias=True)
        upsampled = rearrange(upsampled, '1 c h w -> 1 h w c')
        lr = rearrange(lr, '1 c h w -> 1 h w c')
        lr = (2.0 * lr) - 1.0  # [0, 1] -> [-1, 1]
        lr = lr.to(torch.device('cuda'))
        example['LR_image'] = lr
        example['image'] = upsampled
    return example
|
def visualize_cond_img(path):
    """Render the conditioning image inline in the notebook."""
    img = ipyimg(filename=path)
    display(img)
|
def run(model, selected_path, task, custom_steps, resize_enabled=False, classifier_ckpt=None, global_step=None):
    """Run one diffusion sampling pass for `task` on the conditioning at
    `selected_path`; returns the log dict from make_convolutional_sample.

    Note: `classifier_ckpt` and `global_step` are accepted but unused here.
    """
    example = get_cond(task, selected_path)
    # Fixed sampling configuration for this helper.
    save_intermediate_vid = False
    n_runs = 1
    masked = False
    guider = None    # optional score corrector (disabled)
    ckwargs = None   # corrector kwargs
    mode = 'ddim'
    ddim_use_x0_pred = False
    temperature = 1.0
    eta = 1.0
    make_progrow = True
    custom_shape = None
    (height, width) = example['image'].shape[1:3]
    # Large inputs are processed patch-wise (128px patches, stride 64).
    split_input = ((height >= 128) and (width >= 128))
    if split_input:
        ks = 128
        stride = 64
        vqf = 4  # presumably the first-stage downsampling factor — confirm
        model.split_input_params = {'ks': (ks, ks), 'stride': (stride, stride), 'vqf': vqf, 'patch_distributed_vq': True, 'tie_braker': False, 'clip_max_weight': 0.5, 'clip_min_weight': 0.01, 'clip_max_tie_weight': 0.5, 'clip_min_tie_weight': 0.01}
    elif hasattr(model, 'split_input_params'):
        # Remove stale patching config from a previous run.
        delattr(model, 'split_input_params')
    invert_mask = False
    x_T = None
    for n in range(n_runs):
        if (custom_shape is not None):
            # Draw one noise tensor and share it across the batch.
            x_T = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
            x_T = repeat(x_T, '1 c h w -> b c h w', b=custom_shape[0])
        logs = make_convolutional_sample(example, model, mode=mode, custom_steps=custom_steps, eta=eta, swap_mode=False, masked=masked, invert_mask=invert_mask, quantize_x0=False, custom_schedule=None, decode_interval=10, resize_enabled=resize_enabled, custom_shape=custom_shape, temperature=temperature, noise_dropout=0.0, corrector=guider, corrector_kwargs=ckwargs, x_T=x_T, save_intermediate_vid=save_intermediate_vid, make_progrow=make_progrow, ddim_use_x0_pred=ddim_use_x0_pred)
    return logs
|
@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None, mask=None, x0=None, quantize_x0=False, img_callback=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, x_T=None, log_every_t=None):
    """DDIM-sample `shape[0]` latents of shape `shape[1:]` under conditioning `cond`.

    NOTE(review): `img_callback`, `noise_dropout` and `log_every_t` are accepted
    but never forwarded to the sampler — confirm whether that is intentional.
    """
    sampler = DDIMSampler(model)
    batch_size, sample_shape = shape[0], shape[1:]
    print(f'Sampling with eta = {eta}; steps: {steps}')
    samples, intermediates = sampler.sample(
        steps, batch_size=batch_size, shape=sample_shape, conditioning=cond,
        callback=callback, normals_sequence=normals_sequence,
        quantize_x0=quantize_x0, eta=eta, mask=mask, x0=x0,
        temperature=temperature, verbose=False,
        score_corrector=score_corrector, corrector_kwargs=corrector_kwargs,
        x_T=x_T)
    return (samples, intermediates)
|
@torch.no_grad()
def make_convolutional_sample(batch, model, mode='vanilla', custom_steps=None, eta=1.0, swap_mode=False, masked=False, invert_mask=True, quantize_x0=False, custom_schedule=None, decode_interval=1000, resize_enabled=False, custom_shape=None, temperature=1.0, noise_dropout=0.0, corrector=None, corrector_kwargs=None, x_T=None, save_intermediate_vid=False, make_progrow=True, ddim_use_x0_pred=False):
    """Sample one batch with DDIM from the conditioning in `batch` and return a
    log dict with input/reconstruction/conditioning/sample entries plus timing.

    Note: several parameters (mode, swap_mode, masked, custom_schedule, ...)
    are accepted but not referenced in this body.
    """
    log = dict()
    (z, c, x, xrec, xc) = model.get_input(batch, model.first_stage_key, return_first_stage_outputs=True, force_c_encode=(not (hasattr(model, 'split_input_params') and (model.cond_stage_key == 'coordinates_bbox'))), return_original_cond=True)
    # Log every intermediate only when an intermediate video is requested.
    log_every_t = (1 if save_intermediate_vid else None)
    if (custom_shape is not None):
        # Ignore the encoded latent and start from pure noise of custom shape.
        z = torch.randn(custom_shape)
        print(f'Generating {custom_shape[0]} samples of shape {custom_shape[1:]}')
    z0 = None
    log['input'] = x
    log['reconstruction'] = xrec
    if ismap(xc):
        log['original_conditioning'] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)
    else:
        log['original_conditioning'] = (xc if (xc is not None) else torch.zeros_like(x))
        if model.cond_stage_model:
            log[model.cond_stage_key] = (xc if (xc is not None) else torch.zeros_like(x))
            if (model.cond_stage_key == 'class_label'):
                log[model.cond_stage_key] = xc[model.cond_stage_key]
    with model.ema_scope('Plotting'):
        t0 = time.time()
        img_cb = None
        (sample, intermediates) = convsample_ddim(model, c, steps=custom_steps, shape=z.shape, eta=eta, quantize_x0=quantize_x0, img_callback=img_cb, mask=None, x0=z0, temperature=temperature, noise_dropout=noise_dropout, score_corrector=corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t)
        t1 = time.time()
        if ddim_use_x0_pred:
            # Use the final x0 prediction instead of the last latent.
            sample = intermediates['pred_x0'][(- 1)]
    x_sample = model.decode_first_stage(sample)
    try:
        # Only first stages that support force_not_quantize reach this path.
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log['sample_noquant'] = x_sample_noquant
        log['sample_diff'] = torch.abs((x_sample_noquant - x_sample))
    except:
        pass
    log['sample'] = x_sample
    log['time'] = (t1 - t0)
    return log
|
def chunk(it, size):
    """Yield consecutive tuples of up to `size` items from iterable `it`.

    The final tuple may be shorter; an empty iterable yields nothing.
    """
    def batches(stream):
        while True:
            batch = tuple(islice(stream, size))
            if not batch:
                return
            yield batch
    return batches(iter(it))
|
def load_model_from_config(config, ckpt, verbose=False):
    """Build the model from `config` and load `ckpt` weights non-strictly.

    With verbose=True, prints missing/unexpected state-dict keys.
    Returns the model on CUDA in eval mode.
    """
    print(f'Loading model from {ckpt}')
    pl_sd = torch.load(ckpt, map_location='cpu')
    if 'global_step' in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    state = pl_sd['state_dict']
    model = instantiate_from_config(config.model)
    missing, unexpected = model.load_state_dict(state, strict=False)
    if verbose and len(missing) > 0:
        print('missing keys:')
        print(missing)
    if verbose and len(unexpected) > 0:
        print('unexpected keys:')
        print(unexpected)
    model.cuda()
    model.eval()
    return model
|
def load_img(path):
    """Load an RGB image, crop its sides to multiples of 64, scale to [-1, 1].

    Returns a float32 tensor of shape (1, 3, H, W).
    """
    image = Image.open(path).convert('RGB')
    w, h = image.size
    print(f'loaded input image of size ({w}, {h}) from {path}')
    # The latent downsampling pipeline requires side lengths divisible by 64.
    w, h = w - w % 64, h - h % 64
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    arr = np.array(image).astype(np.float32) / 255.0
    arr = arr[None].transpose(0, 3, 1, 2)  # HWC -> 1CHW
    tensor = torch.from_numpy(arr)
    return (2.0 * tensor) - 1.0
|
def main():
    """CLI entry point for img2img sampling (DDIM).

    Encodes an init image into latent space, noises it up to a
    strength-dependent timestep, then denoises it under the text prompt with
    classifier-free guidance, saving individual samples and/or a grid.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--prompt', type=str, nargs='?', default='a painting of a virus monster playing guitar', help='the prompt to render')
    parser.add_argument('--init-img', type=str, nargs='?', help='path to the input image')
    parser.add_argument('--outdir', type=str, nargs='?', help='dir to write results to', default='outputs/img2img-samples')
    parser.add_argument('--skip_grid', action='store_true', help='do not save a grid, only individual samples. Helpful when evaluating lots of samples')
    parser.add_argument('--skip_save', action='store_true', help='do not save indiviual samples. For speed measurements.')
    parser.add_argument('--ddim_steps', type=int, default=50, help='number of ddim sampling steps')
    parser.add_argument('--plms', action='store_true', help='use plms sampling')
    parser.add_argument('--fixed_code', action='store_true', help='if enabled, uses the same starting code across all samples ')
    parser.add_argument('--ddim_eta', type=float, default=0.0, help='ddim eta (eta=0.0 corresponds to deterministic sampling')
    parser.add_argument('--n_iter', type=int, default=1, help='sample this often')
    parser.add_argument('--C', type=int, default=4, help='latent channels')
    parser.add_argument('--f', type=int, default=8, help='downsampling factor, most often 8 or 16')
    parser.add_argument('--n_samples', type=int, default=2, help='how many samples to produce for each given prompt. A.k.a batch size')
    parser.add_argument('--n_rows', type=int, default=0, help='rows in the grid (default: n_samples)')
    parser.add_argument('--scale', type=float, default=5.0, help='unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))')
    parser.add_argument('--strength', type=float, default=0.75, help='strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image')
    parser.add_argument('--from-file', type=str, help='if specified, load prompts from this file')
    parser.add_argument('--config', type=str, default='configs/stable-diffusion/v1-inference.yaml', help='path to config which constructs model')
    parser.add_argument('--ckpt', type=str, default='models/ldm/stable-diffusion-v1/model.ckpt', help='path to checkpoint of model')
    parser.add_argument('--seed', type=int, default=42, help='the seed (for reproducible sampling)')
    parser.add_argument('--precision', type=str, help='evaluate at this precision', choices=['full', 'autocast'], default='autocast')
    opt = parser.parse_args()
    seed_everything(opt.seed)
    config = OmegaConf.load(f'{opt.config}')
    model = load_model_from_config(config, f'{opt.ckpt}')
    device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
    model = model.to(device)
    if opt.plms:
        # NOTE(review): the assignment below is unreachable because of the raise.
        raise NotImplementedError('PLMS sampler not (yet) supported')
        sampler = PLMSSampler(model)
    else:
        sampler = DDIMSampler(model)
    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir
    batch_size = opt.n_samples
    n_rows = (opt.n_rows if (opt.n_rows > 0) else batch_size)
    if (not opt.from_file):
        prompt = opt.prompt
        assert (prompt is not None)
        # One batch repeating the single prompt batch_size times.
        data = [(batch_size * [prompt])]
    else:
        print(f'reading prompts from {opt.from_file}')
        with open(opt.from_file, 'r') as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))
    sample_path = os.path.join(outpath, 'samples')
    os.makedirs(sample_path, exist_ok=True)
    # Continue numbering after any files already present.
    base_count = len(os.listdir(sample_path))
    grid_count = (len(os.listdir(outpath)) - 1)
    assert os.path.isfile(opt.init_img)
    init_image = load_img(opt.init_img).to(device)
    init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
    # Encode the init image to a first-stage latent.
    init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))
    sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)
    assert (0.0 <= opt.strength <= 1.0), 'can only work with strength in [0.0, 1.0]'
    # strength decides how many DDIM steps re-denoise the noised latent.
    t_enc = int((opt.strength * opt.ddim_steps))
    print(f'target t_enc is {t_enc} steps')
    precision_scope = (autocast if (opt.precision == 'autocast') else nullcontext)
    with torch.no_grad():
        with precision_scope('cuda'):
            with model.ema_scope():
                tic = time.time()
                all_samples = list()
                for n in trange(opt.n_iter, desc='Sampling'):
                    for prompts in tqdm(data, desc='data'):
                        uc = None
                        if (opt.scale != 1.0):
                            # Empty-prompt conditioning for classifier-free guidance.
                            uc = model.get_learned_conditioning((batch_size * ['']))
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)
                        # Noise init latent to timestep t_enc, then denoise under c.
                        z_enc = sampler.stochastic_encode(init_latent, torch.tensor(([t_enc] * batch_size)).to(device))
                        samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc)
                        x_samples = model.decode_first_stage(samples)
                        x_samples = torch.clamp(((x_samples + 1.0) / 2.0), min=0.0, max=1.0)
                        if (not opt.skip_save):
                            for x_sample in x_samples:
                                x_sample = (255.0 * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c'))
                                Image.fromarray(x_sample.astype(np.uint8)).save(os.path.join(sample_path, f'{base_count:05}.png'))
                                base_count += 1
                        all_samples.append(x_samples)
                if (not opt.skip_grid):
                    # Stack all batches into a single image grid.
                    grid = torch.stack(all_samples, 0)
                    grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                    grid = make_grid(grid, nrow=n_rows)
                    grid = (255.0 * rearrange(grid, 'c h w -> h w c').cpu().numpy())
                    Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                    grid_count += 1
                toc = time.time()
    print(f'''Your samples are ready and waiting for you here:
{outpath}
Enjoy.''')
|
def make_batch(image, mask, device):
    """Build an inpainting batch from image/mask file paths.

    Returns {'image', 'mask', 'masked_image'} with every tensor scaled from
    [0, 1] to [-1, 1] and moved to `device`.
    """
    img = np.array(Image.open(image).convert('RGB')).astype(np.float32) / 255.0
    img = torch.from_numpy(img[None].transpose(0, 3, 1, 2))  # HWC -> 1CHW
    msk = np.array(Image.open(mask).convert('L')).astype(np.float32) / 255.0
    msk = msk[None, None]  # HW -> 11HW
    # Binarize the mask at 0.5.
    msk[msk < 0.5] = 0
    msk[msk >= 0.5] = 1
    msk = torch.from_numpy(msk)
    masked_image = (1 - msk) * img
    batch = {'image': img, 'mask': msk, 'masked_image': masked_image}
    for key in batch:
        batch[key] = batch[key].to(device=device)
        batch[key] = (batch[key] * 2.0) - 1.0
    return batch
|
def load_model_from_config(config, ckpt):
    """Instantiate model from `config`, load `ckpt` weights non-strictly, and
    return it on CUDA in eval mode."""
    print(f'Loading model from {ckpt}')
    checkpoint = torch.load(ckpt)
    state = checkpoint['state_dict']
    model = instantiate_from_config(config.model)
    missing, unexpected = model.load_state_dict(state, strict=False)
    model.cuda()
    model.eval()
    return model
|
def get_model():
    """Load the class-conditional ImageNet-256 LDM from its default paths."""
    config = OmegaConf.load('configs/latent-diffusion/cin256-v2.yaml')
    return load_model_from_config(config, 'models/ldm/cin256-v2/model.ckpt')
|
def custom_to_pil(x):
    """Convert a CHW tensor in [-1, 1] to an RGB PIL image."""
    x = x.detach().cpu()
    x = torch.clamp(x, -1.0, 1.0)
    x = (x + 1.0) / 2.0              # [-1, 1] -> [0, 1]
    x = x.permute(1, 2, 0).numpy()   # CHW -> HWC
    x = (255 * x).astype(np.uint8)
    img = Image.fromarray(x)
    return img if img.mode == 'RGB' else img.convert('RGB')
|
def custom_to_np(x):
    """Map a BCHW tensor in [-1, 1] to a contiguous BHWC uint8 tensor in [0, 255]."""
    sample = x.detach().cpu()
    sample = ((sample + 1) * 127.5).clamp(0, 255).to(torch.uint8)
    sample = sample.permute(0, 2, 3, 1).contiguous()  # BCHW -> BHWC
    return sample
|
def logs2pil(logs, keys=('sample',)):
    """Convert each tensor entry of `logs` to a PIL image (or None on failure).

    4-D entries use their first batch element; 3-D entries convert directly;
    anything else maps to None. Fixed: the mutable-list default argument and
    the bare `except:` (which also swallowed KeyboardInterrupt/SystemExit).

    NOTE(review): `keys` is accepted but not used for filtering — every entry
    of `logs` is converted, matching the original behavior.
    """
    imgs = dict()
    for k in logs:
        try:
            if len(logs[k].shape) == 4:
                img = custom_to_pil(logs[k][0, ...])
            elif len(logs[k].shape) == 3:
                img = custom_to_pil(logs[k])
            else:
                print(f'Unknown format for key {k}. ')
                img = None
        except Exception:
            # Conversion is best-effort; anything unexpected maps to None.
            img = None
        imgs[k] = img
    return imgs
|
@torch.no_grad()
def convsample(model, shape, return_intermediates=True, verbose=True, make_prog_row=False):
    """Draw an unconditional DDPM sample, optionally via progressive denoising."""
    if make_prog_row:
        return model.progressive_denoising(None, shape, verbose=True)
    return model.p_sample_loop(None, shape, return_intermediates=return_intermediates, verbose=verbose)
|
@torch.no_grad()
def convsample_ddim(model, steps, shape, eta=1.0):
    """Unconditional DDIM sampling; `shape` is [batch, C, H, W]."""
    sampler = DDIMSampler(model)
    batch_size, sample_shape = shape[0], shape[1:]
    samples, intermediates = sampler.sample(steps, batch_size=batch_size, shape=sample_shape, eta=eta, verbose=False)
    return (samples, intermediates)
|
@torch.no_grad()
def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0):
    """Sample one batch (vanilla DDPM or DDIM), decode it, and return a log
    dict with 'sample', 'time' and 'throughput' (images/second)."""
    log = dict()
    shape = [batch_size,
             model.model.diffusion_model.in_channels,
             model.model.diffusion_model.image_size,
             model.model.diffusion_model.image_size]
    with model.ema_scope('Plotting'):
        t0 = time.time()
        if vanilla:
            sample, _progrow = convsample(model, shape, make_prog_row=True)
        else:
            sample, _intermediates = convsample_ddim(model, steps=custom_steps, shape=shape, eta=eta)
        t1 = time.time()
    x_sample = model.decode_first_stage(sample)
    log['sample'] = x_sample
    log['time'] = t1 - t0
    log['throughput'] = sample.shape[0] / (t1 - t0)
    print(f"Throughput for this batch: {log['throughput']}")
    return log
|
def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None):
    """Sample `n_samples` images batch-wise, saving PNGs to `logdir` and one
    combined .npz to `nplog`. Only unconditional models are supported.

    Raises NotImplementedError when model.cond_stage_model is set.
    """
    if vanilla:
        print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.')
    else:
        print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}')
    tstart = time.time()
    # Resume the PNG counter from what is already on disk.
    n_saved = (len(glob.glob(os.path.join(logdir, '*.png'))) - 1)
    if (model.cond_stage_model is None):
        all_images = []
        print(f'Running unconditional sampling for {n_samples} samples')
        for _ in trange((n_samples // batch_size), desc='Sampling Batches (unconditional)'):
            logs = make_convolutional_sample(model, batch_size=batch_size, vanilla=vanilla, custom_steps=custom_steps, eta=eta)
            n_saved = save_logs(logs, logdir, n_saved=n_saved, key='sample')
            all_images.extend([custom_to_np(logs['sample'])])
            if (n_saved >= n_samples):
                print(f'Finish after generating {n_saved} samples')
                break
        # Write all generated samples as one uint8 array, truncated to n_samples.
        all_img = np.concatenate(all_images, axis=0)
        all_img = all_img[:n_samples]
        shape_str = 'x'.join([str(x) for x in all_img.shape])
        nppath = os.path.join(nplog, f'{shape_str}-samples.npz')
        np.savez(nppath, all_img)
    else:
        raise NotImplementedError('Currently only sampling for unconditional models supported.')
    print(f'sampling of {n_saved} images finished in {((time.time() - tstart) / 60.0):.2f} minutes.')
|
def save_logs(logs, path, n_saved=0, key='sample', np_path=None):
    """Persist the `key` entry of `logs`: individual PNGs under `path` when
    `np_path` is None, otherwise one .npz batch under `np_path`.

    Returns the updated saved-image counter. (The original scanned every key
    and compared; dict keys are unique, so a direct membership test suffices.)
    """
    if key in logs:
        batch = logs[key]
        if np_path is None:
            for x in batch:
                img = custom_to_pil(x)
                imgpath = os.path.join(path, f'{key}_{n_saved:06}.png')
                img.save(imgpath)
                n_saved += 1
        else:
            npbatch = custom_to_np(batch)
            shape_str = 'x'.join(str(dim) for dim in npbatch.shape)
            nppath = os.path.join(np_path, f'{n_saved}-{shape_str}-samples.npz')
            np.savez(nppath, npbatch)
            n_saved += npbatch.shape[0]
    return n_saved
|
def get_parser():
    """Build the CLI parser for unconditional diffusion sampling runs."""
    specs = [
        (('-r', '--resume'), dict(type=str, nargs='?', help='load from logdir or checkpoint in logdir')),
        (('-n', '--n_samples'), dict(type=int, nargs='?', help='number of samples to draw', default=50000)),
        (('-e', '--eta'), dict(type=float, nargs='?', help='eta for ddim sampling (0.0 yields deterministic sampling)', default=1.0)),
        (('-v', '--vanilla_sample'), dict(default=False, action='store_true', help='vanilla sampling (default option is DDIM sampling)?')),
        (('-l', '--logdir'), dict(type=str, nargs='?', help='extra logdir', default='none')),
        (('-c', '--custom_steps'), dict(type=int, nargs='?', help='number of steps for ddim and fastdpm sampling', default=50)),
        (('--batch_size',), dict(type=int, nargs='?', help='the bs', default=10)),
    ]
    parser = argparse.ArgumentParser()
    for flags, kwargs in specs:
        parser.add_argument(*flags, **kwargs)
    return parser
|
def load_model_from_config(config, sd):
    """Instantiate a model from `config`, load state-dict `sd` non-strictly,
    and return it on CUDA in eval mode."""
    model = instantiate_from_config(config)
    model.load_state_dict(sd, strict=False)
    model.cuda()
    model.eval()
    return model
|
def load_model(config, ckpt, gpu, eval_mode):
    """Build the model, loading checkpoint weights when `ckpt` is given.

    Returns (model, global_step); global_step is None without a checkpoint.
    Note: `gpu` and `eval_mode` are accepted but not referenced in this body.
    """
    if ckpt:
        print(f'Loading model from {ckpt}')
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
    else:
        # No checkpoint: model keeps its random initialization.
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'])
    return (model, global_step)
|
def testit(img_path):
    """Decode and print the invisible watermark embedded in the image at
    `img_path`; prints 'null' when no valid UTF-8 watermark is found.

    Fixed: bare `except:` (which also swallowed KeyboardInterrupt/SystemExit)
    narrowed to `except Exception`.
    """
    bgr = cv2.imread(img_path)
    decoder = WatermarkDecoder('bytes', 136)
    watermark = decoder.decode(bgr, 'dwtDct')
    try:
        dec = watermark.decode('utf-8')
    except Exception:
        # Watermark bytes may not be valid UTF-8 (or decode may return None).
        dec = 'null'
    print(dec)
|
def complex_flatten(real, imag):
    """Flatten the real and imaginary parts independently; returns (real, imag)."""
    flatten_real = tf.keras.layers.Flatten()
    flatten_imag = tf.keras.layers.Flatten()
    return (flatten_real(real), flatten_imag(imag))
|
def CReLU(real, imag):
    """Complex ReLU: apply ReLU to real and imaginary parts separately."""
    relu_real = tf.keras.layers.ReLU()
    relu_imag = tf.keras.layers.ReLU()
    return (relu_real(real), relu_imag(imag))
|
def CLeaky_ReLU(real, imag):
    """Leaky ReLU applied independently to real and imaginary parts."""
    return (tf.nn.leaky_relu(real), tf.nn.leaky_relu(imag))
|
def zReLU(real, imag):
    """zReLU: keep a complex entry only when both its real and imaginary parts
    survive ReLU; zero it out otherwise."""
    real = tf.keras.layers.ReLU()(real)
    imag = tf.keras.layers.ReLU()(imag)
    # Cast through bool: non-zero -> 1.0, zero -> 0.0. The product is 1 only
    # where BOTH parts are still positive after the ReLU.
    real_flag = tf.cast(tf.cast(real, tf.bool), tf.float32)
    imag_flag = tf.cast(tf.cast(imag, tf.bool), tf.float32)
    flag = real_flag * imag_flag
    # Elementwise gate: values pass where flag == 1 and are dropped elsewhere.
    real = tf.math.multiply(real, flag)
    imag = tf.math.multiply(imag, flag)
    return (real, imag)
|
def modReLU(real, imag):
    # modReLU: gate the complex number by ReLU(|z| + b) with a learnable
    # per-channel bias b, rescaling both parts while preserving phase.
    norm = tf.abs(tf.complex(real, imag))
    bias = tf.Variable(np.zeros([norm.get_shape()[(- 1)]]), trainable=True, dtype=tf.float32)
    relu = tf.nn.relu((norm + bias))
    # NOTE(review): adding 100000.0 to the ratio scales the output by ~1e5.
    # The standard formulation multiplies by relu / (norm + eps) with a SMALL
    # epsilon inside the denominator to avoid division by zero — this looks
    # like a misplaced epsilon. Confirm against training results before changing.
    real = tf.math.multiply(((relu / norm) + 100000.0), real)
    imag = tf.math.multiply(((relu / norm) + 100000.0), imag)
    return (real, imag)
|
def complex_tanh(real, imag):
    """tanh applied independently to real and imaginary parts."""
    return (tf.nn.tanh(real), tf.nn.tanh(imag))
|
def complex_softmax(real, imag):
    """Softmax over the complex magnitude; returns a single real tensor."""
    magnitude = tf.abs(tf.complex(real, imag))
    return tf.keras.layers.Softmax()(magnitude)
|
class Naive_DCUnet16():
    """16-layer Deep Complex U-Net (naive complex layers) for speech enhancement.

    Pipeline: waveform -> STFT -> 8 complex encoder stages -> 8 complex decoder
    stages with encoder skip connections -> complex mask applied to the input
    spectrogram -> ISTFT -> enhanced waveform.
    """

    def __init__(self, input_size=16384, length=1023, over_lapping=256, padding='same', norm_trainig=True):
        # input_size: waveform samples per example.
        # length / over_lapping: STFT window length and hop overlap.
        # norm_trainig (sic): training flag forwarded to the normalization
        # layers inside the encoder/decoder modules.
        self.input_size = input_size
        self.length = length
        self.over_lapping = over_lapping
        self.padding = padding
        self.norm_trainig = norm_trainig
        self.STFT_network_arguments = {'window_length': self.length, 'over_lapping': self.over_lapping, 'padding': self.padding}

    def model(self):
        """Build and return the Keras Model mapping noisy to enhanced speech."""
        noisy_speech = Input(shape=(self.input_size, 1), name='noisy_speech')
        (stft_real, stft_imag) = STFT_network(**self.STFT_network_arguments)(noisy_speech)
        (stft_real, stft_imag) = tranposed_STFT(stft_real, stft_imag)
        # Encoder: each stage returns downsampled features plus the
        # pre-downsampling activations kept as skip connections.
        (real, imag, conv_real1, conv_imag1) = encoder_module(stft_real, stft_imag, 32, (7, 5), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real2, conv_imag2) = encoder_module(real, imag, 32, (7, 5), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real3, conv_imag3) = encoder_module(real, imag, 64, (7, 5), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real4, conv_imag4) = encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real5, conv_imag5) = encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real6, conv_imag6) = encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real7, conv_imag7) = encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, _, _) = encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        # Decoder: mirrors the encoder, consuming the skip connections.
        (center_real1, center_imag1) = decoder_module(real, imag, None, None, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(center_real1, center_imag1, conv_real7, conv_imag7, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real6, conv_imag6, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real5, conv_imag5, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real4, conv_imag4, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real3, conv_imag3, 32, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real2, conv_imag2, 32, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real1, conv_imag1, 1, (5, 3), (2, 2), training=self.norm_trainig)
        # Apply the estimated complex mask to the noisy spectrogram, then invert.
        (enhancement_stft_real, enhancement_stft_imag) = mask_processing(deconv_real1, deconv_imag1, stft_real, stft_imag)
        (enhancement_stft_real, enhancement_stft_imag) = transpoed_ISTFT(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = ISTFT_network(**self.STFT_network_arguments)(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = tf.reshape(enhancement_speech, ((- 1), self.input_size, 1))
        return Model(inputs=[noisy_speech], outputs=[enhancement_speech])
|
class Naive_DCUnet20():
    """20-layer Deep Complex U-Net (naive complex layers) for speech enhancement.

    Like Naive_DCUnet16 but with two extra (7,1)/(1,7) stages at each end of
    the encoder/decoder and a 90-channel bottleneck.
    """

    def __init__(self, input_size=16384, length=1023, over_lapping=256, padding='same', norm_trainig=True):
        # input_size: waveform samples per example.
        # length / over_lapping: STFT window length and hop overlap.
        # norm_trainig (sic): training flag forwarded to the normalization layers.
        self.input_size = input_size
        self.length = length
        self.over_lapping = over_lapping
        self.padding = padding
        self.norm_trainig = norm_trainig
        self.STFT_network_arguments = {'window_length': self.length, 'over_lapping': self.over_lapping, 'padding': self.padding}

    def model(self):
        """Build and return the Keras Model mapping noisy to enhanced speech."""
        noisy_speech = Input(shape=(self.input_size, 1), name='noisy_speech')
        (stft_real, stft_imag) = STFT_network(**self.STFT_network_arguments)(noisy_speech)
        (stft_real, stft_imag) = tranposed_STFT(stft_real, stft_imag)
        # Encoder: stage outputs kept as skip connections for the decoder.
        (real, imag, conv_real1, conv_imag1) = encoder_module(stft_real, stft_imag, 32, (7, 1), (1, 1), training=self.norm_trainig)
        (real, imag, conv_real2, conv_imag2) = encoder_module(real, imag, 32, (1, 7), (1, 1), training=self.norm_trainig)
        (real, imag, conv_real3, conv_imag3) = encoder_module(real, imag, 64, (7, 5), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real4, conv_imag4) = encoder_module(real, imag, 64, (7, 5), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real5, conv_imag5) = encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real6, conv_imag6) = encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real7, conv_imag7) = encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real8, conv_imag8) = encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real9, conv_imag9) = encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, _, _) = encoder_module(real, imag, 90, (5, 3), (2, 1), training=self.norm_trainig)
        # Decoder: mirrors the encoder, consuming the skip connections.
        (center_real1, center_imag1) = decoder_module(real, imag, None, None, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(center_real1, center_imag1, conv_real9, conv_imag9, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real8, conv_imag8, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real7, conv_imag7, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real6, conv_imag6, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real5, conv_imag5, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real4, conv_imag4, 64, (7, 5), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real3, conv_imag3, 32, (7, 5), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real2, conv_imag2, 32, (1, 7), (1, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = decoder_module(deconv_real1, deconv_imag1, conv_real1, conv_imag1, 1, (7, 1), (1, 1), training=self.norm_trainig)
        # Apply the estimated complex mask to the noisy spectrogram, then invert.
        (enhancement_stft_real, enhancement_stft_imag) = mask_processing(deconv_real1, deconv_imag1, stft_real, stft_imag)
        (enhancement_stft_real, enhancement_stft_imag) = transpoed_ISTFT(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = ISTFT_network(**self.STFT_network_arguments)(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = tf.reshape(enhancement_speech, ((- 1), self.input_size, 1))
        return Model(inputs=[noisy_speech], outputs=[enhancement_speech])
|
class DCUnet16():
    """16-layer Deep Complex U-Net built from convariance_* encoder/decoder
    modules (same topology as Naive_DCUnet16 but with the covariance variant
    of the complex layers).
    """

    def __init__(self, input_size=16384, length=1023, over_lapping=256, padding='same', norm_trainig=True):
        # input_size: waveform samples per example.
        # length / over_lapping: STFT window length and hop overlap.
        # norm_trainig (sic): training flag forwarded to the normalization layers.
        self.input_size = input_size
        self.length = length
        self.over_lapping = over_lapping
        self.padding = padding
        self.norm_trainig = norm_trainig
        self.STFT_network_arguments = {'window_length': self.length, 'over_lapping': self.over_lapping, 'padding': self.padding}

    def model(self):
        """Build and return the Keras Model mapping noisy to enhanced speech."""
        noisy_speech = Input(shape=(self.input_size, 1), name='noisy_speech')
        (stft_real, stft_imag) = STFT_network(**self.STFT_network_arguments)(noisy_speech)
        (stft_real, stft_imag) = tranposed_STFT(stft_real, stft_imag)
        # Encoder: stage outputs kept as skip connections for the decoder.
        (real, imag, conv_real1, conv_imag1) = convariance_encoder_module(stft_real, stft_imag, 32, (7, 5), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real2, conv_imag2) = convariance_encoder_module(real, imag, 32, (7, 5), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real3, conv_imag3) = convariance_encoder_module(real, imag, 64, (7, 5), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real4, conv_imag4) = convariance_encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real5, conv_imag5) = convariance_encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, conv_real6, conv_imag6) = convariance_encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (real, imag, conv_real7, conv_imag7) = convariance_encoder_module(real, imag, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (real, imag, _, _) = convariance_encoder_module(real, imag, 64, (5, 3), (2, 1), training=self.norm_trainig)
        # Decoder: mirrors the encoder, consuming the skip connections.
        (center_real1, center_imag1) = convariance_decoder_module(real, imag, None, None, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(center_real1, center_imag1, conv_real7, conv_imag7, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real6, conv_imag6, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real5, conv_imag5, 64, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real4, conv_imag4, 64, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real3, conv_imag3, 32, (5, 3), (2, 2), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real2, conv_imag2, 32, (5, 3), (2, 1), training=self.norm_trainig)
        (deconv_real1, deconv_imag1) = convariance_decoder_module(deconv_real1, deconv_imag1, conv_real1, conv_imag1, 1, (5, 3), (2, 2), training=self.norm_trainig)
        # Apply the estimated complex mask to the noisy spectrogram, then invert.
        (enhancement_stft_real, enhancement_stft_imag) = mask_processing(deconv_real1, deconv_imag1, stft_real, stft_imag)
        (enhancement_stft_real, enhancement_stft_imag) = transpoed_ISTFT(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = ISTFT_network(**self.STFT_network_arguments)(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = tf.reshape(enhancement_speech, ((- 1), self.input_size, 1))
        return Model(inputs=[noisy_speech], outputs=[enhancement_speech])
|
class DCUnet20():
    """DCUnet-20 complex U-Net for speech enhancement.

    Builds a Keras model that converts a waveform to a complex STFT,
    passes it through a 10-stage complex encoder and a 10-stage complex
    decoder with skip connections, applies a complex mask, and converts
    back to a waveform via the inverse STFT.
    """
    def __init__(self, input_size=16384, length=1023, over_lapping=256, padding='same', norm_trainig=True):
        self.input_size = input_size
        self.length = length
        self.over_lapping = over_lapping
        self.padding = padding
        # NOTE: the misspelled name "norm_trainig" is kept because it is the
        # public constructor parameter and other code may pass it by keyword.
        self.norm_trainig = norm_trainig
        self.STFT_network_arguments = {'window_length': self.length, 'over_lapping': self.over_lapping, 'padding': self.padding}

    def model(self):
        """Assemble and return the Keras Model (waveform in, enhanced waveform out)."""
        # (filters, kernel_size, strides) for each encoder stage whose output
        # is kept as a skip connection for the decoder.
        encoder_config = [
            (32, (7, 1), (1, 1)),
            (32, (1, 7), (1, 1)),
            (64, (7, 5), (2, 2)),
            (64, (7, 5), (2, 1)),
            (64, (5, 3), (2, 2)),
            (64, (5, 3), (2, 1)),
            (64, (5, 3), (2, 2)),
            (64, (5, 3), (2, 1)),
            (64, (5, 3), (2, 2)),
        ]
        # Decoder stages consume the encoder skips in reverse order; the last
        # stage reduces to a single (mask) channel.
        decoder_config = [
            (64, (5, 3), (2, 2)),
            (64, (5, 3), (2, 1)),
            (64, (5, 3), (2, 2)),
            (64, (5, 3), (2, 1)),
            (64, (5, 3), (2, 2)),
            (64, (7, 5), (2, 1)),
            (32, (7, 5), (2, 2)),
            (32, (1, 7), (1, 1)),
            (1, (7, 1), (1, 1)),
        ]
        noisy_speech = Input(shape=(self.input_size, 1), name='noisy_speech')
        (stft_real, stft_imag) = STFT_network(**self.STFT_network_arguments)(noisy_speech)
        (stft_real, stft_imag) = tranposed_STFT(stft_real, stft_imag)
        real, imag = stft_real, stft_imag
        skips = []
        for filters, kernel, stride in encoder_config:
            (real, imag, skip_real, skip_imag) = convariance_encoder_module(real, imag, filters, kernel, stride, training=self.norm_trainig)
            skips.append((skip_real, skip_imag))
        # Bottleneck stage: widest (90 channels); its skip output is discarded.
        (real, imag, _, _) = convariance_encoder_module(real, imag, 90, (5, 3), (2, 1), training=self.norm_trainig)
        # First decoder stage runs without a skip connection.
        (real, imag) = convariance_decoder_module(real, imag, None, None, 64, (5, 3), (2, 1), training=self.norm_trainig)
        for (skip_real, skip_imag), (filters, kernel, stride) in zip(reversed(skips), decoder_config):
            (real, imag) = convariance_decoder_module(real, imag, skip_real, skip_imag, filters, kernel, stride, training=self.norm_trainig)
        # Apply the estimated complex mask to the noisy STFT and invert.
        (enhancement_stft_real, enhancement_stft_imag) = mask_processing(real, imag, stft_real, stft_imag)
        (enhancement_stft_real, enhancement_stft_imag) = transpoed_ISTFT(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = ISTFT_network(**self.STFT_network_arguments)(enhancement_stft_real, enhancement_stft_imag)
        enhancement_speech = tf.reshape(enhancement_speech, (-1, self.input_size, 1))
        return Model(inputs=[noisy_speech], outputs=[enhancement_speech])
|
class datagenerator(tf.keras.utils.Sequence):
    """Keras Sequence that yields (noisy, clean) speech batches from wav files.

    Parameters
    ----------
    inputs_ids : list of file names of the noisy-speech inputs
    outputs_ids : list of file names of the clean-speech targets
    inputs_dir : directory containing the noisy-speech files
    outputs_dir : directory containing the clean-speech files
    batch_size : number of wav pairs per batch
    shuffle : whether to reshuffle the sample order after every epoch
    """
    def __init__(self, inputs_ids, outputs_ids, inputs_dir, outputs_dir, batch_size=16, shuffle=True):
        self.inputs_ids = inputs_ids
        self.outputs_ids = outputs_ids
        self.inputs_dir = inputs_dir
        self.outputs_dir = outputs_dir
        # One index per sample; reshuffled at the end of every epoch when
        # shuffle is enabled.
        self.indexes = np.arange(len(self.inputs_ids))
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def on_epoch_end(self):
        """Rebuild (and optionally shuffle) the sample indexes after each epoch."""
        self.indexes = np.arange(len(self.inputs_ids))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation__(self, inputs_ids, outputs_ids):
        """Load a single (noisy, clean) wav pair from disk and return the samples."""
        # BUGFIX: join the directory and file name as two arguments.  The
        # original `os.path.join((dir + name))` concatenated the strings and
        # silently produced broken paths unless the directory happened to end
        # with a path separator.
        inputs_path = os.path.join(self.inputs_dir, inputs_ids)
        outputs_path = os.path.join(self.outputs_dir, outputs_ids)
        (_, inputs) = scipy.io.wavfile.read(inputs_path)
        (_, outputs) = scipy.io.wavfile.read(outputs_path)
        return (inputs, outputs)

    def __len__(self):
        """Number of batches per epoch: floor(total samples / batch size)."""
        return int(np.floor(len(self.inputs_ids) / self.batch_size))

    def __getitem__(self, index):
        """Return batch `index` as two arrays of shape (batch, samples, 1)."""
        indexes = self.indexes[(index * self.batch_size):((index + 1) * self.batch_size)]
        inputs_batch_ids = [self.inputs_ids[k] for k in indexes]
        outputs_batch_ids = [self.outputs_ids[k] for k in indexes]
        # Natural-sort both id lists identically so noisy/clean pairs whose
        # file names correspond stay aligned after the shuffle.
        inputs_batch_ids = natsort.natsorted(inputs_batch_ids, reverse=False)
        outputs_batch_ids = natsort.natsorted(outputs_batch_ids, reverse=False)
        inputs_list = []
        output_list = []
        for (inputs, outputs) in zip(inputs_batch_ids, outputs_batch_ids):
            (x, y) = self.__data_generation__(inputs, outputs)
            # Generalized from the hard-coded (16384, 1): any clip length is
            # reshaped to (samples, 1); 16384-sample clips behave as before.
            x = np.reshape(x, (-1, 1))
            y = np.reshape(y, (-1, 1))
            inputs_list.append(x)
            output_list.append(y)
        return (np.array(inputs_list), np.array(output_list))
|
def modified_SDR_loss(pred, true, eps=1e-08):
    """Negative cosine similarity between prediction and target (an SDR surrogate).

    Returns a value in [-1, 0]; lower is better.  `eps` guards against
    division by zero for silent signals.
    """
    inner_product = K.sum(true * pred)
    magnitude_product = K.sqrt(K.sum(true * true)) * K.sqrt(K.sum(pred * pred))
    return -(inner_product / (magnitude_product + eps))
|
def weighted_SDR_loss(noisy_speech, pred_speech, true_speech):
    """Weighted SDR loss: energy-weighted sum of a speech-SDR and a noise-SDR term."""
    def SDR_loss(pred, true, eps=1e-08):
        # Negative cosine similarity; eps avoids dividing by zero.
        inner_product = K.sum(pred * true)
        magnitude_product = K.sqrt(K.sum(true * true)) * K.sqrt(K.sum(pred * pred))
        return -(inner_product / (magnitude_product + eps))
    # Residual signals: what the model removed vs. the true noise component.
    pred_noise = noisy_speech - pred_speech
    true_noise = noisy_speech - true_speech
    # Energy ratio of clean speech to (speech + noise) weights the two terms.
    speech_energy = K.sum(true_speech ** 2)
    noise_energy = K.sum(true_noise ** 2)
    alpha = speech_energy / (speech_energy + noise_energy)
    sound_SDR = SDR_loss(pred_speech, true_speech)
    noise_SDR = SDR_loss(pred_noise, true_noise)
    return (alpha * sound_SDR) + ((1 - alpha) * noise_SDR)
|
def get_file_list(file_path):
    """Recursively collect all file paths under `file_path`.

    OS metadata files (desktop.ini, .DS_Store) are skipped.  The result is
    naturally sorted and returned as a numpy array of path strings.
    """
    collected = []
    for (root, dirs, files) in os.walk(file_path):
        for fname in files:
            # Ignore Windows/macOS folder metadata.
            if fname in ('desktop.ini', '.DS_Store'):
                continue
            collected.append(os.path.join(root, fname))
    collected = natsort.natsorted(collected, reverse=False)
    return np.array(collected)
|
def inference(path_list, save_path):
    """Enhance each wav file in `path_list` with the global `model` and save results.

    path_list : iterable of paths to noisy wav files
    save_path : directory where the enhanced wavs ('0001.wav', '0002.wav', ...)
        are written

    Relies on module-level globals: `model`, `speech_length`, `sampling_rate`.
    """
    for (index1, speech_file_path) in tqdm(enumerate(path_list)):
        (_, unseen_noisy_speech) = scipy.io.wavfile.read(speech_file_path)
        restore = []
        # Enhance the signal in non-overlapping chunks of speech_length samples;
        # a trailing remainder shorter than speech_length is dropped (as before).
        for index2 in range(len(unseen_noisy_speech) // speech_length):
            split_speech = unseen_noisy_speech[(speech_length * index2):(speech_length * (index2 + 1))]
            split_speech = np.reshape(split_speech, (1, speech_length, 1))
            enhancement_speech = model.predict([split_speech])
            predict = np.reshape(enhancement_speech, (speech_length, 1))
            restore.extend(predict)
        restore = np.array(restore)
        # BUGFIX: honor the save_path argument; the original ignored it and
        # always wrote to the hard-coded './model_pred/' directory.
        out_path = os.path.join(save_path, '{:04d}.wav'.format(index1 + 1))
        scipy.io.wavfile.write(out_path, rate=sampling_rate, data=restore)
|
def data_generator(train_arguments, test_arguments):
    """Build the (train, test) datagenerator pair from keyword-argument dicts."""
    return (datagenerator(**train_arguments), datagenerator(**test_arguments))
|
@tf.function
def loop_train(model, optimizer, train_noisy_speech, train_clean_speech):
    """One training step: forward pass, loss, gradient computation, optimizer update.

    The loss is chosen by the module-level `loss_function` flag
    ('SDR' or 'wSDR').  Returns the scalar training loss for this batch.
    """
    with tf.GradientTape() as tape:
        train_predict_speech = model(train_noisy_speech)
        if loss_function == 'SDR':
            train_loss = modified_SDR_loss(train_predict_speech, train_clean_speech)
        elif loss_function == 'wSDR':
            train_loss = weighted_SDR_loss(train_noisy_speech, train_predict_speech, train_clean_speech)
        else:
            # BUGFIX: an unrecognized flag previously left train_loss undefined
            # and crashed later with a confusing NameError.
            raise ValueError('Unknown loss_function: {}'.format(loss_function))
    gradients = tape.gradient(train_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return train_loss
|
@tf.function
def loop_test(model, test_noisy_speech, test_clean_speech):
    """Evaluation step: forward pass and loss only — no gradients, no weight update.

    The loss is chosen by the module-level `loss_function` flag
    ('SDR' or 'wSDR').  Returns the scalar test loss for this batch.
    """
    test_predict_speech = model(test_noisy_speech)
    if loss_function == 'SDR':
        test_loss = modified_SDR_loss(test_predict_speech, test_clean_speech)
    elif loss_function == 'wSDR':
        test_loss = weighted_SDR_loss(test_noisy_speech, test_predict_speech, test_clean_speech)
    else:
        # BUGFIX: an unrecognized flag previously left test_loss undefined
        # and crashed later with a confusing NameError.
        raise ValueError('Unknown loss_function: {}'.format(loss_function))
    return test_loss
|
def learning_rate_scheduler(epoch, learning_rate, total_epochs=None):
    """Step-decay schedule: full LR for the first half of training, 0.2x until
    three quarters, then 0.05x.

    Parameters
    ----------
    epoch : zero-based index of the current epoch
    learning_rate : base learning rate
    total_epochs : total number of training epochs.  BUGFIX: the original code
        compared `epoch + 1` against `int(0.5 * epoch)` / `int(0.75 * epoch)`
        — fractions of the *current* epoch — which is false for every
        epoch >= 0, so it always returned `0.05 * learning_rate`.  Pass
        `total_epochs` to get the intended schedule; omitting it preserves
        the legacy behavior for existing callers.
    """
    if total_epochs is None:
        # Legacy behavior: the broken thresholds always fell through to this.
        return 0.05 * learning_rate
    if (epoch + 1) <= int(0.5 * total_epochs):
        return 1.0 * learning_rate
    elif (epoch + 1) <= int(0.75 * total_epochs):
        return 0.2 * learning_rate
    else:
        return 0.05 * learning_rate
|
def model_flow(model, total_epochs, train_generator, test_generator):
    """Custom training driver: runs train + test loops for `total_epochs` epochs.

    Prints per-epoch average losses and checkpoints weights every 10 epochs.
    Relies on module-level globals: train_noisy_path, test_noisy_path,
    batch_size, learning_rate, save_file_name.
    """
    train_step = len(os.listdir(train_noisy_path)) // batch_size
    test_step = len(os.listdir(test_noisy_path)) // batch_size
    print('TRAIN STEPS, TEST STEPS ', train_step, test_step)
    for epoch in tqdm(range(total_epochs)):
        train_batch_losses = 0
        test_batch_losses = 0
        # A fresh Adam instance each epoch so the scheduled LR takes effect.
        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate_scheduler(epoch, learning_rate), beta_1=0.9)
        # Training loop: accumulate the per-batch losses.
        for (index, (train_noisy_speech, train_clean_speech)) in tqdm(enumerate(train_generator)):
            train_batch_losses += loop_train(model, optimizer, train_noisy_speech, train_clean_speech)
        # Test loop: no gradients, just the loss.
        for (index, (test_noisy_speech, test_clean_speech)) in tqdm(enumerate(test_generator)):
            test_batch_losses += loop_test(model, test_noisy_speech, test_clean_speech)
        # Average the accumulated losses over the number of steps.
        train_loss = train_batch_losses / train_step
        test_loss = test_batch_losses / test_step
        templet = 'Epoch : {:3d}, TRAIN LOSS : {:.5f}, TEST LOSS : {:.5f}'
        print(templet.format(epoch + 1, train_loss.numpy(), test_loss.numpy()))
        # Checkpoint every 10th epoch.
        if ((epoch + 1) % 10) == 0:
            model.save_weights('./model_save/' + save_file_name + str(epoch + 1) + '.h5')
|
class GIN(torch.nn.Module):
    """Stack of GINConv layers with optional jumping-knowledge concatenation
    and an optional final linear projection."""

    def __init__(self, in_channels, out_channels, num_layers, batch_norm=False, cat=True, lin=True):
        super(GIN, self).__init__()
        self.in_channels = in_channels
        self.num_layers = num_layers
        self.batch_norm = batch_norm
        self.cat = cat
        self.lin = lin
        self.convs = torch.nn.ModuleList()
        width = in_channels
        for _ in range(num_layers):
            # Each GINConv wraps a 2-layer MLP; subsequent layers take
            # out_channels-wide input.
            mlp = MLP(width, out_channels, 2, batch_norm, dropout=0.0)
            self.convs.append(GINConv(mlp, train_eps=True))
            width = out_channels
        # Width of the representation fed into the optional final projection:
        # input plus every layer output when concatenating, else just the last.
        feat_dim = (self.in_channels + (num_layers * out_channels)) if self.cat else out_channels
        if self.lin:
            self.out_channels = out_channels
            self.final = Lin(feat_dim, out_channels)
        else:
            self.out_channels = feat_dim
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all conv layers and the final linear (when present)."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.lin:
            self.final.reset_parameters()

    def forward(self, x, edge_index, *args):
        """Run all conv layers; optionally concat intermediate outputs and project."""
        layer_outputs = [x]
        for conv in self.convs:
            layer_outputs.append(conv(layer_outputs[-1], edge_index))
        out = torch.cat(layer_outputs, dim=-1) if self.cat else layer_outputs[-1]
        return self.final(out) if self.lin else out

    def __repr__(self):
        return '{}({}, {}, num_layers={}, batch_norm={}, cat={}, lin={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.num_layers, self.batch_norm, self.cat, self.lin)
|
class MLP(torch.nn.Module):
    """Multi-layer perceptron: ReLU (and optional batch norm) after every layer
    except the last, with dropout applied just before the last layer."""

    def __init__(self, in_channels, out_channels, num_layers, batch_norm=False, dropout=0.0):
        super(MLP, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.batch_norm = batch_norm
        self.dropout = dropout
        self.lins = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        width = in_channels
        for _ in range(num_layers):
            self.lins.append(Lin(width, out_channels))
            self.batch_norms.append(BN(out_channels))
            width = out_channels
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every linear layer and its paired batch norm."""
        for (lin, batch_norm) in zip(self.lins, self.batch_norms):
            lin.reset_parameters()
            batch_norm.reset_parameters()

    def forward(self, x, *args):
        """Apply the layer stack; extra positional args are accepted and ignored."""
        last = self.num_layers - 1
        for (i, (lin, bn)) in enumerate(zip(self.lins, self.batch_norms)):
            if i == last:
                # Dropout only before the final linear layer.
                x = F.dropout(x, p=self.dropout, training=self.training)
            x = lin(x)
            if i < last:
                x = F.relu(x)
                if self.batch_norm:
                    x = bn(x)
        return x

    def __repr__(self):
        return '{}({}, {}, num_layers={}, batch_norm={}, dropout={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.num_layers, self.batch_norm, self.dropout)
|
class RelConv(MessagePassing):
    """Relational graph conv: mean-aggregates projected neighbor features along
    both edge directions and adds a root (self) transformation."""

    def __init__(self, in_channels, out_channels):
        super(RelConv, self).__init__(aggr='mean')
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Separate projections for each edge direction; bias only on the root.
        self.lin1 = Lin(in_channels, out_channels, bias=False)
        self.lin2 = Lin(in_channels, out_channels, bias=False)
        self.root = Lin(in_channels, out_channels)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all three linear projections."""
        for lin in (self.lin1, self.lin2, self.root):
            lin.reset_parameters()

    def forward(self, x, edge_index):
        """Propagate in both edge directions and combine with the root projection."""
        # First pass: messages flow source -> target.
        self.flow = 'source_to_target'
        fwd = self.propagate(edge_index, x=self.lin1(x))
        # Second pass: messages flow target -> source.
        self.flow = 'target_to_source'
        rev = self.propagate(edge_index, x=self.lin2(x))
        return self.root(x) + fwd + rev

    def message(self, x_j):
        # Identity message: neighbors are already projected in forward().
        return x_j

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
|
class RelCNN(torch.nn.Module):
    """Stack of RelConv layers (ReLU, optional batch norm, dropout per layer)
    with optional concatenation of all layer outputs and a final projection."""

    def __init__(self, in_channels, out_channels, num_layers, batch_norm=False, cat=True, lin=True, dropout=0.0):
        super(RelCNN, self).__init__()
        self.in_channels = in_channels
        self.num_layers = num_layers
        self.batch_norm = batch_norm
        self.cat = cat
        self.lin = lin
        self.dropout = dropout
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        width = in_channels
        for _ in range(num_layers):
            self.convs.append(RelConv(width, out_channels))
            self.batch_norms.append(BN(out_channels))
            width = out_channels
        # Width fed into the optional final linear: input plus every layer
        # output when concatenating, else just the last layer's width.
        feat_dim = (self.in_channels + (num_layers * out_channels)) if self.cat else out_channels
        if self.lin:
            self.out_channels = out_channels
            self.final = Lin(feat_dim, out_channels)
        else:
            self.out_channels = feat_dim
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every conv/batch-norm pair and the final linear."""
        for (conv, batch_norm) in zip(self.convs, self.batch_norms):
            conv.reset_parameters()
            batch_norm.reset_parameters()
        if self.lin:
            self.final.reset_parameters()

    def forward(self, x, edge_index, *args):
        """Run every conv layer; optionally concat outputs and project."""
        layer_outputs = [x]
        for (conv, bn) in zip(self.convs, self.batch_norms):
            h = F.relu(conv(layer_outputs[-1], edge_index))
            if self.batch_norm:
                h = bn(h)
            h = F.dropout(h, p=self.dropout, training=self.training)
            layer_outputs.append(h)
        out = torch.cat(layer_outputs, dim=-1) if self.cat else layer_outputs[-1]
        return self.final(out) if self.lin else out

    def __repr__(self):
        return '{}({}, {}, num_layers={}, batch_norm={}, cat={}, lin={}, dropout={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.num_layers, self.batch_norm, self.cat, self.lin, self.dropout)
|
class SplineCNN(torch.nn.Module):
    """Stack of SplineConv layers (kernel_size=5) with ReLU, optional
    concatenation of all layer outputs, dropout, and an optional projection."""

    def __init__(self, in_channels, out_channels, dim, num_layers, cat=True, lin=True, dropout=0.0):
        super(SplineCNN, self).__init__()
        self.in_channels = in_channels
        self.dim = dim
        self.num_layers = num_layers
        self.cat = cat
        self.lin = lin
        self.dropout = dropout
        self.convs = torch.nn.ModuleList()
        width = in_channels
        for _ in range(num_layers):
            self.convs.append(SplineConv(width, out_channels, dim, kernel_size=5))
            width = out_channels
        # Width fed into the optional final linear.
        feat_dim = (self.in_channels + (num_layers * out_channels)) if self.cat else out_channels
        if self.lin:
            self.out_channels = out_channels
            self.final = Lin(feat_dim, out_channels)
        else:
            self.out_channels = feat_dim
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every conv layer and the final linear (when present)."""
        for conv in self.convs:
            conv.reset_parameters()
        if self.lin:
            self.final.reset_parameters()

    def forward(self, x, edge_index, edge_attr, *args):
        """Run all spline convs; concat (optional), dropout, project (optional)."""
        layer_outputs = [x]
        for conv in self.convs:
            layer_outputs.append(F.relu(conv(layer_outputs[-1], edge_index, edge_attr)))
        out = torch.cat(layer_outputs, dim=-1) if self.cat else layer_outputs[-1]
        # Unlike RelCNN, dropout here comes after the (optional) concatenation.
        out = F.dropout(out, p=self.dropout, training=self.training)
        return self.final(out) if self.lin else out

    def __repr__(self):
        return '{}({}, {}, dim={}, num_layers={}, cat={}, lin={}, dropout={})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.dim, self.num_layers, self.cat, self.lin, self.dropout)
|
class SumEmbedding(object):
    """Transform that collapses the per-node embedding matrices `x1` and `x2`
    of a data object by summing over dimension 1, in place."""

    def __call__(self, data):
        data.x1 = data.x1.sum(dim=1)
        data.x2 = data.x2.sum(dim=1)
        return data
|
def train():
    """Run one full-batch training step on the global `data`; return the loss."""
    model.train()
    optimizer.zero_grad()
    outputs = model(data.x1, data.edge_index1, None, None,
                    data.x2, data.edge_index2, None, None, data.train_y)
    similarity = outputs[1]
    step_loss = model.loss(similarity, data.train_y)
    step_loss.backward()
    optimizer.step()
    return step_loss
|
@torch.no_grad()
def test():
    """Evaluate on the global `data`; return (hits@1, hits@10)."""
    model.eval()
    outputs = model(data.x1, data.edge_index1, None, None,
                    data.x2, data.edge_index2, None, None)
    similarity = outputs[1]
    hits1 = model.acc(similarity, data.test_y)
    hits10 = model.hits_at_k(10, similarity, data.test_y)
    return (hits1, hits10)
|
def generate_y(y_col):
    """Pair each target index in `y_col` with its row position.

    Returns a (2, N) tensor on the module-level `device`.
    """
    rows = torch.arange(y_col.size(0), device=device)
    return torch.stack((rows, y_col), dim=0)
|
def train():
    """Train one epoch over `train_loader`; return the per-sample average loss."""
    model.train()
    running_loss = 0
    for batch in train_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        (S_0, S_L) = model(batch.x_s, batch.edge_index_s, batch.edge_attr_s, batch.x_s_batch,
                           batch.x_t, batch.edge_index_t, batch.edge_attr_t, batch.x_t_batch)
        y = generate_y(batch.y)
        loss = model.loss(S_0, y)
        # Add the refined-correspondence loss only when refinement steps ran.
        if model.num_steps > 0:
            loss = loss + model.loss(S_L, y)
        loss.backward()
        optimizer.step()
        # Weight by batch size (max batch index + 1) for a per-sample average.
        running_loss += loss.item() * (batch.x_s_batch.max().item() + 1)
    return running_loss / len(train_loader.dataset)
|
@torch.no_grad()
def test(dataset):
    """Compute matching accuracy over at least `args.test_samples` examples,
    cycling through the dataset as many times as necessary."""
    model.eval()
    loader = DataLoader(dataset, args.batch_size, shuffle=False, follow_batch=['x_s', 'x_t'])
    correct = 0
    num_examples = 0
    # NOTE(review): if the loader yields no batches this loops forever —
    # confirm the dataset is non-empty at the call site.
    while num_examples < args.test_samples:
        for batch in loader:
            batch = batch.to(device)
            (S_0, S_L) = model(batch.x_s, batch.edge_index_s, batch.edge_attr_s, batch.x_s_batch,
                               batch.x_t, batch.edge_index_t, batch.edge_attr_t, batch.x_t_batch)
            y = generate_y(batch.y)
            correct += model.acc(S_L, y, reduction='sum')
            num_examples += y.size(1)
            if num_examples >= args.test_samples:
                return correct / num_examples
|
class RandomGraphDataset(torch.utils.data.Dataset):
    """Synthetic keypoint-matching dataset.

    Each item is a pair of 2-D point clouds: shared inliers (the target copy
    gets Gaussian jitter of scale `noise`) plus independent outliers in each
    cloud, packed into a single Data object with `_s` / `_t` key suffixes.
    """

    def __init__(self, min_inliers, max_inliers, min_outliers, max_outliers, min_scale=0.9, max_scale=1.2, noise=0.05, transform=None):
        self.min_inliers = min_inliers
        self.max_inliers = max_inliers
        self.min_outliers = min_outliers
        self.max_outliers = max_outliers
        self.min_scale = min_scale
        self.max_scale = max_scale
        self.noise = noise
        self.transform = transform

    def __len__(self):
        # Fixed epoch length; items are generated on the fly.
        return 1024

    def __getitem__(self, idx):
        """Generate one random (source, target) pair; `idx` is ignored."""
        num_inliers = random.randint(self.min_inliers, self.max_inliers)
        num_outliers = random.randint(self.min_outliers, self.max_outliers)
        # Inliers: uniform in [-1, 1]^2; the target copy is a jittered clone.
        pos_s = (2 * torch.rand((num_inliers, 2))) - 1
        pos_t = pos_s + (self.noise * torch.randn_like(pos_s))
        y_s = torch.arange(pos_s.size(0))
        y_t = torch.arange(pos_t.size(0))
        # Outliers: uniform in [2, 3]^2, drawn independently for each cloud.
        pos_s = torch.cat([pos_s, 3 - torch.rand((num_outliers, 2))], dim=0)
        pos_t = torch.cat([pos_t, 3 - torch.rand((num_outliers, 2))], dim=0)
        data_s = Data(pos=pos_s, y_index=y_s)
        data_t = Data(pos=pos_t, y=y_t)
        if self.transform is not None:
            data_s = self.transform(data_s)
            data_t = self.transform(data_t)
        # Merge both clouds into one Data object with suffixed keys.
        merged = Data(num_nodes=pos_s.size(0))
        for key in data_s.keys:
            merged['{}_s'.format(key)] = data_s[key]
        for key in data_t.keys:
            merged['{}_t'.format(key)] = data_t[key]
        return merged
|
def train():
    """Train one epoch on the synthetic loader.

    Returns (mean loss per batch, matching accuracy over all examples).
    """
    model.train()
    total_loss = 0
    total_correct = 0
    total_examples = 0
    for (step, batch) in enumerate(train_loader):
        optimizer.zero_grad()
        batch = batch.to(device)
        (S_0, S_L) = model(batch.x_s, batch.edge_index_s, batch.edge_attr_s, batch.x_s_batch,
                           batch.x_t, batch.edge_index_t, batch.edge_attr_t, batch.x_t_batch)
        # Ground truth: source node index paired with its target node index.
        y = torch.stack([batch.y_index_s, batch.y_t], dim=0)
        loss = model.loss(S_0, y)
        if model.num_steps > 0:
            loss = loss + model.loss(S_L, y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        total_correct += model.acc(S_L, y, reduction='sum')
        total_examples += y.size(1)
    return ((total_loss / len(train_loader)), (total_correct / total_examples))
|
@torch.no_grad()
def test(dataset):
    """Matching accuracy over the dataset's predefined graph pairs,
    assuming identity correspondence (node i matches node i)."""
    model.eval()
    correct = 0
    num_examples = 0
    for pair in dataset.pairs:
        data_s = dataset[pair[0]].to(device)
        data_t = dataset[pair[1]].to(device)
        (S_0, S_L) = model(data_s.x, data_s.edge_index, data_s.edge_attr, None,
                           data_t.x, data_t.edge_index, data_t.edge_attr, None)
        # Identity ground truth: node i in the source matches node i in the target.
        ident = torch.arange(data_s.num_nodes, device=device)
        y = torch.stack([ident, ident], dim=0)
        correct += model.acc(S_L, y, reduction='sum')
        num_examples += y.size(1)
    return correct / num_examples
|
def generate_voc_y(y_col):
    """Stack row positions 0..N-1 over the target columns `y_col`.

    Returns a (2, N) correspondence tensor on the module-level `device`.
    """
    row_idx = torch.arange(y_col.size(0), device=device)
    return torch.stack((row_idx, y_col), dim=0)
|
def pretrain():
    """Run one pre-training epoch over `pretrain_loader`.

    Returns the per-sample average loss.
    """
    model.train()
    running_loss = 0
    for batch in pretrain_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        (S_0, S_L) = model(batch.x_s, batch.edge_index_s, batch.edge_attr_s, batch.x_s_batch,
                           batch.x_t, batch.edge_index_t, batch.edge_attr_t, batch.x_t_batch)
        y = generate_voc_y(batch.y)
        loss = model.loss(S_0, y)
        # Add the refined-correspondence loss only when refinement steps ran.
        if model.num_steps > 0:
            loss = loss + model.loss(S_L, y)
        loss.backward()
        optimizer.step()
        # Weight by batch size (max batch index + 1) for a per-sample average.
        running_loss += loss.item() * (batch.x_s_batch.max().item() + 1)
    return running_loss / len(pretrain_loader.dataset)
|
def generate_y(num_nodes, batch_size):
    """Build a (2, num_nodes * batch_size) correspondence tensor.

    Row 0 holds global (batched) node ids 0..num_nodes*batch_size-1; row 1
    holds the per-graph ids 0..num_nodes-1 tiled once per graph in the batch.
    Uses the module-level `device`.
    """
    row = torch.arange(num_nodes * batch_size, device=device)
    # Tile 0..num_nodes-1 once per graph in the batch.
    col = row[:num_nodes].repeat(batch_size)
    return torch.stack([row, col], dim=0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.