code stringlengths 17 6.64M |
|---|
class ResBlk(nn.Module):
    """Pre-activation residual block with optional instance norm,
    average-pool downsampling, and a learned 1x1 shortcut whenever the
    channel count changes.  The skip sum is rescaled by 1/sqrt(2) so
    variance stays roughly constant through the block."""

    def __init__(self, dim_in, dim_out, actv=nn.LeakyReLU(0.2), normalize=False, downsample=False):
        super().__init__()
        self.actv = actv
        self.normalize = normalize
        self.downsample = downsample
        # A projection shortcut is only needed when channels change.
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out)

    def _build_weights(self, dim_in, dim_out):
        # Creation order kept stable for reproducible initialisation.
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        if self.normalize:
            # Both norms act on dim_in channels: conv1 is dim_in -> dim_in.
            self.norm1 = nn.InstanceNorm2d(dim_in, affine=True)
            self.norm2 = nn.InstanceNorm2d(dim_in, affine=True)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = self.conv1x1(x) if self.learned_sc else x
        return F.avg_pool2d(out, 2) if self.downsample else out

    def _residual(self, x):
        out = self.norm1(x) if self.normalize else x
        out = self.conv1(self.actv(out))
        if self.downsample:
            out = F.avg_pool2d(out, 2)
        out = self.norm2(out) if self.normalize else out
        return self.conv2(self.actv(out))

    def forward(self, x):
        # Unit-variance-preserving residual sum.
        return (self._shortcut(x) + self._residual(x)) / math.sqrt(2)
|
class AdaIN(nn.Module):
    """Adaptive instance normalisation: a style vector ``s`` is projected
    to per-channel scale (1 + gamma) and shift (beta), which are applied
    to the instance-normalised input."""

    def __init__(self, style_dim, num_features):
        super().__init__()
        self.norm = nn.InstanceNorm2d(num_features, affine=False)
        # One linear layer produces both gamma and beta (2 * C outputs).
        self.fc = nn.Linear(style_dim, num_features * 2)

    def forward(self, x, s):
        style = self.fc(s).view(s.size(0), -1, 1, 1)
        gamma, beta = style.chunk(2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
|
class AdainResBlk(nn.Module):
    """Residual block whose normalisation layers are style-conditioned
    (AdaIN).  Supports nearest-neighbour upsampling and a learned 1x1
    shortcut when the channel count changes.

    When ``w_hpf`` is 0 the skip connection is added and the sum is
    rescaled by 1/sqrt(2); otherwise only the residual branch is
    returned (the caller injects high-pass content separately)."""

    def __init__(self, dim_in, dim_out, style_dim=64, w_hpf=0, actv=nn.LeakyReLU(0.2), upsample=False):
        super().__init__()
        self.w_hpf = w_hpf
        self.actv = actv
        self.upsample = upsample
        self.learned_sc = dim_in != dim_out
        self._build_weights(dim_in, dim_out, style_dim)

    def _build_weights(self, dim_in, dim_out, style_dim=64):
        # Creation order kept stable for reproducible initialisation.
        self.conv1 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_out, dim_out, 3, 1, 1)
        self.norm1 = AdaIN(style_dim, dim_in)
        self.norm2 = AdaIN(style_dim, dim_out)
        if self.learned_sc:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)

    def _shortcut(self, x):
        out = F.interpolate(x, scale_factor=2, mode='nearest') if self.upsample else x
        return self.conv1x1(out) if self.learned_sc else out

    def _residual(self, x, s):
        out = self.actv(self.norm1(x, s))
        if self.upsample:
            out = F.interpolate(out, scale_factor=2, mode='nearest')
        out = self.conv1(out)
        return self.conv2(self.actv(self.norm2(out, s)))

    def forward(self, x, s):
        out = self._residual(x, s)
        if self.w_hpf == 0:
            out = (out + self._shortcut(x)) / math.sqrt(2)
        return out
|
class HighPass(nn.Module):
    """Depthwise 3x3 Laplacian-style high-pass filter, scaled by
    1/w_hpf.  The same kernel is applied independently to every input
    channel (grouped convolution with groups == channels)."""

    def __init__(self, w_hpf, device):
        super(HighPass, self).__init__()
        kernel = torch.tensor([[-1.0, -1.0, -1.0],
                               [-1.0, 8.0, -1.0],
                               [-1.0, -1.0, -1.0]])
        self.filter = kernel.to(device) / w_hpf

    def forward(self, x):
        channels = x.size(1)
        # Expand kernel to (C, 1, 3, 3) for a depthwise convolution.
        weight = self.filter.view(1, 1, 3, 3).repeat(channels, 1, 1, 1)
        return F.conv2d(x, weight, padding=1, groups=channels)
|
class Attention(nn.Module):
    """Two-layer MLP (style_dim -> style_dim) with a ReLU in between,
    used to transform a style code."""

    def __init__(self, style_dim=64):
        super().__init__()
        mlp = [nn.Linear(style_dim, style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim)]
        self.layers = nn.Sequential(*mlp)

    def forward(self, s):
        return self.layers(s)
|
class Generator(nn.Module):
    """StarGAN-v2-style generator: an encoder of ResBlks followed by a
    mirrored decoder of style-conditioned AdainResBlks.

    When ``w_hpf`` > 0, encoder features at spatial sizes 32/64/128 are
    cached during encoding and re-injected during decoding through a
    high-pass filter, gated by the externally supplied ``masks``.
    """

    def __init__(self, img_size=256, style_dim=64, max_conv_dim=512, w_hpf=1):
        super().__init__()
        # Initial channel width chosen so dim_in * img_size == 2**14.
        dim_in = ((2 ** 14) // img_size)
        self.img_size = img_size
        self.from_rgb = nn.Conv2d(3, dim_in, 3, 1, 1)
        self.encode = nn.ModuleList()
        self.decode = nn.ModuleList()
        self.to_rgb = nn.Sequential(nn.InstanceNorm2d(dim_in, affine=True), nn.LeakyReLU(0.2), nn.Conv2d(dim_in, 3, 1, 1, 0))
        # NOTE(review): self.attention is built but never used in forward().
        self.attention = Attention(style_dim)
        repeat_num = (int(np.log2(img_size)) - 4)
        if (w_hpf > 0):
            repeat_num += 1
        for _ in range(repeat_num):
            dim_out = min((dim_in * 2), max_conv_dim)
            self.encode.append(ResBlk(dim_in, dim_out, normalize=True, downsample=True))
            # Decoder blocks are prepended so they mirror the encoder.
            self.decode.insert(0, AdainResBlk(dim_out, dim_in, style_dim, w_hpf=w_hpf, upsample=True))
            dim_in = dim_out
        # Two extra blocks at the bottleneck resolution.
        for _ in range(2):
            self.encode.append(ResBlk(dim_out, dim_out, normalize=True))
            self.decode.insert(0, AdainResBlk(dim_out, dim_out, style_dim, w_hpf=w_hpf))
        if (w_hpf > 0):
            device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
            self.hpf = HighPass(w_hpf, device)

    def forward(self, x, s, masks=None):
        """Translate image batch ``x`` using style codes ``s``.

        ``masks``: optional pair of tensors; masks[0] is used at spatial
        size 32, masks[1] at 64/128 -- presumably FAN heatmaps (see
        callers); confirm shape conventions with nets.fan.get_heatmap.
        """
        x = self.from_rgb(x)
        cache = {}
        for block in self.encode:
            if ((masks is not None) and (x.size(2) in [32, 64, 128])):
                # Cache pre-block encoder features keyed by spatial size.
                cache[x.size(2)] = x
            x = block(x)
        for block in self.decode:
            x = block(x, s)
            if ((masks is not None) and (x.size(2) in [32, 64, 128])):
                mask = (masks[0] if (x.size(2) in [32]) else masks[1])
                mask = F.interpolate(mask, size=x.size(2), mode='bilinear')
                # Re-inject masked high-frequency encoder content.
                x = (x + self.hpf((mask * cache[x.size(2)])))
        return self.to_rgb(x)
|
class StyleEncoder(nn.Module):
    """Maps an image to a per-domain style code, then decomposes it.

    forward(x, y, y1) returns (s, s1, s2, s3) where, from the code:
      s  -- style code selected by domain label ``y``
      s1 -- transform1(s)
      s2 -- s - s1 (the residual of the decomposition)
      s3 -- transform2[y1](s2), the residual re-mapped for domain ``y1``
    The semantic intent of the decomposition is not visible here --
    interpret via the translate_* callers.
    """

    def __init__(self, img_size=256, style_dim=64, num_domains=2, max_conv_dim=512):
        super().__init__()
        dim_in = ((2 ** 14) // img_size)
        blocks = []
        blocks += [nn.Conv2d(3, dim_in, 3, 1, 1)]
        # Downsample to 4x4, then collapse with a 4x4 valid convolution.
        repeat_num = (int(np.log2(img_size)) - 2)
        for _ in range(repeat_num):
            dim_out = min((dim_in * 2), max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out
        blocks += [nn.LeakyReLU(0.2)]
        blocks += [nn.Conv2d(dim_out, dim_out, 4, 1, 0)]
        blocks += [nn.LeakyReLU(0.2)]
        self.shared = nn.Sequential(*blocks)
        # One linear style head per domain.
        self.unshared = nn.ModuleList()
        for _ in range(num_domains):
            self.unshared += [nn.Linear(dim_out, style_dim)]
        self.transform1 = nn.Sequential(nn.Linear(style_dim, style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim))
        # One residual-transform MLP per domain.
        self.transform2 = nn.ModuleList()
        for _ in range(num_domains):
            self.transform2 += [nn.Sequential(nn.Linear(style_dim, style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim), nn.ReLU(), nn.Linear(style_dim, style_dim))]
        # NOTE(review): self.attention is constructed but unused in forward.
        self.attention = Attention(style_dim)

    def forward(self, x, y, y1):
        h = self.shared(x)
        h = h.view(h.size(0), (- 1))
        out = []
        for layer in self.unshared:
            out += [layer(h)]
        # (batch, num_domains, style_dim)
        out = torch.stack(out, dim=1)
        idx = torch.LongTensor(range(y.size(0))).to(y.device)
        # Pick each sample's style code for its domain label y.
        s = out[(idx, y)]
        s1 = self.transform1(s)
        s2 = (s - s1)
        out1 = []
        for layer in self.transform2:
            out1 += [layer(s2)]
        out1 = torch.stack(out1, dim=1)
        idx1 = torch.LongTensor(range(y1.size(0))).to(y1.device)
        s3 = out1[(idx1, y1)]
        return (s, s1, s2, s3)
|
class Discriminator(nn.Module):
    """Multi-task discriminator: a shared conv trunk ending in one
    real/fake logit per domain; forward returns only the logit matching
    each sample's domain label ``y``."""

    def __init__(self, img_size=256, num_domains=2, max_conv_dim=512):
        super().__init__()
        dim_in = 2 ** 14 // img_size
        layers = [nn.Conv2d(3, dim_in, 3, 1, 1)]
        # Downsample until the feature map is 4x4.
        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            layers.append(ResBlk(dim_in, dim_out, downsample=True))
            dim_in = dim_out
        layers.extend([
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, dim_out, 4, 1, 0),
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, num_domains, 1, 1, 0),
        ])
        self.main = nn.Sequential(*layers)

    def forward(self, x, y):
        logits = self.main(x)
        logits = logits.view(logits.size(0), -1)
        idx = torch.LongTensor(range(y.size(0))).to(y.device)
        # Select each sample's logit for its own domain.
        return logits[idx, y]
|
def build_model(args):
    """Construct generator, style encoder and discriminator, plus EMA
    copies of generator/style encoder, returned as two Munch namespaces.

    Side effects: when args.w_hpf > 0 a pretrained FAN landmark network
    is loaded from args.wing_path and shared (not copied) between nets
    and nets_ema.  A pretrained feature extractor is attached under the
    attribute name 'vgg'.
    """
    generator = Generator(args.img_size, args.style_dim, w_hpf=args.w_hpf)
    style_encoder = StyleEncoder(args.img_size, args.style_dim, args.num_domains)
    discriminator = Discriminator(args.img_size, args.num_domains)
    generator_ema = copy.deepcopy(generator)
    style_encoder_ema = copy.deepcopy(style_encoder)
    nets = Munch(generator=generator, style_encoder=style_encoder, discriminator=discriminator)
    nets_ema = Munch(generator=generator_ema, style_encoder=style_encoder_ema)
    if (args.w_hpf > 0):
        fan = FAN(fname_pretrained=args.wing_path).eval()
        nets.fan = fan
        nets_ema.fan = fan
    # NOTE(review): despite the attribute name 'vgg', this is ResNet-18
    # truncated before its final fc layer, kept in eval mode.
    resnet = getattr(torchvision.models, 'resnet18')(pretrained=True)
    resnet_features = nn.Sequential(*list(resnet.children())[:(- 1)])
    resnet_features = resnet_features.eval()
    nets.vgg = resnet_features
    nets_ema.vgg = resnet_features
    return (nets, nets_ema)
|
class Solver(nn.Module):
    """Evaluation harness: builds the networks, loads an EMA checkpoint
    and runs reference-mode metric computation."""

    def __init__(self, args):
        super().__init__()
        self.args = args
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        (self.nets, self.nets_ema) = build_model(args)
        # Register sub-networks as attributes so .to(device) and
        # named_children() see them.
        for (name, module) in self.nets.items():
            utils.print_network(module, name)
            setattr(self, name, module)
        for (name, module) in self.nets_ema.items():
            setattr(self, (name + '_ema'), module)
        # '{:06d}' is presumably filled in by CheckpointIO with the step
        # number -- TODO confirm against CheckpointIO's implementation.
        self.ckptios = [CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'), **self.nets_ema)]
        self.to(self.device)
        # He-initialise everything except EMA copies and the pretrained
        # fan / vgg feature extractors.
        for (name, network) in self.named_children():
            if (('ema' not in name) and ('fan' not in name) and ('vgg' not in name)):
                print(('Initializing %s...' % name))
                network.apply(utils.he_init)

    def _save_checkpoint(self, step):
        for ckptio in self.ckptios:
            ckptio.save(step)

    def _load_checkpoint(self, step):
        for ckptio in self.ckptios:
            ckptio.load(step)

    def _reset_grad(self):
        # NOTE(review): self.optims is never assigned anywhere in this
        # file -- verify it is set elsewhere before calling this.
        for optim in self.optims.values():
            optim.zero_grad()

    @torch.no_grad()
    def evaluate(self):
        """Load the checkpoint at args.resume_iter and compute
        reference-mode evaluation metrics."""
        args = self.args
        nets_ema = self.nets_ema
        resume_iter = args.resume_iter
        self._load_checkpoint(args.resume_iter)
        calculate_metrics(nets_ema, args, step=resume_iter, mode='reference')
|
def save_json(json_file, filename):
    """Serialise ``json_file`` (any JSON-compatible object) to
    ``filename`` with 4-space indentation, preserving insertion order."""
    with open(filename, 'w') as fp:
        json.dump(json_file, fp, indent=4, sort_keys=False)
|
def print_network(network, name):
    """Print the total number of trainable-and-frozen parameters of
    ``network`` under the label ``name``."""
    num_params = sum(p.numel() for p in network.parameters())
    print(('Number of parameters of %s: %i' % (name, num_params)))
|
def he_init(module):
    """Kaiming-normal (He) initialisation for Conv2d and Linear weights,
    with zeroed biases.  Intended for use with ``nn.Module.apply``.
    The two layer types received identical treatment, so the branches
    are merged."""
    if isinstance(module, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(module.weight, mode='fan_in', nonlinearity='relu')
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
|
def denormalize(x):
    """Map an image tensor from [-1, 1] back to [0, 1], clamping
    out-of-range values in place."""
    shifted = (x + 1) / 2
    return shifted.clamp_(0, 1)
|
def save_image(x, ncol, filename):
    """Denormalise ``x`` from [-1, 1] and write it to ``filename`` as an
    image grid with ``ncol`` images per row and no padding."""
    grid = denormalize(x).cpu()
    vutils.save_image(grid, filename, nrow=ncol, padding=0)
|
@torch.no_grad()
def translate_and_reconstruct(nets, args, x_src, y_src, x_ref, y_ref, filename):
    """Translate x_src with a style taken from x_ref, reconstruct it
    back with its own style, and save [src | ref | fake | rec] as one
    image grid.

    NOTE(review): the style encoder is called with (x_ref, y_src, y_ref)
    -- the source label first -- confirm this ordering is intended.
    """
    (N, C, H, W) = x_src.size()
    (s_ref, s_ref1, s_ref2, s_ref3) = nets.style_encoder(x_ref, y_src, y_ref)
    masks = (nets.fan.get_heatmap(x_src) if (args.w_hpf > 0) else None)
    # Style used for translation: shared part + domain-specific residual.
    x_fake = nets.generator(x_src, (s_ref1 + s_ref3), masks=masks)
    (s_src, _, _, _) = nets.style_encoder(x_src, y_src, y_src)
    masks = (nets.fan.get_heatmap(x_fake) if (args.w_hpf > 0) else None)
    x_rec = nets.generator(x_fake, s_src, masks=masks)
    x_concat = [x_src, x_ref, x_fake, x_rec]
    x_concat = torch.cat(x_concat, dim=0)
    save_image(x_concat, N, filename)
    del x_concat
|
@torch.no_grad()
def translate_using_reference(nets, args, x_src, y_src, x_ref, y_ref, filename):
    """Save a grid of reference-guided translations; a white tile (wb)
    is prepended to the source and reference rows for layout."""
    (N, C, H, W) = x_src.size()
    wb = torch.ones(1, C, H, W).to(x_src.device)
    x_src_with_wb = torch.cat([wb, x_src], dim=0)
    x_ref_with_wb = torch.cat([wb, x_ref], dim=0)
    masks = (nets.fan.get_heatmap(x_src) if (args.w_hpf > 0) else None)
    x_concat = [x_src_with_wb]
    x_concat += [x_ref_with_wb]
    # NOTE(review): nothing in this loop body depends on i, so every
    # iteration appends an identical row -- confirm whether i was meant
    # to select a domain.
    for i in range(args.num_domains):
        (s_src, s_src1, s_src2, s_src3) = nets.style_encoder(x_src, y_src, y_ref)
        (s_ref, s_ref1, s_ref2, s_ref3) = nets.style_encoder(x_ref, y_ref, y_src)
        # Source's shared component + reference's residual component.
        x_fake = nets.generator(x_src, (s_src1 + s_ref2), masks=masks)
        x_fake_with_ref = torch.cat([wb, x_fake], dim=0)
        x_concat += [x_fake_with_ref]
    x_concat = torch.cat(x_concat, dim=0)
    # NOTE(review): leftover debug print.
    print(x_concat.shape)
    save_image(x_concat, (N + 1), filename)
    del x_concat
|
@torch.no_grad()
def translate_self(nets, args, x_src, y_src, filename):
    """For every domain, translate x_src into that domain twice (plain,
    and with the domain residual extrapolated by 1.25), saving both the
    per-image results and one combined grid."""
    (N, C, H, W) = x_src.size()
    wb = torch.ones(1, C, H, W).to(x_src.device)
    x_src_with_wb = torch.cat([wb, x_src], dim=0)
    masks = (nets.fan.get_heatmap(x_src) if (args.w_hpf > 0) else None)
    x_concat = [x_src_with_wb]
    for i in range(args.num_domains):
        # Target every sample at domain i.
        y_trg = np.zeros(x_src.size(0))
        for j in range(x_src.size(0)):
            y_trg[j] = i
        y_ref = torch.from_numpy(y_trg).long().to(x_src.device)
        (s_ref, s_ref1, s_ref2, s_ref3) = nets.style_encoder(x_src, y_src, y_ref)
        x_fake = nets.generator(x_src, (s_ref1 + s_ref3), masks=masks)
        # Extrapolated residual: s2 + 1.25 * (s3 - s2).
        x_fake1 = nets.generator(x_src, (s_ref1 + (s_ref2 + (1.25 * (s_ref3 - s_ref2)))), masks=masks)
        x_fake_with_ref = torch.cat([wb, x_fake], dim=0)
        x_fake_with_ref1 = torch.cat([wb, x_fake1], dim=0)
        x_concat += [x_fake_with_ref]
        x_concat += [x_fake_with_ref1]
        # NOTE(review): these per-image files use the same names on every
        # domain iteration, so only the last domain's outputs survive.
        for j in range(N):
            save_image(x_fake[j], 1, ospj(args.result_dir, ('%02d.png' % j)))
            save_image(x_fake1[j], 1, ospj(args.result_dir, ('%02d_1.png' % j)))
    x_concat = torch.cat(x_concat, dim=0)
    save_image(x_concat, (N + 1), filename)
    del x_concat
|
@torch.no_grad()
def debug_image(nets, args, inputs, step):
    """Write a cycle-consistency sample grid for the current step, using
    a random target domain different from each sample's source domain."""
    (x_src, y_src) = (inputs.x_src, inputs.y_src)
    device = inputs.x_src.device
    N = inputs.x_src.size(0)
    # Draw target domains; re-draw while equal to the source domain.
    y_ref = np.zeros(N)
    for i in range(N):
        while (y_ref[i] == y_src[i]):
            y_ref[i] = random.randint(0, (args.num_domains - 1))
    y_ref = torch.from_numpy(y_ref).long().to(device)
    filename = ospj(args.sample_dir, ('%06d_cycle_consistency.jpg' % step))
    translate_and_reconstruct(nets, args, x_src, y_src, x_src, y_ref, filename)
|
def str2bool(v):
    """Parse a boolean command-line flag: True only for the
    case-insensitive string 'true'.

    Fixes the previous ``v.lower() in 'true'``, a substring test that
    wrongly returned True for values like 'ru', 'e' and ''.
    """
    return v.lower() == 'true'
|
def subdirs(dname):
    """Return the names of the immediate subdirectories of ``dname``."""
    entries = os.listdir(dname)
    return [entry for entry in entries if os.path.isdir(os.path.join(dname, entry))]
|
def main(args):
    """Entry point: seed all RNGs for reproducibility, then run
    evaluation through Solver."""
    print(args)
    # Let cuDNN pick the fastest kernels for fixed input shapes.
    cudnn.benchmark = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    solver = Solver(args)
    solver.evaluate()
|
@torch.no_grad()
def calculate_metrics(nets, args, step, mode):
    """Generate translated images for every (source, target) domain pair
    into args.eval_dir, then compute FID for all tasks.

    ``nets`` must provide style_encoder, generator and (when
    args.w_hpf > 0) fan.
    """
    print('Calculating evaluation metrics...')
    assert (mode in ['latent', 'reference'])
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    domains = os.listdir(args.val_img_dir)
    domains.sort()
    num_domains = len(domains)
    print(('Number of domains: %d' % num_domains))
    # NOTE(review): lpips_dict is never populated -- leftover.
    lpips_dict = OrderedDict()
    for (trg_idx, trg_domain) in enumerate(domains):
        src_domains = [x for x in domains if (x != trg_domain)]
        for (src_idx, src_domain) in enumerate(src_domains):
            path_src = os.path.join(args.val_img_dir, src_domain)
            loader_src = get_eval_loader(root=path_src, img_size=args.img_size, batch_size=args.val_batch_size, imagenet_normalize=False, shuffle=False)
            images = os.listdir(path_src)
            images.sort()
            img_num = 0
            task = ('%s2%s' % (src_domain, trg_domain))
            path_fake = os.path.join(args.eval_dir, task)
            shutil.rmtree(path_fake, ignore_errors=True)
            os.makedirs(path_fake)
            print(('Generating images for %s...' % task))
            for (i, x_src) in enumerate(tqdm(loader_src, total=len(loader_src))):
                N = x_src.size(0)
                x_src = x_src.to(device)
                # src_domains omits trg_domain, so indices at or past
                # trg_idx are shifted by one to recover the global
                # domain index.
                if (src_idx >= trg_idx):
                    y_src = torch.tensor(([(src_idx + 1)] * N)).to(device)
                else:
                    y_src = torch.tensor(([src_idx] * N)).to(device)
                y_trg = torch.tensor(([trg_idx] * N)).to(device)
                masks = (nets.fan.get_heatmap(x_src) if (args.w_hpf > 0) else None)
                for j in range(args.num_outs_per_domain):
                    (s_trg, s_trg1, s_trg2, s_trg3) = nets.style_encoder(x_src, y_src, y_trg)
                    # Extrapolate the domain residual by args.degree.
                    x_fake = nets.generator(x_src, ((s_trg1 + s_trg2) + (args.degree * (s_trg3 - s_trg2))), masks=masks)
                    for k in range(N):
                        # NOTE(review): img_num keeps advancing across j,
                        # so num_outs_per_domain > 1 would mislabel files
                        # and eventually index past `images` -- presumably
                        # it is 1 here; confirm.
                        filename = os.path.join(path_fake, ('%s.png' % images[img_num].split('.')[0]))
                        utils.save_image(x_fake[k], ncol=1, filename=filename)
                        img_num += 1
            del loader_src
    calculate_fid_for_all_tasks(args, domains, step=step, mode=mode)
|
def calculate_fid_for_all_tasks(args, domains, step, mode):
    """Compute FID per (src, trg) translation task against the real
    training images of the target domain, append the mean, and dump
    everything to a JSON file in args.eval_dir."""
    print('Calculating FID for all tasks...')
    fid_values = OrderedDict()
    for trg_domain in domains:
        src_domains = [x for x in domains if (x != trg_domain)]
        for src_domain in src_domains:
            task = ('%s2%s' % (src_domain, trg_domain))
            path_real = os.path.join(args.train_img_dir, trg_domain)
            path_fake = os.path.join(args.eval_dir, task)
            print(('Calculating FID for %s...' % task))
            # 299 is the Inception-v3 input resolution.
            fid_value = calculate_fid_given_paths(paths=[path_real, path_fake], img_size=299, batch_size=args.val_batch_size)
            fid_values[('FID_%s/%s' % (mode, task))] = fid_value
    # Mean FID over all tasks.
    fid_mean = 0
    for (_, value) in fid_values.items():
        fid_mean += (value / len(fid_values))
    fid_values[('FID_%s/mean' % mode)] = fid_mean
    filename = os.path.join(args.eval_dir, ('FID_%.5i_%s.json' % (step, mode)))
    utils.save_json(fid_values, filename)
|
class InceptionV3(nn.Module):
    """Pretrained Inception-v3 truncated to the globally pooled feature
    vector -- the standard FID feature extractor."""

    def __init__(self):
        super().__init__()
        inception = models.inception_v3(pretrained=True)
        # The four blocks partition the torchvision Inception-v3 layers
        # up to (but not including) the classifier.
        self.block1 = nn.Sequential(inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2))
        self.block2 = nn.Sequential(inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2))
        self.block3 = nn.Sequential(inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e)
        self.block4 = nn.Sequential(inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1)))

    def forward(self, x):
        """Return flattened pooled features for batch ``x``."""
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        return x.view(x.size(0), (- 1))
|
def frechet_distance(mu, cov, mu2, cov2):
    """Frechet distance between Gaussians N(mu, cov) and N(mu2, cov2):
    ||mu - mu2||^2 + Tr(cov + cov2 - 2 * (cov @ cov2)^(1/2)).
    The real part is taken to discard numerical imaginary noise from
    the matrix square root."""
    sqrt_prod, _ = linalg.sqrtm(np.dot(cov, cov2), disp=False)
    mean_term = np.sum((mu - mu2) ** 2)
    cov_term = np.trace((cov + cov2) - (2 * sqrt_prod))
    return np.real(mean_term + cov_term)
|
@torch.no_grad()
def calculate_fid_given_paths(paths, img_size=256, batch_size=50):
    """FID between the two image folders paths[0] and paths[1], computed
    from Inception-v3 pooled features."""
    print(('Calculating FID given paths %s and %s...' % (paths[0], paths[1])))
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    inception = InceptionV3().eval().to(device)
    loaders = [get_eval_loader(path, img_size, batch_size) for path in paths]
    (mu, cov) = ([], [])
    for loader in loaders:
        actvs = []
        for x in tqdm(loader, total=len(loader)):
            actv = inception(x.to(device))
            actvs.append(actv)
        actvs = torch.cat(actvs, dim=0).cpu().detach().numpy()
        # Gaussian statistics of the activations per folder.
        mu.append(np.mean(actvs, axis=0))
        cov.append(np.cov(actvs, rowvar=False))
    fid_value = frechet_distance(mu[0], cov[0], mu[1], cov[1])
    return fid_value
|
def normalize(x):
    """L2-normalise ``x`` along its last dimension.  No epsilon is
    added, so an all-zero vector produces NaNs (unchanged behavior)."""
    length = torch.sqrt(x.pow(2).sum(-1, keepdim=True))
    return x / length
|
def slerp(a, b, t):
    """Spherical linear interpolation between ``a`` and ``b`` (both are
    unit-normalised first); t=0 gives normalize(a), t=1 gives
    normalize(b)."""
    a = normalize(a)
    b = normalize(b)
    cos_ab = (a * b).sum(-1, keepdim=True)       # cosine of the angle
    angle = t * torch.acos(cos_ab)               # interpolated angle
    ortho = normalize(b - cos_ab * a)            # direction orthogonal to a
    result = a * torch.cos(angle) + ortho * torch.sin(angle)
    return normalize(result)
|
def lerp(a, b, t):
    """Linear interpolation from ``a`` (t=0) to ``b`` (t=1)."""
    delta = b - a
    return a + delta * t
|
class Dataset(torch.utils.data.Dataset):
    """Video inpainting dataset: each item is ``sample_length`` frames
    sampled from one video, resized to (w, h), paired with random
    motion masks.

    Returns (frame_tensors, mask_tensors); frames are scaled to [-1, 1].
    Fix: __getitem__ previously used a bare ``except:``, which also
    swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
    """

    def __init__(self, args: dict, split='train'):
        self.args = args
        self.split = split
        self.sample_length = args['sample_length']
        self.size = (self.w, self.h) = (args['w'], args['h'])
        if (args['name'] == 'YouTubeVOS'):
            vid_lst_prefix = os.path.join(args['data_root'], args['name'], (split + '_all_frames/JPEGImages'))
            vid_lst = os.listdir(vid_lst_prefix)
            self.video_names = [os.path.join(vid_lst_prefix, name) for name in vid_lst]
        self._to_tensors = transforms.Compose([Stack(), ToTorchFormatTensor()])

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, index):
        # Fall back to item 0 on a corrupt video rather than crashing
        # the whole training run.
        try:
            item = self.load_item(index)
        except Exception:
            print('Loading error in video {}'.format(self.video_names[index]))
            item = self.load_item(0)
        return item

    def load_item(self, index):
        """Load ``sample_length`` reference frames plus matching masks
        for the video at ``index``."""
        video_name = self.video_names[index]
        all_frames = [os.path.join(video_name, name) for name in sorted(os.listdir(video_name))]
        all_masks = create_random_shape_with_random_motion(len(all_frames), imageHeight=self.h, imageWidth=self.w)
        ref_index = get_ref_index(len(all_frames), self.sample_length)
        frames = []
        masks = []
        for idx in ref_index:
            img = Image.open(all_frames[idx]).convert('RGB')
            img = img.resize(self.size)
            frames.append(img)
            masks.append(all_masks[idx])
        if (self.split == 'train'):
            # Flip frames jointly so the clip stays temporally coherent.
            frames = GroupRandomHorizontalFlip()(frames)
        frame_tensors = ((self._to_tensors(frames) * 2.0) - 1.0)
        mask_tensors = self._to_tensors(masks)
        return (frame_tensors, mask_tensors)
|
def get_ref_index(length, sample_length):
    """Pick ``sample_length`` frame indices out of ``length`` frames.

    With probability 0.5 the indices are a sorted random subset;
    otherwise they are a random consecutive window.  Always returns a
    sorted list.
    """
    if random.uniform(0, 1) > 0.5:
        indices = random.sample(range(length), sample_length)
        indices.sort()
    else:
        start = random.randint(0, length - sample_length)
        indices = list(range(start, start + sample_length))
    return indices
|
def get_world_size():
    """Find OMPI world size without calling mpi functions.

    Checks Intel MPI then OpenMPI environment variables, falling back to
    the local GPU count.
    :rtype: int
    """
    for var in ('PMI_SIZE', 'OMPI_COMM_WORLD_SIZE'):
        value = os.environ.get(var)
        if value is not None:
            # `or 1` guards against an empty-string value.
            return int(value or 1)
    return torch.cuda.device_count()
|
def get_global_rank():
    """Find OMPI world rank without calling mpi functions.

    Checks Intel MPI then OpenMPI environment variables; defaults to 0
    for single-process runs.
    :rtype: int
    """
    for var in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(var)
        if value is not None:
            # `or 0` guards against an empty-string value.
            return int(value or 0)
    return 0
|
def get_local_rank():
    """Find OMPI local rank without calling mpi functions.

    Checks Intel MPI then OpenMPI environment variables; defaults to 0.
    :rtype: int
    """
    for var in ('MPI_LOCALRANKID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
        value = os.environ.get(var)
        if value is not None:
            # `or 0` guards against an empty-string value.
            return int(value or 0)
    return 0
|
def get_master_ip():
    """Resolve the master node address for distributed init from Azure
    Batch environment variables, falling back to localhost."""
    master = os.environ.get('AZ_BATCH_MASTER_NODE')
    if master is not None:
        # Value is 'host:port'; only the host part is wanted.
        return master.split(':')[0]
    master = os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE')
    if master is not None:
        return master
    return '127.0.0.1'
|
class AdversarialLoss(nn.Module):
    """
    Adversarial loss
    https://arxiv.org/abs/1711.10337
    """

    def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0):
        """
        type = nsgan | lsgan | hinge
        """
        super(AdversarialLoss, self).__init__()
        self.type = type
        # Buffers so the labels follow .to(device) and state_dict.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        criterion_map = {'nsgan': nn.BCELoss, 'lsgan': nn.MSELoss, 'hinge': nn.ReLU}
        if type in criterion_map:
            self.criterion = criterion_map[type]()

    def __call__(self, outputs, is_real, is_disc=None):
        if self.type == 'hinge':
            if is_disc:
                # Discriminator hinge: mean(ReLU(1 - D(x))) for real,
                # mean(ReLU(1 + D(x))) for fake.
                signed = -outputs if is_real else outputs
                return self.criterion(1 + signed).mean()
            # Generator hinge: maximise the discriminator output.
            return (-outputs).mean()
        target = (self.real_label if is_real else self.fake_label).expand_as(outputs)
        return self.criterion(outputs, target)
|
class SpectralNorm(object):
    """Forward-pre-hook implementation of spectral normalization
    (weight / sigma, with sigma estimated by power iteration); a
    vendored copy of torch.nn.utils.spectral_norm internals.

    NOTE(review): compute_weight calls ``normalize(..., dim=, eps=,
    out=)``.  The module-level ``normalize`` defined earlier in this
    file accepts none of those keywords, so this class appears to expect
    torch.nn.functional.normalize to be in scope -- verify the file's
    imports before use.
    """
    _version = 1

    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps

    def reshape_weight_to_matrix(self, weight):
        """Move ``self.dim`` to the front, then flatten to 2-D for the
        power iteration."""
        weight_mat = weight
        if (self.dim != 0):
            weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
        height = weight_mat.size(0)
        return weight_mat.reshape(height, (- 1))

    def compute_weight(self, module, do_power_iteration):
        """Return weight_orig / sigma; optionally refresh the u/v
        singular-vector estimates in place first."""
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        v = getattr(module, (self.name + '_v'))
        weight_mat = self.reshape_weight_to_matrix(weight)
        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    # Power iteration; out= updates the buffers in place.
                    v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
                    u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
                if (self.n_power_iterations > 0):
                    # Clone so autograd does not see in-place updated
                    # buffers in the sigma computation below.
                    u = u.clone()
                    v = v.clone()
        sigma = torch.dot(u, torch.mv(weight_mat, v))
        weight = (weight / sigma)
        return weight

    def remove(self, module):
        """Detach the current normalized weight as a plain Parameter and
        drop the reparameterization attributes/buffers."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_v'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))

    def __call__(self, module, inputs):
        # Recompute the normalized weight before every forward; power
        # iteration only runs in training mode.
        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))

    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Given u, solve for the v that reproduces target_sigma (used by
        # the state-dict migration path).
        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)
        return v.mul_((target_sigma / torch.dot(u, torch.mv(weight_mat, v))))

    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        """Attach spectral norm to ``module.<name>``: registers the
        ``_orig`` parameter, u/v buffers, the forward pre-hook and the
        state-dict (de)serialization hooks.  Raises if the parameter is
        already spectrally normalized."""
        for (k, hook) in module._forward_pre_hooks.items():
            if (isinstance(hook, SpectralNorm) and (hook.name == name)):
                raise RuntimeError('Cannot register two spectral_norm hooks on the same parameter {}'.format(name))
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        with torch.no_grad():
            weight_mat = fn.reshape_weight_to_matrix(weight)
            (h, w) = weight_mat.size()
            # Random initial estimates of the leading singular vectors.
            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # Plain tensor assignment so the attribute still resolves before
        # the first forward pass recomputes it.
        setattr(module, fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_buffer((fn.name + '_v'), v)
        module.register_forward_pre_hook(fn)
        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
        module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
        return fn
|
class SpectralNormLoadStateDictPreHook(object):
    """Load-state-dict pre-hook for SpectralNorm version migration.

    NOTE(review): upstream PyTorch recomputes v and rescales the weight
    here; this copy stops after reading ``u`` and never uses
    ``weight_mat`` or ``u``, so pre-version-1 checkpoints are not
    actually migrated -- looks truncated; compare against
    torch.nn.utils.spectral_norm before relying on it.
    """

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        fn = self.fn
        version = local_metadata.get('spectral_norm', {}).get((fn.name + '.version'), None)
        if ((version is None) or (version < 1)):
            with torch.no_grad():
                weight_orig = state_dict[((prefix + fn.name) + '_orig')]
                weight_mat = fn.reshape_weight_to_matrix(weight_orig)
                u = state_dict[((prefix + fn.name) + '_u')]
|
class SpectralNormStateDictHook(object):
    """State-dict hook that records the SpectralNorm serialization
    version in the module's local metadata."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, module, state_dict, prefix, local_metadata):
        metadata = local_metadata.setdefault('spectral_norm', {})
        key = self.fn.name + '.version'
        if key in metadata:
            raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(key))
        metadata[key] = self.fn._version
|
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    """Applies spectral normalization to a parameter in the given module.

    The parameter ``name`` is rescaled by its spectral norm (largest
    singular value), estimated via ``n_power_iterations`` rounds of
    power iteration on every forward pass.  ``dim`` defaults to 1 for
    transposed convolutions (their output-channel dimension) and 0 for
    everything else.

    Returns the module with the spectral-norm hook registered.

    See `Spectral Normalization for Generative Adversarial Networks`:
    https://arxiv.org/abs/1802.05957
    """
    if dim is None:
        transposed = (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed) else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
|
def remove_spectral_norm(module, name='weight'):
    """Removes the spectral normalization reparameterization from a
    module, restoring a plain ``name`` parameter.

    Raises ValueError when no spectral-norm hook for ``name`` exists.
    """
    for key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
|
def use_spectral_norm(module, use_sn=False):
    """Wrap ``module`` with spectral normalization when ``use_sn`` is
    True; otherwise return it unchanged."""
    return spectral_norm(module) if use_sn else module
|
class Trainer():
    def __init__(self, config):
        """Build dataset, loaders, models, losses and optimisers from
        ``config``.

        Expected config keys (from usage below): data_loader, trainer,
        losses, model, device, distributed, world_size, global_rank,
        local_rank, save_dir.
        """
        self.config = config
        self.epoch = 0
        self.iteration = 0
        self.train_dataset = Dataset(config['data_loader'], split='train')
        self.train_sampler = None
        self.train_args = config['trainer']
        if config['distributed']:
            self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config['world_size'], rank=config['global_rank'])
        # Per-process batch size when running distributed.
        self.train_loader = DataLoader(self.train_dataset, batch_size=(self.train_args['batch_size'] // config['world_size']), shuffle=(self.train_sampler is None), num_workers=self.train_args['num_workers'], sampler=self.train_sampler)
        self.adversarial_loss = AdversarialLoss(type=self.config['losses']['GAN_LOSS'])
        self.adversarial_loss = self.adversarial_loss.to(self.config['device'])
        self.l1_loss = nn.L1Loss()
        # Generator architecture is resolved dynamically from the
        # project's `model` package by name.
        net = importlib.import_module(('model.' + config['model']['net']))
        self.netG = net.InpaintGenerator()
        self.netG = self.netG.to(self.config['device'])
        if (not self.config['model']['no_dis']):
            # Hinge loss works on raw logits; other losses expect a
            # sigmoid output head.
            self.netD = net.Discriminator(in_channels=3, use_sigmoid=(config['losses']['GAN_LOSS'] != 'hinge'))
            self.netD = self.netD.to(self.config['device'])
        self.optimG = torch.optim.Adam(self.netG.parameters(), lr=config['trainer']['lr'], betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2']))
        if (not self.config['model']['no_dis']):
            self.optimD = torch.optim.Adam(self.netD.parameters(), lr=config['trainer']['lr'], betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2']))
        # Resume from the latest checkpoint (if any) before DDP wrapping.
        self.load()
        if config['distributed']:
            self.netG = DDP(self.netG, device_ids=[self.config['local_rank']], output_device=self.config['local_rank'], broadcast_buffers=True, find_unused_parameters=True)
            if (not self.config['model']['no_dis']):
                self.netD = DDP(self.netD, device_ids=[self.config['local_rank']], output_device=self.config['local_rank'], broadcast_buffers=True, find_unused_parameters=False)
        self.dis_writer = None
        self.gen_writer = None
        self.summary = {}
        # Only rank 0 (or non-distributed runs) writes tensorboard logs.
        if ((self.config['global_rank'] == 0) or (not config['distributed'])):
            self.dis_writer = SummaryWriter(os.path.join(config['save_dir'], 'dis'))
            self.gen_writer = SummaryWriter(os.path.join(config['save_dir'], 'gen'))
def get_lr(self):
return self.optimG.param_groups[0]['lr']
def adjust_learning_rate(self):
decay = (0.1 ** (min(self.iteration, self.config['trainer']['niter']) // self.config['trainer']['niter']))
new_lr = (self.config['trainer']['lr'] * decay)
if (new_lr != self.get_lr()):
for param_group in self.optimG.param_groups:
param_group['lr'] = new_lr
if (not self.config['model']['no_dis']):
for param_group in self.optimD.param_groups:
param_group['lr'] = new_lr
def add_summary(self, writer, name, val):
if (name not in self.summary):
self.summary[name] = 0
self.summary[name] += val
if ((writer is not None) and ((self.iteration % 100) == 0)):
writer.add_scalar(name, (self.summary[name] / 100), self.iteration)
self.summary[name] = 0
def load(self):
model_path = self.config['save_dir']
if os.path.isfile(os.path.join(model_path, 'latest.ckpt')):
latest_epoch = open(os.path.join(model_path, 'latest.ckpt'), 'r').read().splitlines()[(- 1)]
else:
ckpts = [os.path.basename(i).split('.pth')[0] for i in glob.glob(os.path.join(model_path, '*.pth'))]
ckpts.sort()
latest_epoch = (ckpts[(- 1)] if (len(ckpts) > 0) else None)
if (latest_epoch is not None):
gen_path = os.path.join(model_path, 'gen_{}.pth'.format(str(latest_epoch).zfill(5)))
dis_path = os.path.join(model_path, 'dis_{}.pth'.format(str(latest_epoch).zfill(5)))
opt_path = os.path.join(model_path, 'opt_{}.pth'.format(str(latest_epoch).zfill(5)))
if (self.config['global_rank'] == 0):
print('Loading model from {}...'.format(gen_path))
data = torch.load(gen_path, map_location=self.config['device'])
self.netG.load_state_dict(data['netG'])
if (not self.config['model']['no_dis']):
data = torch.load(dis_path, map_location=self.config['device'])
self.netD.load_state_dict(data['netD'])
data = torch.load(opt_path, map_location=self.config['device'])
self.optimG.load_state_dict(data['optimG'])
if (not self.config['model']['no_dis']):
self.optimD.load_state_dict(data['optimD'])
self.epoch = data['epoch']
self.iteration = data['iteration']
elif (self.config['global_rank'] == 0):
print('Warnning: There is no trained model found. An initialized model will be used.')
def save(self, it):
if (self.config['global_rank'] == 0):
gen_path = os.path.join(self.config['save_dir'], 'gen_{}.pth'.format(str(it).zfill(5)))
dis_path = os.path.join(self.config['save_dir'], 'dis_{}.pth'.format(str(it).zfill(5)))
opt_path = os.path.join(self.config['save_dir'], 'opt_{}.pth'.format(str(it).zfill(5)))
print('\nsaving model to {} ...'.format(gen_path))
if (isinstance(self.netG, torch.nn.DataParallel) or isinstance(self.netG, DDP)):
netG = self.netG.module
if (not self.config['model']['no_dis']):
netD = self.netD.module
else:
netG = self.netG
if (not self.config['model']['no_dis']):
netD = self.netD
torch.save({'netG': netG.state_dict()}, gen_path)
if (not self.config['model']['no_dis']):
torch.save({'netD': netD.state_dict()}, dis_path)
torch.save({'epoch': self.epoch, 'iteration': self.iteration, 'optimG': self.optimG.state_dict(), 'optimD': self.optimD.state_dict()}, opt_path)
else:
torch.save({'epoch': self.epoch, 'iteration': self.iteration, 'optimG': self.optimG.state_dict()}, opt_path)
os.system('echo {} > {}'.format(str(it).zfill(5), os.path.join(self.config['save_dir'], 'latest.ckpt')))
    def train(self):
        """Run the training loop until the configured iteration budget is exhausted."""
        pbar = range(int(self.train_args['iterations']))
        if (self.config['global_rank'] == 0):
            # Only rank 0 shows a progress bar and writes the log file.
            pbar = tqdm(pbar, initial=self.iteration, dynamic_ncols=True, smoothing=0.01)
            os.makedirs('logs', exist_ok=True)
            # Log file is named after the run directory (last path component).
            logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='logs/{}.log'.format(self.config['save_dir'].split('/')[(- 1)]), filemode='w')
        while True:
            self.epoch += 1
            if self.config['distributed']:
                # Reshuffle the distributed sampler so each epoch sees a new order.
                self.train_sampler.set_epoch(self.epoch)
            self._train_epoch(pbar)
            if (self.iteration > self.train_args['iterations']):
                break
        print('\nEnd training....')
    def _train_epoch(self, pbar):
        """Run one pass over the train loader, updating G (and D unless disabled)."""
        device = self.config['device']
        for (frames, masks) in self.train_loader:
            self.adjust_learning_rate()
            self.iteration += 1
            (frames, masks) = (frames.to(device), masks.to(device))
            (b, t, c, h, w) = frames.size()
            # Zero out the hole regions before feeding the generator.
            masked_frame = (frames * (1 - masks).float())
            pred_img = self.netG(masked_frame)
            # Flatten the temporal dimension into the batch for per-frame losses.
            frames = frames.view((b * t), c, h, w)
            masks = masks.view((b * t), 1, h, w)
            # Composite: ground truth outside the mask, prediction inside it.
            comp_img = ((frames * (1.0 - masks)) + (masks * pred_img))
            gen_loss = 0
            dis_loss = 0
            if (not self.config['model']['no_dis']):
                # --- Discriminator update: average of real and (detached) fake terms ---
                real_vid_feat = self.netD(frames)
                fake_vid_feat = self.netD(comp_img.detach())
                dis_real_loss = self.adversarial_loss(real_vid_feat, True, True)
                dis_fake_loss = self.adversarial_loss(fake_vid_feat, False, True)
                dis_loss += ((dis_real_loss + dis_fake_loss) / 2)
                self.add_summary(self.dis_writer, 'loss/dis_vid_fake', dis_fake_loss.item())
                self.add_summary(self.dis_writer, 'loss/dis_vid_real', dis_real_loss.item())
                self.optimD.zero_grad()
                dis_loss.backward()
                self.optimD.step()
                # --- Adversarial term for the generator (D not detached here) ---
                gen_vid_feat = self.netD(comp_img)
                gan_loss = self.adversarial_loss(gen_vid_feat, True, False)
                gan_loss = (gan_loss * self.config['losses']['adversarial_weight'])
                gen_loss += gan_loss
                self.add_summary(self.gen_writer, 'loss/gan_loss', gan_loss.item())
            # L1 inside the holes, normalized by the mean hole area.
            hole_loss = self.l1_loss((pred_img * masks), (frames * masks))
            hole_loss = ((hole_loss / torch.mean(masks)) * self.config['losses']['hole_weight'])
            gen_loss += hole_loss
            self.add_summary(self.gen_writer, 'loss/hole_loss', hole_loss.item())
            # L1 outside the holes, normalized by the mean valid area.
            valid_loss = self.l1_loss((pred_img * (1 - masks)), (frames * (1 - masks)))
            valid_loss = ((valid_loss / torch.mean((1 - masks))) * self.config['losses']['valid_weight'])
            gen_loss += valid_loss
            self.add_summary(self.gen_writer, 'loss/valid_loss', valid_loss.item())
            self.optimG.zero_grad()
            gen_loss.backward()
            self.optimG.step()
            if (self.config['global_rank'] == 0):
                pbar.update(1)
                if (not self.config['model']['no_dis']):
                    pbar.set_description(f'd: {dis_loss.item():.3f}; g: {gan_loss.item():.3f};hole: {hole_loss.item():.3f}; valid: {valid_loss.item():.3f}')
                else:
                    pbar.set_description(f'hole: {hole_loss.item():.3f}; valid: {valid_loss.item():.3f}')
                if ((self.iteration % self.train_args['log_freq']) == 0):
                    if (not self.config['model']['no_dis']):
                        logging.info('[Iter {}] d: {:.4f}; g: {:.4f}; hole: {:.4f}; valid: {:.4f}'.format(self.iteration, dis_loss.item(), gan_loss.item(), hole_loss.item(), valid_loss.item()))
                    else:
                        logging.info('[Iter {}] hole: {:.4f}; valid: {:.4f}'.format(self.iteration, hole_loss.item(), valid_loss.item()))
            # Checkpoint number is the save-interval count, not the raw iteration.
            if ((self.iteration % self.train_args['save_freq']) == 0):
                self.save(int((self.iteration // self.train_args['save_freq'])))
            if (self.iteration > self.train_args['iterations']):
                break
|
class BaseNetwork(nn.Module):
    """Common base for networks: parameter counting and weight initialization."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        """Print this network's total parameter count in millions."""
        if isinstance(self, list):
            # Tolerates being called on a [network] wrapper list.
            self = self[0]
        num_params = 0
        for param in self.parameters():
            num_params += param.numel()
        print(('Network [%s] was created. Total number of parameters: %.1f million. To see the architecture, do print(network).' % (type(self).__name__, (num_params / 1000000))))

    def init_weights(self, init_type='normal', gain=0.02):
        """Initialize the network's weights.

        init_type: normal | xavier | xavier_uniform | kaiming | orthogonal | none
        Adapted from junyanz/pytorch-CycleGAN-and-pix2pix (models/networks.py#L39).
        """
        def init_func(m):
            classname = m.__class__.__name__
            if (classname.find('InstanceNorm2d') != (- 1)):
                # Norm layers: unit weight, zero bias.
                if (hasattr(m, 'weight') and (m.weight is not None)):
                    nn.init.constant_(m.weight.data, 1.0)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias.data, 0.0)
            elif (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
                if (init_type == 'normal'):
                    nn.init.normal_(m.weight.data, 0.0, gain)
                elif (init_type == 'xavier'):
                    nn.init.xavier_normal_(m.weight.data, gain=gain)
                elif (init_type == 'xavier_uniform'):
                    nn.init.xavier_uniform_(m.weight.data, gain=1.0)
                elif (init_type == 'kaiming'):
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif (init_type == 'orthogonal'):
                    nn.init.orthogonal_(m.weight.data, gain=gain)
                elif (init_type == 'none'):
                    # Fall back to the module's own default initialization.
                    m.reset_parameters()
                else:
                    raise NotImplementedError(('initialization method [%s] is not implemented' % init_type))
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias.data, 0.0)
        self.apply(init_func)
        # Propagate to children that define their own init_weights.
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)
|
class Encoder(nn.Module):
    """Frame encoder: 4x spatial downsampling followed by a fusion stage in
    which grouped convs mix the stage-entry features with the current ones."""

    def __init__(self):
        super(Encoder, self).__init__()
        # Group counts for the fusion convs, indexed by ((layer_idx - 8) // 2).
        self.group = [1, 2, 4, 8, 1]
        self.layers = nn.ModuleList([nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1, groups=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(640, 512, kernel_size=3, stride=1, padding=1, groups=2), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(768, 384, kernel_size=3, stride=1, padding=1, groups=4), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(640, 256, kernel_size=3, stride=1, padding=1, groups=8), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1, groups=1), nn.LeakyReLU(0.2, inplace=True)])

    def forward(self, x):
        # x: (b*t, 3, H, W); after the two stride-2 convs features are H/4 x W/4.
        (bt, c, h, w) = x.size()
        (h, w) = ((h // 4), (w // 4))
        out = x
        for (i, layer) in enumerate(self.layers):
            if (i == 8):
                # Snapshot the 256-channel features entering the fusion stage.
                x0 = out
            if ((i > 8) and ((i % 2) == 0)):
                # Interleave the snapshot with the current features group-wise,
                # so each conv group sees both (hence the larger in_channels).
                g = self.group[((i - 8) // 2)]
                x = x0.view(bt, g, (- 1), h, w)
                o = out.view(bt, g, (- 1), h, w)
                out = torch.cat([x, o], 2).view(bt, (- 1), h, w)
            out = layer(out)
        return out
|
class InpaintGenerator(BaseNetwork):
    """FuseFormer-style video inpainting generator: CNN encoder -> soft split
    into overlapping-patch tokens -> transformer stack -> soft composition
    back to feature maps (residual) -> CNN decoder with tanh output."""

    def __init__(self, init_weights=True):
        super(InpaintGenerator, self).__init__()
        channel = 256
        hidden = 512
        stack_num = 8
        num_head = 4
        kernel_size = (7, 7)
        padding = (3, 3)
        stride = (3, 3)
        output_size = (60, 108)  # feature-map size the tokens fold back to
        blocks = []
        dropout = 0.0
        t2t_params = {'kernel_size': kernel_size, 'stride': stride, 'padding': padding, 'output_size': output_size}
        # Number of token vectors per frame produced by the unfold operation
        # (standard conv output-size formula per spatial dimension).
        n_vecs = 1
        for (i, d) in enumerate(kernel_size):
            n_vecs *= int((((((output_size[i] + (2 * padding[i])) - (d - 1)) - 1) / stride[i]) + 1))
        for _ in range(stack_num):
            blocks.append(TransformerBlock(hidden=hidden, num_head=num_head, dropout=dropout, n_vecs=n_vecs, t2t_params=t2t_params))
        self.transformer = nn.Sequential(*blocks)
        self.ss = SoftSplit((channel // 2), hidden, kernel_size, stride, padding, dropout=dropout)
        self.add_pos_emb = AddPosEmb(n_vecs, hidden)
        self.sc = SoftComp((channel // 2), hidden, output_size, kernel_size, stride, padding)
        self.encoder = Encoder()
        self.decoder = nn.Sequential(deconv((channel // 2), 128, kernel_size=3, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(0.2, inplace=True), deconv(64, 64, kernel_size=3, padding=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1))
        if init_weights:
            self.init_weights()

    def forward(self, masked_frames):
        """masked_frames: (b, t, c, h, w) video with hole pixels zeroed.

        Returns per-frame predictions in [-1, 1], flattened to (b*t, 3, h', w').
        """
        (b, t, c, h, w) = masked_frames.size()
        # Fix: removed dead `time0 = time.time()` (the value was never used).
        enc_feat = self.encoder(masked_frames.view((b * t), c, h, w))
        (_, c, h, w) = enc_feat.size()
        trans_feat = self.ss(enc_feat, b)
        trans_feat = self.add_pos_emb(trans_feat)
        trans_feat = self.transformer(trans_feat)
        trans_feat = self.sc(trans_feat, t)
        # Residual connection around the transformer branch.
        enc_feat = (enc_feat + trans_feat)
        output = self.decoder(enc_feat)
        output = torch.tanh(output)
        return output
|
class deconv(nn.Module):
    """2x bilinear upsampling followed by a stride-1 convolution."""

    def __init__(self, input_channel, output_channel, kernel_size=3, padding=0):
        super().__init__()
        self.conv = nn.Conv2d(input_channel, output_channel,
                              kernel_size=kernel_size, stride=1, padding=padding)

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        return self.conv(upsampled)
|
class Attention(nn.Module):
"\n Compute 'Scaled Dot Product Attention\n "
def __init__(self, p=0.1):
super(Attention, self).__init__()
self.dropout = nn.Dropout(p=p)
def forward(self, query, key, value, m=None):
scores = (torch.matmul(query, key.transpose((- 2), (- 1))) / math.sqrt(query.size((- 1))))
if (m is not None):
scores.masked_fill_(m, (- 1000000000.0))
p_attn = F.softmax(scores, dim=(- 1))
p_attn = self.dropout(p_attn)
p_val = torch.matmul(p_attn, value)
return (p_val, p_attn)
|
class AddPosEmb(nn.Module):
    """Add a learned positional embedding, shared across groups of `n` vectors."""

    def __init__(self, n, c):
        super(AddPosEmb, self).__init__()
        init = torch.zeros(1, 1, n, c).float().normal_(mean=0, std=0.02)
        self.pos_emb = nn.Parameter(init, requires_grad=True)
        self.num_vecs = n

    def forward(self, x):
        b, n, c = x.size()
        # Fold the sequence into (groups, num_vecs) so one embedding table is
        # broadcast over every group, then restore the flat layout.
        out = x.view(b, -1, self.num_vecs, c) + self.pos_emb
        return out.view(b, n, c)
|
class SoftSplit(nn.Module):
    """Unfold feature maps into overlapping patches and embed them as tokens."""

    def __init__(self, channel, hidden, kernel_size, stride, padding, dropout=0.1):
        super(SoftSplit, self).__init__()
        self.kernel_size = kernel_size
        self.t2t = nn.Unfold(kernel_size=kernel_size, stride=stride, padding=padding)
        patch_dim = reduce(lambda a, b: a * b, kernel_size) * channel
        self.embedding = nn.Linear(patch_dim, hidden)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, b):
        patches = self.t2t(x)                # (b*t, patch_dim, n_patches)
        patches = patches.permute(0, 2, 1)   # (b*t, n_patches, patch_dim)
        tokens = self.embedding(patches)
        # Merge the time dimension into the sequence: (b, t*n_patches, hidden).
        tokens = tokens.view(b, -1, tokens.size(2))
        return self.dropout(tokens)
|
class SoftComp(nn.Module):
    """Project tokens back to patch space and fold them into feature maps."""

    def __init__(self, channel, hidden, output_size, kernel_size, stride, padding):
        super(SoftComp, self).__init__()
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        patch_dim = reduce(lambda a, b: a * b, kernel_size) * channel
        self.embedding = nn.Linear(hidden, patch_dim)
        self.t2t = torch.nn.Fold(output_size=output_size, kernel_size=kernel_size, stride=stride, padding=padding)
        h, w = output_size
        # Learned per-pixel additive bias applied after folding.
        self.bias = nn.Parameter(torch.zeros((channel, h, w), dtype=torch.float32), requires_grad=True)

    def forward(self, x, t):
        patches = self.embedding(x)
        b, n, c = patches.size()
        # Split time back out of the sequence, then fold patches to images.
        patches = patches.view(b * t, -1, c).permute(0, 2, 1)
        return self.t2t(patches) + self.bias[None]
|
class MultiHeadedAttention(nn.Module):
    """Multi-head self-attention: project, split into heads, attend, merge."""

    def __init__(self, d_model, head, p=0.1):
        super().__init__()
        self.query_embedding = nn.Linear(d_model, d_model)
        self.value_embedding = nn.Linear(d_model, d_model)
        self.key_embedding = nn.Linear(d_model, d_model)
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention(p=p)
        self.head = head

    def _split_heads(self, t, b, n, c_h):
        # (b, n, head, c_h) -> (b, head, n, c_h)
        return t.view(b, n, self.head, c_h).permute(0, 2, 1, 3)

    def forward(self, x):
        b, n, c = x.size()
        c_h = c // self.head
        key = self._split_heads(self.key_embedding(x), b, n, c_h)
        query = self._split_heads(self.query_embedding(x), b, n, c_h)
        value = self._split_heads(self.value_embedding(x), b, n, c_h)
        att, _ = self.attention(query, key, value)
        # Merge the heads back into the model dimension.
        att = att.permute(0, 2, 1, 3).contiguous().view(b, n, c)
        return self.output_linear(att)
|
class FeedForward(nn.Module):
    """Position-wise MLP: 4x expansion, ReLU, dropout, projection back, dropout."""

    def __init__(self, d_model, p=0.1):
        super(FeedForward, self).__init__()
        self.conv = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.ReLU(inplace=True),
            nn.Dropout(p=p),
            nn.Linear(d_model * 4, d_model),
            nn.Dropout(p=p),
        )

    def forward(self, x):
        return self.conv(x)
|
class FusionFeedForward(nn.Module):
    """Feed-forward layer that fuses overlapping token patches via a
    fold/unfold round-trip between the two linear projections."""

    def __init__(self, d_model, p=0.1, n_vecs=None, t2t_params=None):
        super(FusionFeedForward, self).__init__()
        # Hidden width 1960 appears chosen to be divisible by 49 (= 7*7) so the
        # hidden activations can be laid out as fold/unfold patches — see the
        # hard-coded 49 in forward(); confirm if t2t_params' kernel changes.
        hd = 1960
        self.conv1 = nn.Sequential(nn.Linear(d_model, hd))
        self.conv2 = nn.Sequential(nn.ReLU(inplace=True), nn.Dropout(p=p), nn.Linear(hd, d_model), nn.Dropout(p=p))
        assert ((t2t_params is not None) and (n_vecs is not None))
        tp = t2t_params.copy()
        self.fold = nn.Fold(**tp)
        # Unfold takes the same params except output_size.
        del tp['output_size']
        self.unfold = nn.Unfold(**tp)
        self.n_vecs = n_vecs

    def forward(self, x):
        x = self.conv1(x)
        (b, n, c) = x.size()
        # Fold sums overlapping patches; dividing by the folded all-ones
        # normalizer turns that sum into an average over the overlaps.
        # NOTE(review): the hard-coded 49 assumes kernel_size == (7, 7) in
        # t2t_params — verify before reusing with a different kernel.
        normalizer = x.new_ones(b, n, 49).view((- 1), self.n_vecs, 49).permute(0, 2, 1)
        x = self.unfold((self.fold(x.view((- 1), self.n_vecs, c).permute(0, 2, 1)) / self.fold(normalizer))).permute(0, 2, 1).contiguous().view(b, n, c)
        x = self.conv2(x)
        return x
|
class TransformerBlock(nn.Module):
    """Pre-norm transformer block: multi-head self-attention then fusion
    feed-forward, each wrapped in a residual connection."""

    def __init__(self, hidden=128, num_head=4, dropout=0.1, n_vecs=None, t2t_params=None):
        super().__init__()
        self.attention = MultiHeadedAttention(d_model=hidden, head=num_head, p=dropout)
        self.ffn = FusionFeedForward(hidden, p=dropout, n_vecs=n_vecs, t2t_params=t2t_params)
        self.norm1 = nn.LayerNorm(hidden)
        self.norm2 = nn.LayerNorm(hidden)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input):
        # Attention sub-layer (pre-norm + dropout + residual).
        attended = input + self.dropout(self.attention(self.norm1(input)))
        # Feed-forward sub-layer (pre-norm + residual).
        return attended + self.ffn(self.norm2(attended))
|
class Discriminator(BaseNetwork):
    """Spatio-temporal (3D conv) discriminator with optional spectral norm."""

    def __init__(self, in_channels=3, use_sigmoid=False, use_spectral_norm=True, init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        # Base channel width; spatial stride 2 per block, temporal stride 1.
        nf = 32
        self.conv = nn.Sequential(spectral_norm(nn.Conv3d(in_channels=in_channels, out_channels=(nf * 1), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=1, bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True), spectral_norm(nn.Conv3d((nf * 1), (nf * 2), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True), spectral_norm(nn.Conv3d((nf * 2), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True), spectral_norm(nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True), spectral_norm(nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm), nn.LeakyReLU(0.2, inplace=True), nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)))
        if init_weights:
            self.init_weights()

    def forward(self, xs):
        # xs: a stack of frames; transpose + unsqueeze builds a single
        # (1, c, t, h, w) batch for the 3D convs.
        # NOTE(review): this assumes one video per call (batch size 1) —
        # confirm with callers (_train_epoch passes the flattened b*t frames).
        xs_t = torch.transpose(xs, 0, 1)
        xs_t = xs_t.unsqueeze(0)
        feat = self.conv(xs_t)
        if self.use_sigmoid:
            feat = torch.sigmoid(feat)
        # Return with time before channels: (1, t', c', h', w').
        out = torch.transpose(feat, 1, 2)
        return out
|
def spectral_norm(module, mode=True):
    """Wrap `module` with spectral normalization when `mode` is truthy,
    otherwise return it unchanged."""
    return _spectral_norm(module) if mode else module
|
class MaxPool3dSamePadding(nn.MaxPool3d):
    """3D max pooling with TensorFlow-style 'SAME' padding."""

    def compute_pad(self, dim, s):
        # Total padding so a dimension of size `s` is fully covered by the
        # kernel when stepping by the stride (TF 'SAME' rule).
        remainder = s % self.stride[dim]
        if remainder == 0:
            return max(self.kernel_size[dim] - self.stride[dim], 0)
        return max(self.kernel_size[dim] - remainder, 0)

    def forward(self, x):
        batch, channel, t, h, w = x.size()
        pads = []
        # F.pad expects (w_f, w_b, h_f, h_b, t_f, t_b) — innermost dim first.
        for dim, size in ((2, w), (1, h), (0, t)):
            total = self.compute_pad(dim, size)
            front = total // 2
            pads.extend([front, total - front])
        x = F.pad(x, tuple(pads))
        return super(MaxPool3dSamePadding, self).forward(x)
|
class Unit3D(nn.Module):
    """Conv3d + optional BatchNorm + optional activation, with TF 'SAME' padding."""

    def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=True, use_bias=False, name='unit_3d'):
        """Initializes Unit3D module."""
        super(Unit3D, self).__init__()
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._use_batch_norm = use_batch_norm
        self._activation_fn = activation_fn
        self._use_bias = use_bias
        self.name = name
        self.padding = padding
        # Padding is applied manually in forward(), so the conv itself pads 0.
        self.conv3d = nn.Conv3d(in_channels=in_channels,
                                out_channels=self._output_channels,
                                kernel_size=self._kernel_shape,
                                stride=self._stride,
                                padding=0,
                                bias=self._use_bias)
        if self._use_batch_norm:
            self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)

    def compute_pad(self, dim, s):
        # Total 'SAME' padding for a dimension of size `s` along axis `dim`.
        remainder = s % self._stride[dim]
        if remainder == 0:
            return max(self._kernel_shape[dim] - self._stride[dim], 0)
        return max(self._kernel_shape[dim] - remainder, 0)

    def forward(self, x):
        batch, channel, t, h, w = x.size()
        pads = []
        # F.pad expects (w_f, w_b, h_f, h_b, t_f, t_b) — innermost dim first.
        for dim, size in ((2, w), (1, h), (0, t)):
            total = self.compute_pad(dim, size)
            front = total // 2
            pads.extend([front, total - front])
        x = F.pad(x, tuple(pads))
        x = self.conv3d(x)
        if self._use_batch_norm:
            x = self.bn(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
|
class InceptionModule(nn.Module):
    """Inception block: four parallel branches concatenated along channels."""

    def __init__(self, in_channels, out_channels, name):
        super(InceptionModule, self).__init__()
        # Branch 0: plain 1x1x1 conv.
        self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0, name=(name + '/Branch_0/Conv3d_0a_1x1'))
        # Branch 1: 1x1x1 bottleneck then 3x3x3 conv.
        self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0, name=(name + '/Branch_1/Conv3d_0a_1x1'))
        self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[3, 3, 3], name=(name + '/Branch_1/Conv3d_0b_3x3'))
        # Branch 2: 1x1x1 bottleneck then 3x3x3 conv.
        self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0, name=(name + '/Branch_2/Conv3d_0a_1x1'))
        self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[3, 3, 3], name=(name + '/Branch_2/Conv3d_0b_3x3'))
        # Branch 3: 3x3x3 max-pool then 1x1x1 conv.
        self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(1, 1, 1), padding=0)
        self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0, name=(name + '/Branch_3/Conv3d_0b_1x1'))
        self.name = name

    def forward(self, x):
        branches = [
            self.b0(x),
            self.b1b(self.b1a(x)),
            self.b2b(self.b2a(x)),
            self.b3b(self.b3a(x)),
        ]
        return torch.cat(branches, dim=1)
|
class InceptionI3d(nn.Module):
    """Inception-v1 I3D architecture.

    The model is introduced in:
        Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
        Joao Carreira, Andrew Zisserman — https://arxiv.org/pdf/1705.07750v1.pdf
    See also the Inception architecture:
        Going deeper with convolutions, Szegedy et al.
        http://arxiv.org/pdf/1409.4842v1.pdf
    """
    # Construction proceeds through these endpoints in order and stops at
    # `final_endpoint` (early `return` in __init__).
    VALID_ENDPOINTS = ('Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool3d_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool3d_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Logits', 'Predictions')

    def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
        """Initializes an I3D model instance.

        Args:
            num_classes: outputs in the logit layer (400 matches Kinetics).
            spatial_squeeze: squeeze the spatial dims of the logits before
                returning (default True).
            final_endpoint: last endpoint to build, one of VALID_ENDPOINTS.
            name: module name prefix.
            in_channels: input channel count.
            dropout_keep_prob: dropout probability before the logits layer.

        Raises:
            ValueError: if `final_endpoint` is not recognized.

        NOTE(review): when final_endpoint != 'Logits', __init__ returns early
        and self.avg_pool/self.dropout/self.logits are never created, so
        forward() would fail — use extract_features() in that configuration.
        """
        if (final_endpoint not in self.VALID_ENDPOINTS):
            raise ValueError(('Unknown final endpoint %s' % final_endpoint))
        super(InceptionI3d, self).__init__()
        self._num_classes = num_classes
        self._spatial_squeeze = spatial_squeeze
        self._final_endpoint = final_endpoint
        self.logits = None
        if (self._final_endpoint not in self.VALID_ENDPOINTS):
            raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
        # Modules are collected here and registered in build().
        self.end_points = {}
        end_point = 'Conv3d_1a_7x7'
        self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=(name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'MaxPool3d_2a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
        if (self._final_endpoint == end_point):
            return
        end_point = 'Conv3d_2b_1x1'
        self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=(name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Conv3d_2c_3x3'
        self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=(name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'MaxPool3d_3a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
        if (self._final_endpoint == end_point):
            return
        # Inception blocks: in_channels of each is the concatenated output
        # width of the previous block (sums written out explicitly).
        end_point = 'Mixed_3b'
        self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_3c'
        self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'MaxPool3d_4a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_4b'
        self.end_points[end_point] = InceptionModule((((128 + 192) + 96) + 64), [192, 96, 208, 16, 48, 64], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_4c'
        self.end_points[end_point] = InceptionModule((((192 + 208) + 48) + 64), [160, 112, 224, 24, 64, 64], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_4d'
        self.end_points[end_point] = InceptionModule((((160 + 224) + 64) + 64), [128, 128, 256, 24, 64, 64], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_4e'
        self.end_points[end_point] = InceptionModule((((128 + 256) + 64) + 64), [112, 144, 288, 32, 64, 64], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_4f'
        self.end_points[end_point] = InceptionModule((((112 + 288) + 64) + 64), [256, 160, 320, 32, 128, 128], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'MaxPool3d_5a_2x2'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_5b'
        self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [256, 160, 320, 32, 128, 128], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Mixed_5c'
        self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [384, 192, 384, 48, 128, 128], (name + end_point))
        if (self._final_endpoint == end_point):
            return
        end_point = 'Logits'
        self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1))
        self.dropout = nn.Dropout(dropout_keep_prob)
        self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
        self.build()

    def replace_logits(self, num_classes):
        """Swap the classification head for a new `num_classes`-way one."""
        self._num_classes = num_classes
        self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')

    def build(self):
        # Register the collected endpoint modules so parameters are tracked.
        for k in self.end_points.keys():
            self.add_module(k, self.end_points[k])

    def forward(self, x):
        # Run every built endpoint in canonical order, then the logits head.
        for end_point in self.VALID_ENDPOINTS:
            if (end_point in self.end_points):
                x = self._modules[end_point](x)
        x = self.logits(self.dropout(self.avg_pool(x)))
        if self._spatial_squeeze:
            logits = x.squeeze(3).squeeze(3)
        # NOTE(review): `logits` is unbound when spatial_squeeze is False —
        # callers appear to always use the default True.
        return logits

    def extract_features(self, x, target_endpoint='Logits'):
        """Return intermediate features, stopping at `target_endpoint`."""
        for end_point in self.VALID_ENDPOINTS:
            if (end_point in self.end_points):
                x = self._modules[end_point](x)
                if (end_point == target_endpoint):
                    break
        if (target_endpoint == 'Logits'):
            # Global average over time and space.
            return x.mean(4).mean(3).mean(2)
        else:
            return x
|
def main_worker(rank, config):
    """Per-process training entry point (one process per GPU).

    NOTE(review): reads the module-level `args` for the config file path —
    relies on the launcher defining `args` globally before spawning workers.
    """
    if ('local_rank' not in config):
        config['local_rank'] = config['global_rank'] = rank
    if config['distributed']:
        torch.cuda.set_device(int(config['local_rank']))
        # NCCL backend for multi-GPU training.
        torch.distributed.init_process_group(backend='nccl', init_method=config['init_method'], world_size=config['world_size'], rank=config['global_rank'], group_name='mtorch')
        print('using GPU {}-{} for training'.format(int(config['global_rank']), int(config['local_rank'])))
    # Run directory is named <net>_<config-file-stem>.
    config['save_dir'] = os.path.join(config['save_dir'], '{}_{}'.format(config['model']['net'], os.path.basename(args.config).split('.')[0]))
    if torch.cuda.is_available():
        config['device'] = torch.device('cuda:{}'.format(config['local_rank']))
    else:
        config['device'] = 'cpu'
    # Only the main process creates the run directory and snapshots the config.
    if ((not config['distributed']) or (config['global_rank'] == 0)):
        os.makedirs(config['save_dir'], exist_ok=True)
        config_path = os.path.join(config['save_dir'], args.config.split('/')[(- 1)])
        if (not os.path.isfile(config_path)):
            copyfile(args.config, config_path)
        print('[**] create folder {}'.format(config['save_dir']))
    trainer = Trainer(config)
    trainer.train()
|
def train(model, dataset_paths, save_every, steps, save_path, bsize):
    """Fine-tune a sequence-classification QA model on pickled datapoints.

    Args:
        model: wrapper exposing `.model` (the underlying HF module),
            `get_logits_from_qc_` and `save`.
        dataset_paths: pickle files, each containing a list of datapoint
            dicts with an integer label under key 'a'.
        save_every: checkpoint interval in optimization steps.
        steps: total number of optimization steps.
        save_path: checkpoint path prefix; the step number is appended.
        bsize: batch size.
    """
    data_dicts = []
    for d_path in dataset_paths:
        # Fix: close each pickle file deterministically instead of leaking
        # the handle from pkl.load(open(...)).
        with open(d_path, 'rb') as f:
            data_dicts.extend(pkl.load(f))
    print('%d datapoints' % len(data_dicts))
    random.shuffle(data_dicts)
    # Standard HF recipe: no weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in model.model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=1e-05)
    scheduler = get_linear_schedule_with_warmup(optimizer, 100, steps)
    # Model outputs log-probabilities (see BERTZeroShotClfQA), hence NLLLoss.
    loss_func = torch.nn.NLLLoss()
    for i in trange(steps):
        # Cycle through the shuffled data, wrapping around at the end.
        ds = [data_dicts[j % len(data_dicts)] for j in range(i * bsize, (i + 1) * bsize)]
        logits = model.get_logits_from_qc_(ds)
        gold = torch.tensor([d['a'] for d in ds]).to(device)
        loss = loss_func(logits, gold)
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        if (i % save_every) == 0 and i != 0:
            model.save(save_path + str(i))
|
def normalize(t):
    """Lower-case `t` and drop single quotes wrapping a quoted span."""
    lowered = t.lower()
    return re.sub("'(.+)'", '\\1', lowered)
|
def qc2input(d):
    """Build the model input from a datapoint dict: question, a literal
    backslash-n separator (two characters, not a newline), then context —
    all passed through normalize()."""
    joined = d['q'] + '\\n' + d['c']
    return normalize(joined)
|
class BERTZeroShotClfQA(torch.nn.Module):
    """Zero-shot QA classifier: a HF sequence-classification model scored over
    normalized question+context strings, returning log-probabilities."""

    def __init__(self, model_name, max_seq_length=128):
        super(BERTZeroShotClfQA, self).__init__()
        if (max_seq_length > 128):
            raise Exception('We only trained our model for context length 128. Feel free to remove this if you are training your own model.')
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
        # `device` is a module-level global defined elsewhere in this file.
        self.model.to(device)
        self.max_seq_length = max_seq_length
        self.lsm = torch.nn.LogSoftmax(dim=(- 1))

    def create_batch(self, q_dicts):
        """Tokenize a list of {'q', 'c'} dicts into a padded batch on `device`."""
        input_strings = [qc2input(d) for d in q_dicts]
        input_dict = self.tokenizer(input_strings, padding=True, return_tensors='pt', truncation=True, max_length=self.max_seq_length).to(device)
        return input_dict

    def forward(self, input_dict):
        """Return log-softmaxed classification logits for a tokenized batch."""
        output = self.model(**input_dict)
        return self.lsm(output.logits)

    def get_logits_from_qc_(self, datapoint_dicts):
        # Single-batch convenience wrapper (keeps gradients; used by training).
        input_dict = self.create_batch(datapoint_dicts)
        return self.forward(input_dict)

    def get_logits_from_qc(self, q_dicts, bsize=32, progress_bar=True):
        """Batched inference over q_dicts (eval mode); returns a numpy array
        of log-probabilities, one row per datapoint."""
        self.model.eval()
        result_logits = []
        # Ceil division to cover a partial final batch.
        iter_count = (((len(q_dicts) - 1) // bsize) + 1)
        ranger = (range(iter_count) if (not progress_bar) else tqdm.trange(iter_count))
        for i in ranger:
            l = self.get_logits_from_qc_(q_dicts[(i * bsize):((i + 1) * bsize)]).detach().cpu().numpy().tolist()
            result_logits.extend(l)
        return np.array(result_logits)
|
def get_id_from_path_name(p: str):
    """Extract the numeric group id from a filename like 'group12_name.ext'."""
    stem = os.path.basename(p).split('_')[0]
    return int(stem.replace('group', ''))
|
def get_name_from_path_name(p):
    """Extract the name segment from a filename like 'group12_name.ext'
    (the part between the first underscore and the next dot)."""
    segments = os.path.basename(p).split('_')
    return segments[1].split('.')[0]
|
def split(ids, train, val, test):
    """Split sample indices into train/val/test subsets by unique id.

    Fractions must sum to 1. Unique ids (not samples) are partitioned, so
    every sample sharing an id lands in the same subset. The val and test
    id counts are rounded up; train takes the remainder.
    Returns (train_idx, val_idx, test_idx) as index arrays into `ids`.
    """
    assert train + val + test == 1
    unique_ids = np.unique(ids)
    num_ids = len(unique_ids)
    n_test = math.ceil(test * num_ids)
    n_val = math.ceil(val * num_ids)
    n_train = num_ids - n_val - n_test
    train_idx = np.where(np.isin(ids, unique_ids[:n_train]))[0]
    val_idx = np.where(np.isin(ids, unique_ids[n_train:n_train + n_val]))[0]
    test_idx = np.where(np.isin(ids, unique_ids[n_train + n_val:]))[0]
    return (train_idx, val_idx, test_idx)
|
class EEGViT_raw(nn.Module):
    """ViT trained from scratch on EEG: a conv front-end producing a
    (256, 129, 14) feature grid fed to a ViT with an adapted patch embedding.

    NOTE(review): `ViTLayers` is accepted but never used — the depth is fixed
    at 12 by the config below; confirm whether it should be wired through.
    """

    def __init__(self, ViTLayers):
        super().__init__()
        # 1 -> 256 channels; kernel/stride (1, 36) slices the last input axis
        # (presumably time — confirm input layout with the data pipeline).
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=(1, 36), stride=(1, 36), padding=(0, 2), bias=False)
        self.batchnorm1 = nn.BatchNorm2d(256, False)
        config = transformers.ViTConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_channels=256, image_size=(129, 14), patch_size=(8, 1))
        model = ViTModel(config)
        # Grouped (depthwise-style) projection: each of the 256 channels is
        # embedded per (8, 1) patch.
        model.embeddings.patch_embeddings.projection = torch.nn.Conv2d(256, 768, kernel_size=(8, 1), stride=(8, 1), padding=(0, 0), groups=256)
        # Replace the pooler activation with a dropout + 2-way regression head.
        model.pooler.activation = torch.nn.Sequential(torch.nn.Dropout(p=0.1), torch.nn.Linear(768, 2, bias=True))
        self.ViT = model

    def forward(self, x):
        x = self.conv1(x)
        x = self.batchnorm1(x)
        # Output comes from the (replaced) pooler head: shape (batch, 2).
        x = self.ViT(x).pooler_output
        return x
|
class EEGViT_pretrained(nn.Module):
    """ImageNet-pretrained ViT fine-tuned on EEG: conv front-end plus a
    vit-base checkpoint with patch embedding and classifier head replaced."""

    def __init__(self):
        super().__init__()
        # 1 -> 256 channels; kernel/stride (1, 36) slices the last input axis
        # (presumably time — confirm input layout with the data pipeline).
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=(1, 36), stride=(1, 36), padding=(0, 2), bias=False)
        self.batchnorm1 = nn.BatchNorm2d(256, False)
        model_name = 'google/vit-base-patch16-224'
        config = transformers.ViTConfig.from_pretrained(model_name)
        # Adapt the pretrained config to the EEG feature grid; mismatched
        # weights (patch embedding, head) are re-initialized.
        config.update({'num_channels': 256})
        config.update({'image_size': (129, 14)})
        config.update({'patch_size': (8, 1)})
        model = transformers.ViTForImageClassification.from_pretrained(model_name, config=config, ignore_mismatched_sizes=True)
        # Grouped projection: each of the 256 channels embedded per (8, 1) patch.
        model.vit.embeddings.patch_embeddings.projection = torch.nn.Conv2d(256, 768, kernel_size=(8, 1), stride=(8, 1), padding=(0, 0), groups=256)
        # Two-layer regression head producing 2 outputs.
        model.classifier = torch.nn.Sequential(torch.nn.Linear(768, 1000, bias=True), torch.nn.Dropout(p=0.1), torch.nn.Linear(1000, 2, bias=True))
        self.ViT = model

    def forward(self, x):
        x = self.conv1(x)
        x = self.batchnorm1(x)
        x = self.ViT.forward(x).logits
        return x
|
class ViTBase(nn.Module):
    """Vision Transformer (from-scratch config) applied directly to raw EEG.

    A single-channel 129x500 input is patch-embedded by an 8x36 convolution;
    the pooler activation slot is repurposed as a dropout + linear 768 -> 2
    head, so forward() returns a (batch, 2) tensor.
    """
    def __init__(self):
        super().__init__()
        config = transformers.ViTConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_channels=1, image_size=(129, 500), patch_size=(8, 35))
        model = ViTModel(config)
        model.embeddings.patch_embeddings.projection = torch.nn.Conv2d(1, 768, kernel_size=(8, 36), stride=(8, 36), padding=(0, 2))
        model.pooler.activation = torch.nn.Sequential(torch.nn.Dropout(p=0.1), torch.nn.Linear(768, 2, bias=True))
        # Bug fix: `model` was previously left as a local variable and never
        # stored on the module, so forward()'s `self.model` raised
        # AttributeError and no parameters were registered for training.
        self.model = model
    def forward(self, x):
        """Run the ViT; returns the pooler output (shape (batch, 2))."""
        x = self.model(x).pooler_output
        return x
|
class ViTBase_pretrained(nn.Module):
    """ImageNet-pretrained ViT fine-tuned directly on raw single-channel EEG.

    Loads 'google/vit-base-patch16-224', overrides the input geometry
    (1 channel, 129x500 "image", 8x35 patches), swaps the patch projection
    for a conv + BatchNorm stack, and replaces the classifier with a
    768 -> 1000 -> 2 head. forward() returns the logits tensor (batch, 2).
    """
    def __init__(self):
        super().__init__()
        model_name = 'google/vit-base-patch16-224'
        config = transformers.ViTConfig.from_pretrained(model_name)
        config.update({'num_channels': 1})
        config.update({'image_size': (129, 500)})
        config.update({'patch_size': (8, 35)})
        # ignore_mismatched_sizes lets pretrained weights load despite the
        # changed input geometry; mismatched layers are re-initialized.
        model = transformers.ViTForImageClassification.from_pretrained(model_name, config=config, ignore_mismatched_sizes=True)
        # Patch projection: 8x36 conv (stride 8x36, padding (0, 2)) followed
        # by BatchNorm over the 768 embedding channels.
        model.vit.embeddings.patch_embeddings.projection = nn.Sequential(torch.nn.Conv2d(1, 768, kernel_size=(8, 36), stride=(8, 36), padding=(0, 2)), nn.BatchNorm2d(768))
        model.classifier = torch.nn.Sequential(torch.nn.Linear(768, 1000, bias=True), torch.nn.Dropout(p=0.1), torch.nn.Linear(1000, 2, bias=True))
        self.ViT = model
    def forward(self, x):
        """Run the ViT classifier; returns logits (batch, 2)."""
        x = self.ViT(x).logits
        return x
|
def scot(X, y, k, e, rho=1, mode='connectivity', metric='correlation', XontoY=True, returnCoupling=False, balanced=True):
    """Align two datasets with (entropic) Gromov-Wasserstein optimal transport.

    Builds kNN-graph geodesic distance matrices for X and y (k neighbors),
    solves balanced entropic GW (or an unbalanced Sinkhorn variant on GPU),
    and barycentrically projects one dataset onto the other.

    Returns (coupling, log) when returnCoupling is True; otherwise the pair
    (X_transported, y) if XontoY else (X, y_transported), or (None, None)
    when the solver did not converge.
    """
    Cx = ut.get_graph_distance_matrix(X, k, mode=mode, metric=metric)
    Cy = ut.get_graph_distance_matrix(y, k, mode=mode, metric=metric)
    p = ot.unif(Cx.shape[0])
    q = ot.unif(Cy.shape[0])
    if balanced:
        (couplingM, log) = ot.gromov.entropic_gromov_wasserstein(Cx, Cy, p, q, 'square_loss', epsilon=e, log=True, verbose=True)
    else:
        # Unbalanced variant: marginal relaxation strength scales with the
        # mean magnitude of the two distance matrices.
        solver = TLBSinkhornSolver(nits=1000, nits_sinkhorn=2500, gradient=False, tol=0.001, tol_sinkhorn=0.001)
        (couplingM, _) = solver.tlb_sinkhorn(torch.Tensor(p).cuda(), torch.Tensor(Cx).cuda(), torch.Tensor(q).cuda(), torch.Tensor(Cy).cuda(), rho=((rho * 0.5) * (Cx.mean() + Cy.mean())), eps=e, init=None)
        couplingM = couplingM.cpu().numpy()
        log = None
    # NaNs or an all-zero row/column in the coupling mean the solver failed.
    converged = not (np.isnan(couplingM).any() or np.any((~ couplingM.any(axis=1))) or np.any((~ couplingM.any(axis=0))))
    if not converged:
        print('Did not converge. Try increasing the epsilon value. ')
    if returnCoupling:
        return (couplingM, log)
    if not converged:
        return (None, None)
    if XontoY:
        return (ut.transport_data(X, y, couplingM, transposeCoupling=False), y)
    return (X, ut.transport_data(X, y, couplingM, transposeCoupling=True))
|
def search_scot(X, y, ks, es, plot_values=False):
    """Grid-search SCOT hyperparameters k (kNN graph size) and e (epsilon).

    Runs entropic Gromov-Wasserstein for every (k, e) pair, discarding
    settings that fail to converge. Returns (g_plot, k_plot, e_plot) — the
    GW distances and their settings — when plot_values is True, otherwise
    the best (k_best, e_best) by lowest GW distance.

    Raises:
        ValueError: if no (k, e) setting converged.
    """
    X_sampleNo = X.shape[0]
    y_sampleNo = y.shape[0]
    p = ot.unif(X_sampleNo)
    q = ot.unif(y_sampleNo)
    k_plot = []
    e_plot = []
    g_plot = []
    total = (len(es) * len(ks))
    counter = 0
    for k in ks:
        # Graphs depend only on k, so build them once per k rather than per (k, e).
        Cx = ut.get_graph_distance_matrix(X, k, mode='connectivity', metric='correlation')
        Cy = ut.get_graph_distance_matrix(y, k, mode='connectivity', metric='correlation')
        for e in es:
            counter += 1
            if ((counter % 10) == 0):
                print(((str(counter) + '/') + str(total)))
            (gw, log) = ot.gromov.entropic_gromov_wasserstein(Cx, Cy, p, q, 'square_loss', epsilon=e, log=True, verbose=False, max_iter=200)
            # NaNs, an all-zero row/column, or total mass well below 1 mean
            # the solver failed for this setting.
            if (np.isnan(gw).any() or np.any((~ gw.any(axis=1))) or np.any((~ gw.any(axis=0))) or (sum(sum(gw)) < 0.95)):
                print('Did not converge')
            else:
                g_plot.append(log['gw_dist'])
                k_plot.append(k)
                e_plot.append(e)
    # Robustness fix: previously an empty g_plot crashed np.amin with an
    # opaque "zero-size array" ValueError; fail with an actionable message.
    if (len(g_plot) == 0):
        raise ValueError('No (k, e) setting converged; try larger epsilon values.')
    gmin = np.amin(g_plot)
    gminI = np.argmin(g_plot)
    e_best = e_plot[gminI]
    k_best = k_plot[gminI]
    print('Best result with GW distance is when e and k are:', e_best, k_best, ' with lowest GW dist:', gmin)
    if plot_values:
        return (g_plot, k_plot, e_plot)
    else:
        return (k_best, e_best)
|
def unsupervised_scot(X, y, XontoY=True):
    """Self-tune SCOT hyperparameters coarse-to-fine, then align the datasets.

    Stage 1 fixes a heuristic k and scans epsilon on a log grid; stage 2
    fixes the best epsilon and scans k; stage 3 refines both around the
    current winners. Returns (X_t, y_t, k_best, e_best).
    """
    n = min(X.shape[0], y.shape[0])

    # Stage 1: heuristic k, scan epsilon.
    k_best = min((n // 5), 50)
    eps_grid = np.logspace((- 2), (- 3), 6)
    (g1, k1, e1) = search_scot(X, y, [k_best], eps_grid, plot_values=True)
    gmin = np.min(g1)
    e_best = e1[np.argmin(g1)]

    # Stage 2: scan k at the chosen epsilon.
    if (n > 250):
        k_grid = np.linspace(20, 100, 4)
    else:
        k_grid = np.linspace((X.shape[0] // 20), (X.shape[0] // 6), 4)
    k_grid = k_grid.astype(int)
    (g2, k2, e2) = search_scot(X, y, k_grid, [e_best], plot_values=True)
    best = np.argmin(g2)
    if (g2[best] < gmin):
        gmin = g2[best]
        k_best = k2[best]

    # Stage 3: refine both hyperparameters around the current winners.
    scale = np.log10(e_best)
    eps_refined = np.logspace((scale + 0.25), (scale - 0.25), 5)
    ks_refined = np.linspace(max(5, (k_best - 5)), min((X.shape[0] // 2), (k_best + 5)), 2).astype(int)
    (g3, k3, e3) = search_scot(X, y, ks_refined, eps_refined, plot_values=True)
    best = np.argmin(g3)
    if (g3[best] < gmin):
        gmin = g3[best]
        k_best = k3[best]
        e_best = e3[best]

    print('Lowest GW distance is ', gmin, ' for epsilon = ', e_best, ' and k = ', k_best)
    (X_t, y_t) = scot(X, y, k_best, e_best, XontoY=XontoY)
    return (X_t, y_t, k_best, e_best)
|
def unit_normalize(data, norm='l2', bySample=True):
    """Normalize rows (bySample=True) or columns (bySample=False) of `data`.

    Supported norms: 'l1', 'l2' (default), and 'max'.
    """
    assert (norm in ['l1', 'l2', 'max']), "Norm argument has to be either one of 'max', 'l1', or 'l2'."
    axis = 1 if (bySample == True) else 0
    return normalize(data, norm=norm, axis=axis)
|
def zscore_standardize(data):
    """Return `data` with each feature rescaled to zero mean and unit variance."""
    return StandardScaler().fit_transform(data)
|
def get_spatial_distance_matrix(data, metric='euclidean'):
    """Pairwise spatial distance matrix of `data`, rescaled to [0, 1].

    Bug fix: the default metric was misspelled 'eucledian', which
    scipy.spatial.distance.cdist rejects with a ValueError, so every call
    relying on the default crashed. Callers passing a metric explicitly are
    unaffected.

    Args:
        data: (n_samples, n_features) array.
        metric: any metric name accepted by scipy's cdist.

    Returns:
        (n_samples, n_samples) array of distances divided by their maximum.
    """
    Cdata = sp.spatial.distance.cdist(data, data, metric=metric)
    return (Cdata / Cdata.max())
|
def get_graph_distance_matrix(data, num_neighbors, mode='distance', metric='minkowski'):
    """Geodesic distance matrix over a kNN graph, rescaled to [0, 1].

    Builds a k-nearest-neighbor graph (sklearn's default 'minkowski' metric
    with p=2 is Euclidean), runs Dijkstra shortest paths on it undirected,
    caps unreachable pairs at the largest finite distance, and divides by
    the maximum so entries lie in [0, 1].
    """
    assert (mode in ['connectivity', 'distance']), "Norm argument has to be either one of 'connectivity', or 'distance'. "
    # Self-loops only make sense for a binary connectivity graph.
    include_self = (mode == 'connectivity')
    graph_data = kneighbors_graph(data, num_neighbors, mode=mode, metric=metric, include_self=include_self)
    shortest = dijkstra(csgraph=csr_matrix(graph_data), directed=False, return_predecessors=False)
    # Replace inf (disconnected pairs) with the largest finite path length.
    finite_max = np.nanmax(shortest[(shortest != np.inf)])
    shortest[(shortest > finite_max)] = finite_max
    return (shortest / shortest.max())
|
def transport_data(source, target, couplingMatrix, transposeCoupling=False):
    """Barycentric projection of one domain onto the other via an OT coupling.

    With transposeCoupling=False, the coupling is row-normalized and maps
    `target` into the source space; with transposeCoupling=True, it is
    column-normalized (then transposed) and maps `source` into the target
    space.
    """
    if not transposeCoupling:
        weights = (couplingMatrix.T / couplingMatrix.sum(1)).T
        return np.matmul(weights, target)
    weights = (couplingMatrix / couplingMatrix.sum(0)).T
    return np.matmul(weights, source)
|
class BaseFuzzer():
    """Common state for value fuzzers.

    Keeps the non-None seed `elements`, the probability `p` of producing a
    fresh random value instead of reusing a seed (forced to 1 when no seeds
    remain after filtering), an optional cap `max_l0` on distinct generated
    values, and `rand_elements`, the values generated so far.
    """
    def __init__(self, elements: List, p: float, max_l0: float=float('inf')):
        self.elements = [item for item in elements if item is not None]
        self.p = p if self.elements else 1
        self.max_l0 = max_l0
        self.rand_elements = []
    def one_sample(self):
        """Produce one fuzzed value; subclasses must implement."""
        raise NotImplementedError
    def n_examples(self, num_samples: int) -> List:
        """Produce `num_samples` fuzzed values via one_sample()."""
        return [self.one_sample() for _ in range(num_samples)]
|
class BoolFuzzer(BaseFuzzer):
    """Fuzzer for boolean values: a uniformly random True/False with
    probability p (or whenever no seed elements exist), otherwise a value
    drawn from the seed elements."""
    def __init__(self, elements, p):
        super(BoolFuzzer, self).__init__(elements, p)
    def one_sample(self):
        make_fresh = (random.random() < self.p) or (not self.elements)
        if make_fresh:
            return (random.random() < 0.5)
        return random.choice(self.elements)
|
class BitFuzzer(BaseFuzzer):
    """Fuzzer for bit values: a uniformly random 0/1 with probability p (or
    whenever no seed elements exist), otherwise a value drawn from the seed
    elements."""
    def __init__(self, elements, p):
        super(BitFuzzer, self).__init__(elements, p)
    def one_sample(self):
        make_fresh = (random.random() < self.p) or (not self.elements)
        if make_fresh:
            return int(random.random() < 0.5)
        return random.choice(self.elements)
|
def random_date(start, end):
    """Return a datetime drawn uniformly (second resolution) from [start, end).

    Sub-day components of the span are honored; microseconds are ignored.
    """
    span = (end - start)
    total_seconds = ((span.days * 86400) + span.seconds)
    return (start + timedelta(seconds=random.randrange(total_seconds)))
|
class DateFuzzer(BaseFuzzer):
    """Fuzzer for date/timestamp columns.

    Infers a strftime template and the value type (str or int) from the seed
    elements, then samples random dates either from the seeds' [min, max]
    range or from the global [START, END] range, formatting them to match
    the seeds' style.
    """
    def __init__(self, elements, p=0.5, max_l0=float('inf')):
        super(DateFuzzer, self).__init__(elements, p, max_l0)
        # Default template, replaced by the first seed that parses.
        self.template = '%Y-%m-%d %H:%M:%S'
        template_found = False
        self.orig_type = str
        element_dates = []
        for element in elements:
            # Any int seed switches the output type to int.
            if (type(element) == int):
                self.orig_type = int
            parse_result = date_parser(element)
            # Adopt the template of the first successfully parsed element.
            if ((parse_result['value'] is not None) and (not template_found)):
                self.template = parse_result['template']
                template_found = True
            if (parse_result['value'] is not None):
                element_dates.append(parse_result['value'])
        self.element_dates = element_dates
        # Sampling range from the seeds; fall back to the global [START, END]
        # range when no seeds parsed or the seed range is degenerate.
        if (len(self.element_dates) != 0):
            (self.min_date, self.max_date) = (min(self.element_dates), max(self.element_dates))
        else:
            (self.min_date, self.max_date) = (START, END)
        if (self.min_date == self.max_date):
            (self.min_date, self.max_date) = (START, END)
    def one_sample(self):
        """Return a seed element with probability p, else a formatted random date."""
        if ((random.random() < self.p) and (len(self.elements) != 0)):
            return random.choice(self.elements)
        # Cap the number of distinct generated values at max_l0.
        if (len(self.rand_elements) >= self.max_l0):
            return random.choice(self.rand_elements)
        # Widen to the global range with probability p, or when too few seed
        # dates parsed to define a meaningful range.
        (start, end) = ((START, END) if ((random.random() < self.p) or (len(self.element_dates) <= 1)) else (self.min_date, self.max_date))
        r = random_date(start, end)
        s = r.strftime(self.template)
        # Day-month-year templates spell the month out (e.g. '-3-' -> '-Mar-').
        if (self.template == '%d-%m-%Y'):
            for (num, month) in num2month.items():
                s = s.replace((('-' + num) + '-'), (('-' + month) + '-'))
        if (self.orig_type == str):
            # Mirror the seeds' zero-padding style: strip leading zeros only
            # if no string seed uses them.
            start_w0 = False
            for e in self.elements:
                if (('-0' in e) or (e[0] == '0')):
                    start_w0 = True
                    break
            if (not start_w0):
                s = s.replace('-0', '-')
                if (s[0] == '0'):
                    s = s[1:]
        result = self.orig_type(s)
        self.rand_elements.append(result)
        return result
|
def random_choices(l: List[E], k: int) -> List[E]:
    """Draw k elements from `l` uniformly with replacement.

    Returns a list of k Nones when `l` is empty.
    """
    if not l:
        return ([None] * k)
    return [l[random.randint(0, (len(l) - 1))] for _ in range(k)]
|
def get_fuzzer_from_type_str(dtype_str: str, elements: List, p=0.5, max_l0=float('inf')) -> BaseFuzzer:
    """Build a fuzzer matching a SQL column type string.

    `elements` are example values for the column; each is independently
    dropped with probability ELEMENT_DROPOUT before seeding the fuzzer.
    Dispatch order matters: the time/blob/date/bool/bit/year checks run
    before the generic numeric and char substring matches below.

    NOTE(review): falls through and returns None when no known type matches
    `dtype_str` — confirm callers tolerate a None fuzzer.
    """
    elements = [e for e in elements if (random.random() > ELEMENT_DROPOUT)]
    dtype_str = dtype_str.lower()
    if (dtype_str == 'time'):
        return TimeFuzzer(elements, p=p, max_l0=max_l0)
    # An empty type string or a blob column is fuzzed as free-form text.
    if ((dtype_str == '') or (dtype_str == 'blob')):
        return StringFuzzer(elements, p=p, max_l0=max_l0)
    if (('date' in dtype_str) or ('timestamp' in dtype_str)):
        # Integer-valued date columns get a 4-digit number fuzzer instead.
        if ((len(elements) > 0) and isint(elements[0])):
            return NumberFuzzer(elements, p=p, max_l0=max_l0, scale=4)
        return DateFuzzer(elements, p=p, max_l0=max_l0)
    if ('bool' in dtype_str):
        return BoolFuzzer(elements, p=p)
    if ('bit' in dtype_str):
        return BitFuzzer(elements, p=p)
    if ('year' in dtype_str):
        return NumberFuzzer(elements, p=p, scale=4, max_l0=max_l0)
    unsigned = False
    if ('unsigned' in dtype_str):
        unsigned = True
    # Parse parenthesized type arguments, e.g. 'decimal(10,2)' -> [10, 2].
    args = []
    if ('(' in dtype_str):
        args = [int(x) for x in dtype_str[(dtype_str.index('(') + 1):dtype_str.index(')')].split(',')]
    for s in number_dtype_str:
        if (s in dtype_str):
            # scale = digits before the decimal point, precision = digits
            # after it (SQL decimal(total_digits, precision)).
            (scale, precision) = (10, 0)
            if (len(args) != 0):
                scale = args[0]
            if (len(args) > 1):
                scale -= args[1]
                precision = args[1]
            is_int = ('int' in dtype_str)
            return NumberFuzzer(elements, p, max_l0=max_l0, scale=scale, unsigned=unsigned, is_int=is_int, precision=precision)
    for s in char_dtype_str:
        if (s in dtype_str):
            # Default max string length 20 unless the type declares one.
            length = 20
            if (len(args) != 0):
                length = args[0]
            return StringFuzzer(elements, p, max_l0=max_l0, length=length)
|
def filter_by_primary(column2elements: Dict[(str, List)], primary_keys: List[str]) -> Dict[(str, List)]:
    """Drop rows whose primary-key tuple duplicates an earlier row.

    The first occurrence of each key tuple wins; relative row order is kept.
    Returns the input mapping unchanged when no primary keys are given.
    """
    if not primary_keys:
        return column2elements
    n_rows = len(column2elements[primary_keys[0]])
    seen = set()
    keep = []
    for row in range(n_rows):
        key = tuple((column2elements[col][row] for col in primary_keys))
        if key not in seen:
            seen.add(key)
            keep.append(row)
    result = OrderedDict()
    for (column_name, elements) in column2elements.items():
        result[column_name] = [elements[row] for row in keep]
    return result
|
def filter_by_unique_keys(column2elements: Dict[(str, List)], unique_keys: Set[str]) -> Dict[(str, List)]:
    """Deduplicate rows on each unique-key column in turn (first row wins)."""
    result = column2elements
    for key in unique_keys:
        result = filter_by_primary(result, [key])
    return result
|
def restore_order(column2elements: Dict[(str, List)], column_order: List[str]) -> Dict[(str, List)]:
    """Rebuild the column mapping with columns in the given order."""
    return OrderedDict(((column, column2elements[column]) for column in column_order))
|
def rand_lin(x: int, min_c: int=MIN_C) -> int:
    """Randomized affine transform of x: min_c plus a random additive term
    plus x scaled by a random multiplicative term."""
    additive = (random.random() * ADDITIVE_C)
    multiplicative = (x * (random.random() * MULTIPLICATIVE_C))
    return (min_c + int(additive + multiplicative))
|
class DBFuzzer():
    """Generates randomized contents consistent with an existing SQLite schema.

    Reads schema, foreign-key relations and existing rows from `sqlite_path`,
    builds a per-column fuzzer seeded with existing values plus values mined
    from queries (`tab_col2values`), then produces fresh tables in which
    foreign-key columns sample from already-fuzzed parent columns and
    primary/unique-key constraints are enforced by row filtering.
    """
    def __init__(self, sqlite_path: str, tab_col2values: Dict[(Tuple[(str, str)], List[str])], p: float=0.2):
        self.sqlite_path = sqlite_path
        # Values mined from queries, keyed by (table, column); seed fuzzers.
        self.values = tab_col2values
        (self.table_column_properties, self.child2parent, self.table_column2elements) = get_all_db_info_path(sqlite_path)
        self.orig_table_name2table_size = get_table_size(self.table_column2elements)
        # Dependency-ordered processing so parents are fuzzed before children.
        (self.table_column_order, self.table_order) = get_process_order(self.child2parent, self.table_column_properties)
        self.table_primary_keys = get_primary_keys(self.table_column_properties)
        self.p = p
        self.fuzzers = OrderedDict()
        self.initialize_fuzzer()
    def get_fuzz(self, table_name2table_size=None) -> Dict[(str, Dict[(str, List)])]:
        """Produce fuzzed contents for every table: {table: {column: values}}.

        When no target sizes are given, a coin flip picks between randomly
        rescaling the original table sizes and growing sizes geometrically
        (x4 per dependency level, capped at 2500).
        """
        if (table_name2table_size is None):
            if (random.random() < 0.5):
                table_name2table_size = {table_name: rand_lin(v, (MIN_C * random.random())) for (table_name, v) in self.orig_table_name2table_size.items()}
            else:
                table_name2table_size = {}
                cur_level_size = (1 + (MIN_C * random.random()))
                for tables in self.table_order:
                    for table_name in tables:
                        table_name2table_size[table_name] = cur_level_size
                    cur_level_size *= 4
                    cur_level_size = min(cur_level_size, 2500)
        table2column2elements = {}
        for table_names in self.table_order:
            for table_name in table_names:
                column2elements = {}
                table2column2elements[table_name] = column2elements
                primary_keys = self.table_primary_keys[table_name]
                # This table's columns in fuzzing (dependency) order.
                fuzz_order_column_names = []
                for table_columns in self.table_column_order:
                    for table_column in table_columns:
                        if (table_column[0] == table_name):
                            fuzz_order_column_names.append(table_column[1])
                # This table's columns in original schema order.
                orig_order_column_names = []
                for (t, column_name) in self.table_column_properties:
                    if (t == table_name):
                        orig_order_column_names.append(column_name)
                unique_keys = set([k for k in orig_order_column_names if self.table_column_properties[(table_name, k)]['unique']])
                table_size = int(table_name2table_size[table_name])
                for column_name in fuzz_order_column_names:
                    table_column = (table_name, column_name)
                    if (table_column in self.child2parent):
                        # Foreign key: sample (with replacement) from the
                        # already-fuzzed parent column.
                        (parent_table, parent_column) = self.child2parent[table_column]
                        assert (parent_table in table2column2elements.keys()), ('table %s should have been fuzzed' % parent_table)
                        parent_elements = table2column2elements[parent_table][parent_column]
                        column2elements[column_name] = random_choices(parent_elements, k=table_size)
                    else:
                        column_fuzzer = self.fuzzers[table_column]
                        column2elements[column_name] = [column_fuzzer.one_sample() for _ in range(table_size)]
                # Deduplicate by primary key, restore schema column order,
                # then deduplicate by each unique-key column.
                transformations = [(filter_by_primary, primary_keys), (restore_order, orig_order_column_names), (filter_by_unique_keys, unique_keys)]
                for (f, arg) in transformations:
                    column2elements = f(column2elements, arg)
                table2column2elements[table_name] = column2elements
        return table2column2elements
    def initialize_fuzzer(self):
        """Build one fuzzer per column in level 0 of the column order.

        NOTE(review): only `self.table_column_order[0]` is iterated —
        presumably level 0 holds all non-foreign-key columns (foreign keys
        are sampled from parents in get_fuzz); confirm.
        """
        # With probability 0.3 disable fresh-value generation globally (p=0).
        if (random.random() < 0.3):
            self.p = 0
        for k in self.table_column_order[0]:
            (dtype_str, elements) = (self.table_column_properties[k]['type'], self.table_column2elements[k])
            # Seed with up to 5 existing values plus values mined from queries.
            elements = (random.choices(elements, k=min(5, len(elements))) + list(set(self.values[k])))
            checked = self.table_column_properties[k]['checked']
            # CHECK-constrained columns only reuse existing values (p=0).
            p = (0 if checked else self.p)
            special_key = (self.sqlite_path, k[0], k[1])
            if (special_key in special_cases):
                self.fuzzers[k] = special_cases[special_key](elements, p)
            else:
                if (special_key in corrected_keys):
                    dtype_str = corrected_keys[special_key]
                if (random.random() < 0.5):
                    (table_name, column_name) = k
                    primary_keys = self.table_primary_keys[table_name]
                    # Randomly cap distinct generated values, except for a
                    # sole primary key, which must stay unbounded (unique).
                    max_l0 = (random.random() * 30)
                    if (column_name in primary_keys):
                        if (len(primary_keys) == 1):
                            max_l0 = float('inf')
                        if (len(primary_keys) > 1):
                            max_l0 = 30
                    self.fuzzers[k] = get_fuzzer_from_type_str(dtype_str, elements, p, max_l0=max_l0)
                else:
                    self.fuzzers[k] = get_fuzzer_from_type_str(dtype_str, elements, p)
|
def fuzz_db_wrapper(args: Tuple[(str, str, Dict[(Tuple[(str, str)], List[str])])]):
    """Fuzz the database at `orig_path` and write the result to `target_path`.

    `args` is the tuple (orig_path, target_path, tab_col2values), packed for
    use with pool.map-style parallel drivers.
    """
    (orig_path, target_path, tab_col2values) = args
    print(('now fuzzing based on database %s, target path %s.' % (orig_path, target_path)))
    fuzzer = DBFuzzer(orig_path, tab_col2values)
    fuzzed_tables = fuzzer.get_fuzz()
    write_db_path(orig_path, target_path, fuzzed_tables, overwrite=True)
    assert os.path.exists(target_path), ('path %s does not exists.' % target_path)
|
def generate_random_db_with_queries_wrapper(args: Tuple[(str, str, List[str], Dict[(str, str)])]):
    """Fuzz a database using values extracted from the given queries.

    Mines typed comparison values from each query, resolves them against the
    original database, then delegates to fuzz_db_wrapper. The fourth element
    of `args` is ignored.
    """
    (orig_path, target_path, queries, _) = args
    typed_values = []
    for query in queries:
        typed_values.extend(extract_typed_value_in_comparison_from_query(query))
    tab_col2values = type_values_w_db(orig_path, typed_values, loose=True)
    fuzz_db_wrapper((orig_path, target_path, tab_col2values))
|
def count_table_occurences(query_toks: List[str], table_names: List[str]) -> int:
    """Count query tokens whose lower-cased form matches one of the table names."""
    return sum((1 for tok in query_toks if (tok.lower() in table_names)))
|
def _other_toks_same_family(tok: Token, family: Set[str]) -> List[Token]:
    """Return tokens for every *other* member of `family` when the token's
    value (lower- or upper-cased) belongs to it; otherwise an empty list."""
    ttype, value = tok.ttype, tok.value
    alternatives = []
    if (value.lower() in family) or (value.upper() in family):
        for member in family:
            if (value.lower() != member) and (value.upper() != member):
                alternatives.append(Token(ttype, member))
    return alternatives
|
def _get_int_replacement(tok: Token) -> List[Token]:
    """Propose alternative tokens for an integer literal.

    Collects NUM_ALTERNATIVES random integers from [-|v|-1, |v|+1) that
    differ from v, plus v+1 and v-1, deduplicated; returns them as tokens of
    the same type. Non-integer tokens yield an empty list.
    """
    candidates = []
    if (tok.ttype == tokens.Token.Literal.Number.Integer):
        v = int(tok.value)
        for r in np.random.randint(((- np.abs(v)) - 1), (np.abs(v) + 1), NUM_ALTERNATIVES):
            if (r != v):
                candidates.append(r)
        candidates.append((v + 1))
        candidates.append((v - 1))
    return [Token(tok.ttype, str(r)) for r in set(candidates)]
|
def _get_float_replacement(tok: Token) -> List[Token]:
    """Propose alternative tokens for a float literal.

    Collects NUM_ALTERNATIVES random floats from (-v, v) that differ from v,
    plus v+epsilon and v-epsilon, deduplicated; returns them as tokens of
    the same type. Non-float tokens yield an empty list.
    """
    candidates = []
    if (tok.ttype == tokens.Token.Literal.Number.Float):
        v = float(tok.value)
        for _ in range(NUM_ALTERNATIVES):
            r = (((np.random.random() * 2) * v) - v)
            if (r != v):
                candidates.append(r)
        candidates.append((v + epsilon))
        candidates.append((v - epsilon))
    return [Token(tok.ttype, str(r)) for r in set(candidates)]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.