code
stringlengths
17
6.64M
def add_acc_diff_cols(dists_df, acc_dict, guid_set):
    """Append one accuracy-difference column per stress test to dists_df.

    Walks every ordered seed-pair combination (skipping symmetric
    duplicates) and repeats each difference num_layers times so the column
    aligns with the per-layer row layout of dists_df.
    NOTE(review): relies on module-level `num_layers` and `get_acc_diff`.
    """
    for stress_test in guid_set:
        column = []
        for ps1 in range(1, 11):
            for fs1 in range(1, 11):
                for ps2 in range(ps1, 11):
                    for fs2 in range(1, 11):
                        # Symmetric pair already covered earlier — skip it.
                        if ps2 == ps1 and fs2 < fs1:
                            continue
                        diff = get_acc_diff(acc_dict, stress_test,
                                            ps1 - 1, ps2 - 1, fs1 - 1, fs2 - 1)
                        column.extend([diff] * num_layers)
        dists_df[f'{stress_test}_diff'] = np.array(column)
    return dists_df
def get_full_df(scores_path, dists_path, full_df_path):
    """Load pairwise distances, merge in probing-score diffs, save and return."""
    dists_df = pd.read_csv(dists_path).rename(columns={
        'step1': 'fine_seed1',
        'step2': 'fine_seed2',
        'seed1': 'pre_seed1',
        'seed2': 'pre_seed2',
    })
    print('got dists_df')
    print('adding probing scores to get full_df')
    guid_set, acc_dict = collect_scores(scores_path)
    full_df = add_acc_diff_cols(dists_df, acc_dict, guid_set)
    print('got full_df, saving:')
    full_df.to_csv(full_df_path)
    print('saved')
    return full_df
def best_seed_pair(task):
    """Return the (pre_seed, fine_seed) pair with the highest accuracy for task.

    NOTE(review): reads module-level `scores_path`.
    """
    _, acc_dict = collect_scores(scores_path)
    flat_scores = acc_dict[task].flatten()
    # argsort ascending, so the last index is the best-scoring run.
    best = flat_scores.argsort()[-1]
    return (int(best / 10), best % 10)
def ftvft_sub_df(df, task, ref_depth):
    """Rows at ref_depth (both layers) that involve the task's best seed pair."""
    best_pre, best_fine = best_seed_pair(task)
    at_depth = (df.layer1 == ref_depth) & (df.layer2 == ref_depth)
    side1 = (df.pre_seed1 == best_pre) & (df.fine_seed1 == best_fine)
    side2 = (df.pre_seed2 == best_pre) & (df.fine_seed2 == best_fine)
    return df[at_depth & (side1 | side2)]
def best_seed_pair(task):
    """Pick the (pre_seed, fine_seed) indices of the top-accuracy run for task.

    NOTE(review): reads module-level `scores_path`.
    """
    _, acc_dict = collect_scores(scores_path)
    scores = acc_dict[task].flatten()
    # Descending order of score; the first index is the winner.
    top = scores.argsort()[::-1][0]
    return (int(top / 10), top % 10)
def ftvft_sub_df(df, task, ref_depth):
    """Filter df to ref_depth rows where either side is the task's best seed pair."""
    pre, fine = best_seed_pair(task)
    mask = ((df.layer1 == ref_depth) & (df.layer2 == ref_depth)
            & (((df.pre_seed1 == pre) & (df.fine_seed1 == fine))
               | ((df.pre_seed2 == pre) & (df.fine_seed2 == fine))))
    return df[mask]
def qs(xs):
    """Map each value in xs to its rank-based quantile in [0, 1].

    NOTE(review): `pc` is an external alias (percentile-of-score style API).
    """
    return np.array([pc(xs, x, 'rank') / 100 for x in xs])
def plot_rank_corrs(rho, rho_p, tau, tau_p, METRICS, scatter=False, title=''):
    """Plot Spearman / Kendall correlations and their p-values on a 2x2 grid.

    Args:
        rho, rho_p, tau, tau_p: dicts mapping each metric name to a list of
            per-layer values (correlation / one-sided p-value).
        METRICS: ordered metric names (x-axis categories).
        scatter: if True, overlay every raw value plus its mean instead of
            drawing a bar of means.
        title: figure suptitle.

    The p-value panels use a logarithmic y-scale.
    (Refactor: the original duplicated the identical panel code four times.)
    """
    def _panel(axis, data, panel_title, log_scale=False):
        # Draw one subplot: per-metric scatter + means, or a bar of means.
        positions = list(range(len(METRICS)))
        means = [np.mean(data[metric]) for metric in METRICS]
        if scatter:
            xs, ys = [], []
            for i, metric in enumerate(METRICS):
                xs += len(data[metric]) * [i]
                ys += data[metric]
            axis.scatter(xs, ys)
            axis.scatter(positions, means)
        else:
            axis.bar(x=positions, height=means)
        axis.set_title(panel_title)
        axis.set_xticks(positions)
        axis.set_xticklabels(METRICS)
        if log_scale:
            axis.set_yscale('log')

    fig, ax = plt.subplots(2, 2, figsize=(10, 10))
    fig.suptitle(title)
    _panel(ax[0, 0], rho, "Spearman's rho")
    _panel(ax[0, 1], rho_p, "Spearman's rho: p-values", log_scale=True)
    _panel(ax[1, 0], tau, "Kendall's tau")
    _panel(ax[1, 1], tau_p, "Kendall's tau: p-values", log_scale=True)
    plt.show()
def get_rank_corrs(sub_df, metric, task):
    """Rank correlations between a distance metric and accuracy differences.

    Returns (spearman_corr, spearman_one_sided_p, kendall_corr,
    kendall_one_sided_p, bad_frac), where bad_frac is the fraction of points
    in the bottom metric quintile but the top accuracy-diff quintile.
    """
    xs = sub_df[metric]
    ys = sub_df[f'{task}_diff']

    def _one_sided_p(result):
        # Halve the two-sided p when the correlation is positive, else flip.
        if result.correlation > 0:
            return result.pvalue / 2
        return 1 - result.pvalue / 2

    rho = spearmanr(xs, ys)
    tau = kendalltau(xs, ys)
    q_x = qs(xs)
    q_y = qs(ys)
    bad_frac = np.mean((q_x < 0.2) * (q_y > 0.8))
    return (rho.correlation, _one_sided_p(rho),
            tau.correlation, _one_sided_p(tau), bad_frac)
def aggregate_rank_corrs(full_df, task, num_layers, METRICS, sub_df_fn, list_layers=None):
    """Collect per-metric rank-correlation statistics across reference layers.

    Args:
        full_df: combined distances + accuracy-difference DataFrame.
        task: task name selecting the f'{task}_diff' column downstream.
        num_layers: layer count used when list_layers is None.
        METRICS: metric column names to evaluate.
        sub_df_fn: callable (df, task, ref_depth) -> sub-DataFrame.
        list_layers: optional explicit list of reference depths.

    Returns:
        (rho, rho_p, tau, tau_p, bad_fracs): dicts mapping each metric to its
        per-layer list of values.
    """
    if list_layers is None:  # fix: identity comparison instead of `== None`
        list_layers = list(range(num_layers))
    rho = {metric: [] for metric in METRICS}
    rho_p = {metric: [] for metric in METRICS}
    tau = {metric: [] for metric in METRICS}
    tau_p = {metric: [] for metric in METRICS}
    bad_fracs = {metric: [] for metric in METRICS}
    for ref_depth in list_layers:
        sub_df = sub_df_fn(full_df, task, ref_depth)
        for metric in METRICS:
            rho_corr, rho_os_p, tau_corr, tau_os_p, bad_frac = get_rank_corrs(
                sub_df, metric, task)
            rho[metric].append(rho_corr)
            rho_p[metric].append(rho_os_p)
            tau[metric].append(tau_corr)
            tau_p[metric].append(tau_os_p)
            bad_fracs[metric].append(bad_frac)
    return (rho, rho_p, tau, tau_p, bad_fracs)
class FontDataLoader():
    """Thin iterable wrapper around torch.utils.data.DataLoader.

    Iterating the wrapper starts a fresh pass over the underlying loader.
    """

    def __init__(self, dataset, sampler, batch_size):
        self.data_loader = torch.utils.data.DataLoader(
            dataset, sampler=sampler, batch_size=batch_size)

    def __iter__(self):
        # Begin a new epoch over the wrapped loader.
        self.data_loader_iterator = iter(self.data_loader)
        return self

    def __next__(self):
        # Delegate to the current epoch's iterator (raises StopIteration at end).
        return next(self.data_loader_iterator)
class FontData():
    """Lazy holder for a font image loaded from font_path."""

    def __init__(self, font_name, font_path, image=None):
        self.font_name = font_name
        self.font_path = font_path
        # Bug fix: the `image` argument was previously discarded
        # (self.image was unconditionally set to None).
        self.image = image

    def load_data(self, loader):
        """Load the image via `loader(font_path)` on first use and cache it."""
        if self.image is None:  # fix: identity comparison instead of `== None`
            self.image = loader(self.font_path)
        return self.image

    def __repr__(self):
        return ('<FontData font_name: %s>' % self.font_name)
class FontDataset(Dataset):
    """The Font Dataset: glyph images discovered under a root directory."""

    def __init__(self, root_dir, glyph_size=(64, 64), glyphs_per_image=26):
        self.fonts = self.load_font_filenames(root_dir)
        self.root_dir = root_dir
        self.glyph_size = glyph_size
        self.glyphs_per_image = glyphs_per_image

    def __len__(self):
        return len(self.fonts)

    def __getitem__(self, index):
        # Accept tensor indices as well as plain ints.
        key = index.tolist() if torch.is_tensor(index) else index
        font_data = self.fonts[key].load_data(image_loader)
        pipeline = transforms.Compose([
            transforms.Resize(self.glyph_size[0]),
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
        ])
        return pipeline(font_data)

    def load_font_filenames(self, root_dir):
        """Walk root_dir and wrap every file found as a FontData entry."""
        assert os.path.isdir(root_dir), ('%s is not a valid directory!' % root_dir)
        entries = []
        for root, _, filenames in sorted(os.walk(root_dir)):
            for filename in filenames:
                entries.append(FontData(filename, os.path.join(root, filename)))
        return entries
def image_loader(path):
    """Open the image at `path` and return it converted to RGB."""
    img = Image.open(path)
    return img.convert('RGB')
def l1_and_adversarial_loss(D, G, real_data, generated_data, losses, options):
    """Generator loss: adversarial (min-max) term plus a weighted L1 term.

    The L1 term pulls generated samples toward the real data; the weight of
    10 is a fixed hyperparameter.
    """
    l1_lambda = 10  # fix: local variable was misspelled `l1_lamba`
    adversarial = min_max_loss(D, G, real_data, generated_data, losses, options)
    reconstruction = l1_loss(D, G, real_data, generated_data, losses, options)
    return adversarial + l1_lambda * reconstruction
def wasserstein_loss(D, G, real_data, generated_data, losses, options):
    """WGAN-GP critic loss: E[D(fake)] - E[D(real)] + gradient penalty.

    Side effects: appends GP, generated/real score means, and the total
    loss to the corresponding lists in `losses`.
    """
    real_scores = D(real_data)
    fake_scores = D(generated_data)
    batch_size, data_type = itemgetter('batch_size', 'data_type')(options)
    gp_weight = 10
    gp = calculate_gradient_penalty(D, real_data, generated_data,
                                    batch_size, gp_weight, losses, data_type)
    losses['GP'].append(gp.data)
    loss = fake_scores.mean() - real_scores.mean() + gp
    losses['Generated'].append(fake_scores.mean().data)
    losses['Real'].append(real_scores.mean().data)
    losses['D'].append(loss.data)
    return loss
def min_max_loss(D, G, real_data, generated_data, losses, options):
    """Generator's adversarial loss: maximize the critic score on fakes.

    real_data, losses and options are unused here; the signature matches the
    other loss functions so they are interchangeable.
    """
    return -D(generated_data).mean()
def l1_loss(D, G, real_data, generated_data, losses, options):
    """Mean absolute error between the generated data and the real data.

    Both tensors must share a shape. D, G, losses and options are unused
    but keep the signature uniform with the other loss functions.
    """
    criterion = torch.nn.L1Loss()
    return criterion(generated_data, real_data)
def calculate_gradient_penalty(D, real_data, generated_data, batch_size, gradient_penalty_weight, losses, data_type):
    """WGAN-GP gradient penalty on random interpolates of real and fake data.

    Side effect: appends the mean L2 gradient norm to losses['gradient_norm'].
    Returns weight * mean((||grad D(x_hat)||_2 - 1)^2).
    """
    # Per-sample mixing coefficient, broadcast to the full data shape.
    alpha = torch.rand(batch_size, 1, 1, 1).expand_as(real_data).type(data_type)
    interpolated = (alpha * real_data.data
                    + (1 - alpha) * generated_data.data).type(data_type)
    interpolated.requires_grad = True

    critic_scores = D(interpolated)
    ones = torch.ones(critic_scores.size()).type(data_type)
    gradients = torch_grad(outputs=critic_scores, inputs=interpolated,
                           grad_outputs=ones, create_graph=True,
                           retain_graph=True)[0]
    gradients = gradients.view(batch_size, -1)
    losses['gradient_norm'].append(gradients.norm(2, dim=1).mean().data)
    # Epsilon avoids the sqrt singularity at zero gradient.
    norms = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
    return gradient_penalty_weight * ((norms - 1) ** 2).mean()
def build_font_shape_generator(glyph_size=(64, 64, 1), glyph_count=26, dimension=16):
    """Generator model for our GAN (DC-GAN-like, but conditioned on an image).

    Inputs:
        glyph_size: (W, H, C) of the input image; the output has the same
            glyph size, repeated for every character of the alphabet.
        glyph_count: number of glyphs to generate.
        dimension: depth/scale of the network.

    Delegates to intermediate_generator_alt with the same arguments.
    """
    return intermediate_generator_alt(glyph_size=glyph_size,
                                      glyph_count=glyph_count,
                                      dimension=dimension)
def simple_upscale_generator(dimension):
    """Upscaling generator built from stacked ConvTranspose2d layers.

    `dimension` scales the intermediate feature widths; input and output are
    single-channel images, with a Sigmoid on the final layer.
    """
    widths = [1, 8 * dimension, 4 * dimension, 2 * dimension, dimension, 1]
    layers = [
        # Two 2x spatial upscales.
        nn.ConvTranspose2d(widths[0], widths[1], kernel_size=(4, 4), stride=(2, 2), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(widths[1], widths[2], kernel_size=(4, 4), stride=(2, 2), padding=(1, 1)),
        nn.ReLU(),
        # Same-size refinement.
        nn.ConvTranspose2d(widths[2], widths[3], kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        nn.ReLU(),
        # Width-only 2x upscale.
        nn.ConvTranspose2d(widths[3], widths[4], kernel_size=(3, 4), stride=(1, 2), padding=(1, 1)),
        nn.ReLU(),
        nn.ConvTranspose2d(widths[4], widths[5], kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*layers)
def intermediate_generator(glyph_size=(64, 64), glyph_count=26, dimension=16):
    """Conv encoder -> FC bottleneck -> deconv decoder over the alphabet strip."""
    # Flattened size after two stride-2 convs on a single glyph.
    linear_width = int(2 * dimension * glyph_size[0] / 4 * glyph_size[1] / 4)
    hidden_width = int(glyph_size[0] * glyph_size[1])
    # Width of the feature map covering all glyphs before upsampling.
    final_width = int(4 * dimension * glyph_count * 2 * 2)
    encoder = [
        nn.Conv2d(1, dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(dimension, 2 * dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        Flatten(),
    ]
    bottleneck = [
        nn.Linear(in_features=linear_width, out_features=hidden_width), nn.ReLU(),
        nn.Linear(hidden_width, final_width), nn.ReLU(),
        Unflatten(C=dimension * 4, H=2, W=2 * glyph_count),
        nn.BatchNorm2d(dimension * 4),
    ]
    decoder = [
        nn.ConvTranspose2d(4 * dimension, 2 * dimension, 4, 2, 1),
        nn.BatchNorm2d(2 * dimension), nn.ReLU(),
        nn.ConvTranspose2d(2 * dimension, dimension, 4, 2, 1),
        nn.BatchNorm2d(dimension), nn.ReLU(),
        nn.ConvTranspose2d(dimension, 1, 4, 2, 1),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*encoder, *bottleneck, *decoder)
def intermediate_generator_alt(glyph_size=(16, 16), glyph_count=26, dimension=512):
    """Deeper conv/FC/deconv generator variant with dimension-scaled widths."""
    conv_dims = [dimension, int(dimension / 2), int(dimension / 4)]
    # Flattened encoder output / hidden width / decoder input width.
    fc_widths = [
        int(conv_dims[2] * glyph_size[0] / 8 * glyph_size[1] / 8),
        int(glyph_size[0] * glyph_size[1]),
        int(glyph_size[0] / 8 * glyph_size[1] / 8 * glyph_count * dimension / 4),
    ]
    upconv_dims = [int(dimension / 4), int(dimension / 8), int(dimension / 16), 1]
    encoder = [
        nn.Conv2d(1, conv_dims[0], 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(conv_dims[0], conv_dims[1], 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(conv_dims[1], conv_dims[2], 4, 2, 1), nn.LeakyReLU(0.2),
        Flatten(),
    ]
    bottleneck = [
        nn.Linear(in_features=fc_widths[0], out_features=fc_widths[1]), nn.ReLU(),
        nn.Linear(fc_widths[1], fc_widths[2]), nn.ReLU(),
        Unflatten(C=upconv_dims[0], H=int(glyph_size[0] / 8),
                  W=int(glyph_size[1] / 8) * glyph_count),
        nn.BatchNorm2d(upconv_dims[0]),
    ]
    decoder = [
        nn.ConvTranspose2d(upconv_dims[0], upconv_dims[1], 4, 2, 1),
        nn.BatchNorm2d(upconv_dims[1]), nn.ReLU(),
        nn.ConvTranspose2d(upconv_dims[1], upconv_dims[2], 4, 2, 1),
        nn.BatchNorm2d(upconv_dims[2]), nn.ReLU(),
        nn.ConvTranspose2d(upconv_dims[2], upconv_dims[3], 4, 2, 1),
        nn.Sigmoid(),
    ]
    return nn.Sequential(*encoder, *bottleneck, *decoder)
def build_font_shape_discriminator(image_size=(64, 1664), dimension=16):
    """PyTorch model implementing the GlyphGAN critic.

    Inputs:
        image_size: size of the entire alphabet strip (usually (H, W * 26)).
        dimension: filter depth after the first conv; doubles each layer
            (1x -> 2x -> 4x -> 8x).
    """
    height, width = image_size
    # Flattened feature count after four stride-2 convolutions (/16 spatial).
    flat_features = int(8 * dimension * (height / 16) * (width / 16))
    return nn.Sequential(
        nn.Conv2d(1, dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(dimension, 2 * dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(2 * dimension, 4 * dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        nn.Conv2d(4 * dimension, 8 * dimension, 4, 2, 1), nn.LeakyReLU(0.2),
        Flatten(),
        nn.Linear(flat_features, 1),
        nn.Sigmoid())
def get_optimizer(model, learning_rate=0.0002, beta1=0.5, beta2=0.99):
    """Build an Adam optimizer for `model` with the given hyperparameters.

    Input:
        model: the PyTorch model whose parameters will be optimized.

    Returns:
        An Adam optimizer configured with lr=learning_rate and
        betas=(beta1, beta2).
    """
    return optim.Adam(model.parameters(), lr=learning_rate, betas=(beta1, beta2))
class Flatten(nn.Module):
    """Flatten (N, C, H, W) inputs to (N, C*H*W)."""

    def forward(self, x):
        batch_size, channels, height, width = x.size()
        return x.view(batch_size, channels * height * width)
class Unflatten(nn.Module):
    """Reshape (N, C*H*W) inputs to (N, C, H, W).

    N defaults to -1 so the batch dimension is inferred.
    """

    def __init__(self, N=(-1), C=128, H=7, W=7):
        super(Unflatten, self).__init__()
        self.N = N
        self.C = C
        self.H = H
        self.W = W

    def forward(self, x):
        target_shape = (self.N, self.C, self.H, self.W)
        return x.view(*target_shape)
def initialize_weights(m):
    """Xavier-initialize weights of Linear / Conv2d / ConvTranspose2d modules.

    Any other module type is left untouched, so this is safe to use with
    Module.apply().
    """
    if isinstance(m, (nn.Linear, nn.ConvTranspose2d, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight.data)
class TestFontDatasets(unittest.TestCase):
    """Unit tests for FontDataset construction and length reporting."""

    def test_cannot_create_invalid_font_dataset(self):
        # A missing root directory must trip the constructor's assertion.
        with self.assertRaises(AssertionError):
            FontDataset('does_not_exist')

    def test_can_create_font_dataset(self):
        valid_dir = abspath(join(dirname(__file__), 'test_datasets/valid'))
        self.assertEqual(1, len(FontDataset(valid_dir)))

    def test_length_of_empty_folder(self):
        empty_dir = abspath(join(dirname(__file__), 'test_datasets/empty'))
        self.assertEqual(0, len(FontDataset(empty_dir)))
def show_grayscale_image(image):
    """Display a single-channel tensor image as 3-channel grayscale.

    NOTE(review): `plot` and `transforms` are module-level aliases
    (matplotlib.pyplot / torchvision.transforms, presumably).
    """
    to_displayable = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Grayscale(num_output_channels=3),
    ])
    plot.imshow(to_displayable(image))
    plot.axis('off')
    plot.show()
def perturb(V, word):
    """Rank phoneme importance by scaling each phoneme's features by 1.5x.

    Prints the baseline probability, then one perturbed probability per
    phoneme, and returns the 1-based ranks of the perturbed scores.
    NOTE(review): relies on module-level `model`, `asarray` and `rankdata`;
    assumes 4 features per phoneme after the leading element — confirm.
    """
    def _prob(vec):
        # Probability of the positive class for this feature vector.
        return model[word].predict(asarray(vec).reshape(1, -1))[0][1]

    print(_prob(V))
    num_phonemes = (len(V) - 1) // 4
    perturbed = []
    for idx in range(num_phonemes):
        Z = list(V)
        for offset in (1, 2, 3):
            Z[idx * 4 + offset] *= 1.5
        p_i = _prob(Z)
        print(p_i)
        perturbed.append(p_i)
    return [int(i) for i in rankdata(perturbed)]
def init_matrix(data):
    """Prepare a DP distance matrix: first row/column +inf, origin 0.

    Mutates `data` in place and returns it.
    """
    inf = float('inf')
    for row in data:
        row[0] = inf
    for j in range(len(data[0])):
        data[0][j] = inf
    data[0][0] = 0
    return data
def LpDist(time_pt_1, time_pt_2):
    """L1 distance between two time points: scalar abs-diff for ints,
    element-wise summed abs-diff for vectors (e.g. numpy arrays)."""
    if type(time_pt_1) is int and type(time_pt_2) is int:
        return abs(time_pt_1 - time_pt_2)
    return sum(abs(time_pt_1 - time_pt_2))
def TWED(t1, t2, lam, nu):
    """Time-Warped Edit Distance between two multivariate time series.

    Requires:
        t1, t2: multivariate time series (numpy matrix / sequence of points).
        lam: penalty (lambda) parameter for insert/delete operations.
        nu: stiffness coefficient weighting timestamp differences.

    Returns the TWED distance between the two time series.

    NOTE(review): lam is multiplied by nu here (nu * (dt + lam)); the
    standard TWED recursion adds lam unscaled — confirm this is intended.
    """
    t1_data = t1
    t2_data = t2
    # DP matrix; init_matrix sets the first row/column to +inf, origin to 0.
    result = [([0] * len(t2_data)) for row in range(len(t1_data))]
    result = init_matrix(result)
    n = len(t1_data)
    m = len(t2_data)
    # Implicit, uniformly spaced timestamps 1..n and 1..m.
    t1_time = range(1, (len(t1_data) + 1))
    t2_time = range(1, (len(t2_data) + 1))
    assert (len(t1_time) == n)
    assert (len(t2_time) == m)
    for i in range(1, n):
        for j in range(1, m):
            cost = LpDist(t1_data[i], t2_data[j])  # NOTE(review): unused
            # Insertion: advance along t1 only.
            insertion = ((result[(i - 1)][j] + LpDist(t1_data[(i - 1)], t1_data[i])) + (nu * ((t1_time[i] - t1_time[(i - 1)]) + lam)))
            # Deletion: advance along t2 only.
            deletion = ((result[i][(j - 1)] + LpDist(t2_data[(j - 1)], t2_data[j])) + (nu * ((t2_time[j] - t2_time[(j - 1)]) + lam)))
            # Match: align current and previous points of both series.
            match = ((((result[(i - 1)][(j - 1)] + LpDist(t1_data[i], t2_data[j])) + (nu * abs((t1_time[i] - t2_time[j])))) + LpDist(t1_time[(i - 1)], t2_time[(j - 1)])) + (nu * abs((t1_time[(i - 1)] - t2_time[(j - 1)]))))
            result[i][j] = min(insertion, deletion, match)
    return result[(n - 1)][(m - 1)]
class HyperParams():
    """Container for UniWarp hyperparameter configuration."""

    def __init__(self):
        pass

    def get_uniwarp_config(self, argv):
        """Return the default UniWarp config dict (argv is currently unused)."""
        config = {}
        config['optimizer:num_epochs'] = 1000000
        config['model:num_batch_pairs'] = 100
        config['uniwarp:length'] = 1024
        config['uniwarp:rnn_encoder_layers'] = [256, 128, 64]
        config['uniwarp:warp_nn_layers'] = [64, 16, 1]
        config['uniwarp:eta'] = 0.0001
        config['uniwarp:max_grad_norm'] = 10.0
        config['uniwarp:lambda'] = 0.0
        config['uniwarp:cnn_encoder_layers'] = [1024, 256, 64]
        config['uniwarp:cnn_kernel_lengths'] = [5, 5, 3]
        config['uniwarp:cnn_strides'] = [2, 1, 1]
        config['uniwarp:dropout_rate'] = 0.05
        config['uniwarp:enable_batch_normalization'] = True
        config['dataset:num_channels'] = 1
        return config

    @staticmethod
    def restore(file_path):
        """Parse a JSON configuration string.

        Fix: this was declared without `self`, so calling it on an instance
        raised a TypeError; it is now a staticmethod (class-level calls keep
        working unchanged).
        NOTE(review): despite the name, this parses the *string* passed in
        (json.loads); it does not read a file at `file_path` — confirm intent.
        """
        return json.loads(file_path)
class Inference_Experiments():
    """Inference-time evaluation harness for trained similarity models (TF1).

    Restores a checkpointed model and runs 1-NN classification, pairwise
    distance export, and accuracy/loss evaluation over sampled test pairs.
    """

    def __init__(self, model_type, model_file, dataset_path):
        # model_type: one of SiameseRNN / WarpedSiameseRNN / CNNSim / CNNWarpedSim.
        # model_file: checkpoint path passed to tf.train.Saver.restore.
        self.model_type = model_type
        self.model_file = model_file
        self.dataset_path = dataset_path
        hp = HyperParams()
        self.config = hp.get_uniwarp_config(None)
        self.ds = Dataset()
        self.ds.load_multivariate(dataset_path)
        # Overwrite defaults with the loaded dataset's geometry.
        self.config['uniwarp:length'] = self.ds.series_length
        self.config['dataset:num_channels'] = self.ds.num_channels
        self.model = None
        if (model_type == 'SiameseRNN'):
            self.model = rnn_models.SiameseRNN(config=self.config)
        elif (model_type == 'WarpedSiameseRNN'):
            self.model = rnn_models.WarpedSiameseRNN(config=self.config)
        elif (model_type == 'CNNSim'):
            self.model = cnn_models.CNNSim(config=self.config)
        elif (model_type == 'CNNWarpedSim'):
            self.model = cnn_models.CNNWarpedSim(config=self.config)
        else:
            # NOTE(review): falls through with self.model = None and then
            # crashes on create_model() below for unknown types.
            print('Test - No model of type', model_type)
        self.model.create_model()
        self.saver = tf.train.Saver()
        # Batch buffer: pairs are interleaved as (a0, b0, a1, b1, ...).
        self.X_batch = np.zeros(((2 * self.config['model:num_batch_pairs']), self.config['uniwarp:length'], self.config['dataset:num_channels']))
        self.true_sim_batch = np.zeros((self.config['model:num_batch_pairs'],))
        print('Model has', self.model.num_model_parameters(), 'parameters')

    def infer_dataset(self, start_pct, chunk_pct):
        """1-NN classify test instances in [start_pct, start_pct + chunk_pct).

        Pairs each test series against every training series and assigns the
        label of the most similar one; prints a running accuracy.
        """
        start_range = int((start_pct * self.ds.num_test_instances))
        stop_range = int(((start_pct + chunk_pct) * self.ds.num_test_instances))
        if (stop_range > self.ds.num_test_instances):
            stop_range = self.ds.num_test_instances
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            (correct, num_infers) = (0, 0)
            time = (- 1)  # NOTE(review): placeholder, never updated
            for idx_test in range(start_range, stop_range):
                max_similarity = 0
                max_similarity_idx = 0
                for idx in range(0, self.ds.num_train_instances, self.config['model:num_batch_pairs']):
                    start_idx = idx
                    # Clamp the final window so a full batch can be formed
                    # (some train items are then compared twice — harmless).
                    if ((idx + self.config['model:num_batch_pairs']) >= self.ds.num_train_instances):
                        start_idx = (self.ds.num_train_instances - self.config['model:num_batch_pairs'])
                    for i in range(self.config['model:num_batch_pairs']):
                        self.X_batch[(2 * i)] = self.ds.X_test[idx_test]
                        self.X_batch[((2 * i) + 1)] = self.ds.X_train[(start_idx + i)]
                    sim = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: self.X_batch, self.model.is_training: False})
                    for i in range(self.config['model:num_batch_pairs']):
                        if (sim[i] >= max_similarity):
                            max_similarity = sim[i]
                            max_similarity_idx = (start_idx + i)
                if np.array_equal(self.ds.Y_test[idx_test], self.ds.Y_train[max_similarity_idx]):
                    correct += 1
                num_infers += 1
                print(idx_test, (correct / num_infers))
            # NOTE(review): `dataset_path` is undefined in this scope — likely
            # meant self.dataset_path; this line would raise a NameError.
            print(num_infers, correct, time, dataset_path)

    def test_pairwise_similarities(self, n, folder_path):
        """Compute the n-by-n test-set distance matrix (1 - similarity), save it
        and the corresponding labels as .npy files under folder_path."""
        num_test_series = n
        dists = np.zeros((num_test_series, num_test_series))
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            pairs_list = []
            for i in np.arange(0, num_test_series, 1):
                for j in np.arange(0, num_test_series, 1):
                    pairs_list.append((i, j))
            num_pairs = len(pairs_list)
            batch_start_pair_idx = 0
            print('Num pairs:', len(pairs_list))
            while (batch_start_pair_idx < num_pairs):
                for i in range(self.config['model:num_batch_pairs']):
                    j = (batch_start_pair_idx + i)
                    # Pad the final batch by repeating the last pair.
                    if (j >= num_pairs):
                        j = (num_pairs - 1)
                    self.X_batch[(2 * i)] = self.ds.X_test[pairs_list[j][0]]
                    self.X_batch[((2 * i) + 1)] = self.ds.X_test[pairs_list[j][1]]
                sim = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: self.X_batch, self.model.is_training: False})
                for i in range(self.config['model:num_batch_pairs']):
                    j = (batch_start_pair_idx + i)
                    if (j >= num_pairs):
                        j = (num_pairs - 1)
                    dists[pairs_list[j][0]][pairs_list[j][1]] = (1.0 - sim[i])
                batch_start_pair_idx += self.config['model:num_batch_pairs']
            print(dists.shape)
            np.save(os.path.join(folder_path, (((self.model.name + '_') + self.ds.dataset_name) + '_dists.npy')), dists)
            np.save(os.path.join(folder_path, (((self.model.name + '_') + self.ds.dataset_name) + '_labels.npy')), self.ds.Y_test[:num_test_series])

    def pairwise_test_accuracy(self, num_test_batches):
        """Average accuracy of thresholded (>= 0.5) similarity over sampled
        balanced positive/negative test pairs."""
        test_acc = 0
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            for i in range(num_test_batches):
                batch_pairs_idxs = []
                batch_true_similarities = []
                # Balanced batch: alternating positive / negative test pairs.
                for j in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.ds.draw_test_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.ds.draw_test_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                X_batch = np.take(a=self.ds.X_test, indices=batch_pairs_idxs, axis=0)
                sim_batch = np.asarray(batch_true_similarities)
                pred_similarities = sess.run(self.model.pred_similarities, feed_dict={self.model.X_batch: X_batch, self.model.true_similarities: sim_batch, self.model.is_training: False})
                pred_label = np.where((pred_similarities >= 0.5), 1, 0)
                test_acc += sklearn.metrics.accuracy_score(sim_batch, pred_label)
                print(i, (test_acc / (i + 1)))
            print((test_acc / num_test_batches))

    def transductive_test_loss(self):
        """Average model loss over sampled test pairs (no parameter updates)."""
        test_loss = 0
        with tf.Session() as sess:
            self.saver.restore(sess, self.model_file)
            # NOTE(review): `num_test_batches` is undefined in this scope —
            # likely a missing parameter (cf. pairwise_test_accuracy); this
            # method would raise a NameError as written.
            for i in range(num_test_batches):
                batch_pairs_idxs = []
                batch_true_similarities = []
                for j in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.ds.draw_test_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.ds.draw_test_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                X_batch = np.take(a=self.ds.X_test, indices=batch_pairs_idxs, axis=0)
                sim_batch = np.asarray(batch_true_similarities)
                batch_loss = sess.run(self.model.loss, feed_dict={self.model.X_batch: X_batch, self.model.true_similarities: sim_batch, self.model.is_training: False})
                test_loss += batch_loss
                print(i, (test_loss / (i + 1)))
            print((test_loss / num_test_batches))
class Optimizer():
    """Training-loop driver: samples similar/dissimilar pairs and updates the model."""

    def __init__(self, config, dataset, sim_model):
        # config: hyperparameter dict; dataset: pair-sampling dataset wrapper;
        # sim_model: similarity model exposing loss/update_rule/placeholders.
        self.config = config
        self.dataset = dataset
        self.num_epochs = self.config['optimizer:num_epochs']
        self.sim_model = sim_model
        self.saver = tf.train.Saver(max_to_keep=100)

    def optimize(self):
        """Run the training loop, logging and checkpointing every `freq` epochs."""
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            loss = 0
            freq = 100  # logging / checkpoint interval, in epochs
            for epoch_idx in range(self.num_epochs):
                batch_true_similarities = []
                batch_pairs_idxs = []
                # Balanced batch: one positive (label 1.0) and one negative
                # (label 0.0) pair per inner iteration.
                for i in range((self.config['model:num_batch_pairs'] // 2)):
                    pos_idxs = self.dataset.draw_pair(True)
                    batch_pairs_idxs.append(pos_idxs[0])
                    batch_pairs_idxs.append(pos_idxs[1])
                    batch_true_similarities.append(1.0)
                    neg_idxs = self.dataset.draw_pair(False)
                    batch_pairs_idxs.append(neg_idxs[0])
                    batch_pairs_idxs.append(neg_idxs[1])
                    batch_true_similarities.append(0.0)
                pair_loss = self.update_model(sess, batch_pairs_idxs, batch_true_similarities)
                loss += pair_loss
                if ((epoch_idx % freq) == 0):
                    if (epoch_idx > 0):
                        loss /= freq
                    print('DS', epoch_idx, self.dataset.dataset_name, loss)
                    self.saver.save(sess, (((('./saved_models/' + self.sim_model.name) + '_') + self.dataset.dataset_name) + '.ckpt'), global_step=(epoch_idx // freq))
                    loss = 0

    def update_model(self, sess, batch_pairs_idxs, batch_true_similarities):
        """Run one gradient step; returns the pre-update loss for the batch."""
        X_batch = np.take(a=self.dataset.X_train, indices=batch_pairs_idxs, axis=0)
        sim_batch = np.asarray(batch_true_similarities)
        # Loss is evaluated before the update, with is_training=False so
        # batch-norm / dropout behave in inference mode for the measurement.
        pair_loss = sess.run(self.sim_model.loss, feed_dict={self.sim_model.X_batch: X_batch, self.sim_model.true_similarities: sim_batch, self.sim_model.is_training: False})
        sess.run(self.sim_model.update_rule, feed_dict={self.sim_model.X_batch: X_batch, self.sim_model.true_similarities: sim_batch, self.sim_model.is_training: True})
        return pair_loss
class AbstractSimModel():
    """Base class for pairwise time-series similarity models (TF1 graph setup).

    Subclasses implement create_encoder / create_similarity; this class owns
    the placeholders and the optimization routine.
    """

    def __init__(self, config):
        self.config = config
        self.minus_one_constant = tf.constant((- 1.0), dtype=tf.float32)
        # Series length; batches hold 2 * num_batch_pairs series (pairs interleaved).
        self.sequence_length = self.config['uniwarp:length']
        self.X_batch = tf.placeholder(shape=((2 * self.config['model:num_batch_pairs']), self.config['uniwarp:length'], self.config['dataset:num_channels']), dtype=tf.float32)
        self.true_similarities = tf.placeholder(shape=(self.config['model:num_batch_pairs'],), dtype=tf.float32)
        self.pair_dists = None
        self.h = (None, None)
        # Filled in by subclass / create_optimization_routine.
        (self.loss, self.pred_similarities, self.update_rule) = (None, None, None)
        # Weight applied to additional_loss (if a subclass sets one).
        self.reg_penalty = tf.constant(self.config['uniwarp:lambda'], dtype=tf.float32)
        self.name = 'AbstractSingleSimModel'
        self.is_training = tf.placeholder(tf.bool)
        self.additional_loss = None

    def num_model_parameters(self):
        """Return the total number of scalar parameters across trainable variables."""
        total_parameters = 0
        for variable in tf.trainable_variables():
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        return total_parameters

    def create_encoder(self):
        # Abstract: subclasses build the series encoder here.
        print('ERROR: Encoder left undefined')
        pass

    def create_similarity(self):
        # Abstract: subclasses define pred_similarities here.
        print('ERROR: Similarity left undefined')
        pass

    def dist_pair(self, pair_ixd):
        # Abstract: distance for a single pair index.
        print('ERROR: Distance of pairs left undefined')
        pass

    def create_optimization_routine(self):
        """Build log-loss (+ optional penalty) and a clipped-gradient Adam update."""
        with tf.variable_scope('OptimizationRoutines'):
            self.loss = tf.losses.log_loss(self.true_similarities, self.pred_similarities)
            if (self.additional_loss is not None):
                print('Adding penalty term', self.additional_loss)
                self.loss += (self.reg_penalty * self.additional_loss)
            trainable_vars = tf.trainable_variables()
            # Ensure batch-norm moving-average updates run before each step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                (clipped_grads, _) = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_vars), self.config['uniwarp:max_grad_norm'])
                self.update_rule = tf.train.AdamOptimizer(self.config['uniwarp:eta']).apply_gradients(zip(clipped_grads, trainable_vars))

    def create_model(self):
        """Assemble encoder, similarity head, and optimizer, in that order."""
        self.create_encoder()
        self.create_similarity()
        self.create_optimization_routine()
def kernel(r):
    """Exponential covariance kernel: prior_std**2 * exp(-r).

    NOTE(review): reads module-level `prior_std`.
    """
    variance = prior_std ** 2
    return variance * np.exp(-r)
def forward_model(s, parallelization, ncores=None):
    """Run the ERT forward model on parameter field s.

    When parallelization is truthy, ncores is forwarded to the runner.
    NOTE(review): reads module-level `ert` and `forward_params`.
    """
    model = ert.Model(forward_params)
    if not parallelization:
        return model.run(s, parallelization)
    return model.run(s, parallelization, ncores)
def kernel(r):
    """Exponential covariance: sigma^2 * exp(-r), with sigma = prior_std.

    NOTE(review): reads module-level `prior_std`.
    """
    return np.exp(-r) * (prior_std ** 2)
def forward_model(s, parallelization, ncores=None):
    """Evaluate the forward model on s, optionally in parallel over ncores.

    NOTE(review): reads module-level `Model` and `forward_params`.
    """
    model = Model(forward_params)
    run_args = [s, parallelization]
    if parallelization:
        run_args.append(ncores)
    return model.run(*run_args)
def kernel(r):
    """Squared-exponential (Gaussian) covariance: prior_std**2 * exp(-r^2).

    NOTE(review): reads module-level `prior_std`.
    """
    variance = prior_std ** 2
    return variance * np.exp(-(r ** 2))
def forward_model(s, parallelization, ncores=None):
    """Run the MARE2DEM forward model on an nx-by-ny parameter grid.

    NOTE(review): reads module-level `mare2dem`, `nx`, `ny`.
    """
    grid_params = {'nx': nx, 'ny': ny}
    model = mare2dem.Model(grid_params)
    if parallelization:
        return model.run(s, parallelization, ncores)
    return model.run(s, parallelization)
def kernel(r):
    """Gaussian covariance kernel with variance prior_std**2.

    NOTE(review): reads module-level `prior_std`.
    """
    return np.exp(-(r ** 2)) * (prior_std ** 2)
def forward_model(s, parallelization, ncores=None):
    """Evaluate the MARE2DEM forward model for s (parallel when requested).

    NOTE(review): reads module-level `mare2dem`, `nx`, `ny`.
    """
    model = mare2dem.Model({'nx': nx, 'ny': ny})
    if not parallelization:
        return model.run(s, parallelization)
    return model.run(s, parallelization, ncores)
def kernel(r):
    """Exponential kernel with variance prior_std**2 and unit length scale.

    NOTE(review): reads module-level `prior_std`.
    """
    sigma2 = prior_std ** 2
    return sigma2 * np.exp(-r)
def forward_model(s, parallelization, ncores=None):
    """Run the dd forward model on s (no extra model parameters).

    NOTE(review): reads module-level `dd`.
    """
    model = dd.Model({})
    if parallelization:
        return model.run(s, parallelization, ncores)
    return model.run(s, parallelization)
def kernel(r):
    """Exponential covariance used for the prior: prior_std**2 * exp(-r).

    NOTE(review): reads module-level `prior_std`.
    """
    return np.exp(-r) * prior_std ** 2
def forward_model(s, parallelization, ncores=None):
    """Run the dd forward model on s in log space.

    NOTE(review): reads module-level `dd`.
    """
    model = dd.Model({'log': True})
    run_args = [s, parallelization]
    if parallelization:
        run_args.append(ncores)
    return model.run(*run_args)
class SepConvGRU(nn.Module):
    """Separable-convolution GRU: a horizontal (1x3) then a vertical (3x1) pass.

    Hidden state has 128 channels; gate convolutions take the 256-channel
    concatenation of hidden state and input.
    """

    def __init__(self):
        super(SepConvGRU, self).__init__()
        hidden_dim = 128
        cat_dim = 256  # hidden + input channels
        # Horizontal-pass gates (1x3 kernels).
        self.convz1 = nn.Conv2d(cat_dim, hidden_dim, (1, 3), padding=(0, 1))
        self.convr1 = nn.Conv2d(cat_dim, hidden_dim, (1, 3), padding=(0, 1))
        self.convq1 = nn.Conv2d(cat_dim, hidden_dim, (1, 3), padding=(0, 1))
        # Vertical-pass gates (3x1 kernels).
        self.convz2 = nn.Conv2d(cat_dim, hidden_dim, (3, 1), padding=(1, 0))
        self.convr2 = nn.Conv2d(cat_dim, hidden_dim, (3, 1), padding=(1, 0))
        self.convq2 = nn.Conv2d(cat_dim, hidden_dim, (3, 1), padding=(1, 0))

    def _gru_step(self, h, x, convz, convr, convq):
        # One GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        h = self._gru_step(h, x, self.convz1, self.convr1, self.convq1)  # horizontal
        h = self._gru_step(h, x, self.convz2, self.convr2, self.convq2)  # vertical
        return h
class R_MSFM3(nn.Module):
    """3-iteration recurrent multi-scale depth network (R-MSFM).

    Each iteration injects encoder features from one scale (deepest first),
    fuses them through a SepConvGRU, and refines a disparity field with a
    BasicUpdateBlock.

    Parameters
    ----------
    x : bool-like — selects the lateral conv widths; truthy assumes the
        deeper-encoder feature channels (256/512), falsy the shallow ones
        (64/128). NOTE(review): exact encoder pairing inferred from channel
        counts — confirm against the caller.
    """

    def __init__(self, x):
        super(R_MSFM3, self).__init__()
        # Projects the finest (64-ch) features down to 128 channels at 1/4 resolution.
        self.convX11 = torch.nn.Sequential(
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=0, bias=True),
            torch.nn.LeakyReLU(inplace=True),
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
            torch.nn.Tanh())
        if x:
            # Wider encoder variant: mid/deep features are 256/512 channels.
            self.convX21 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
                torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True),
                torch.nn.Tanh())
        else:
            # Narrow encoder variant: mid/deep features are 64/128 channels.
            self.convX21 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
                torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True),
                torch.nn.Tanh())
        self.sigmoid = nn.Sigmoid()
        self.update_block = BasicUpdateBlock()
        self.gruc = SepConvGRU()

    def upsample_depth(self, flow, mask):
        """Upsample depth field [H/8, W/8] -> [H, W] using a learned convex
        combination of each pixel's 3x3 neighbourhood (RAFT-style)."""
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        # Softmax over the 9 neighbours makes the combination convex.
        mask = torch.softmax(mask, dim=2)
        up_flow = F.unfold(flow, [3, 3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, (8 * H), (8 * W))

    def forward(self, features, iters=3):
        """Estimate disparity for a single image.

        features: tuple (x1, x2, x3) of encoder feature maps, fine to coarse.
        Returns a dict keyed by ('disp_up', itr); in eval mode only the last
        iteration is included.
        """
        (x1, x2, x3) = features
        disp_predictions = {}
        (b, c, h, w) = x3.shape
        # Disparity feature and recurrent state start at zero, at 1/8 resolution.
        dispFea = torch.zeros([b, 1, h, w], requires_grad=True).to(x1.device)
        net = torch.zeros([b, 256, h, w], requires_grad=True).to(x1.device)
        for itr in range(iters):
            # Inject one encoder scale per iteration, coarsest (x3) first,
            # fusing with the previous correlation via the SepConvGRU.
            if (itr in [0]):
                corr = self.convX31(x3)
            elif (itr in [1]):
                corrh = corr
                corr = self.convX21(x2)
                corr = self.gruc(corrh, corr)
            elif (itr in [2]):
                corrh = corr
                corr = self.convX11(x1)
                corr = self.gruc(corrh, corr)
            (net, up_mask, delta_disp) = self.update_block(net, corr, dispFea)
            dispFea = (dispFea + delta_disp)
            # Sigmoid bounds disparity to (0, 1).
            disp = self.sigmoid(dispFea)
            if self.training:
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
            elif ((iters - 1) == itr):
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
        return disp_predictions
class R_MSFM6(nn.Module):
    """6-iteration recurrent multi-scale depth network (R-MSFM).

    Like R_MSFM3 but interleaves a feature-refinement conv (convX12/22/32)
    between each scale injection, giving two iterations per scale.

    Parameters
    ----------
    x : bool-like — truthy selects the wider (256/512-channel) lateral convs,
        falsy the narrow (64/128-channel) ones.
    """

    def __init__(self, x):
        super(R_MSFM6, self).__init__()
        # Finest-scale projection: 64 -> 128 channels at 1/4 resolution.
        self.convX11 = torch.nn.Sequential(
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=0, bias=True),
            torch.nn.LeakyReLU(inplace=True),
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
            torch.nn.Tanh())
        # Separable 1x3 + 3x1 refinement applied on the odd iterations.
        self.convX12 = torch.nn.Sequential(
            nn.Conv2d(128, 128, (1, 3), padding=(0, 1)),
            torch.nn.Tanh(),
            nn.Conv2d(128, 128, (3, 1), padding=(1, 0)),
            torch.nn.Tanh())
        if x:
            self.convX21 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
                torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=0, bias=True),
                torch.nn.Tanh())
        else:
            self.convX21 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0, bias=True),
                torch.nn.Tanh())
            self.convX31 = torch.nn.Sequential(
                nn.ReflectionPad2d(1),
                torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=0, dilation=1, bias=True),
                torch.nn.Tanh())
        self.convX22 = torch.nn.Sequential(
            nn.Conv2d(128, 128, (1, 3), padding=(0, 1)),
            torch.nn.Tanh(),
            nn.Conv2d(128, 128, (3, 1), padding=(1, 0)),
            torch.nn.Tanh())
        self.convX32 = torch.nn.Sequential(
            nn.Conv2d(128, 128, (1, 3), padding=(0, 1)),
            torch.nn.Tanh(),
            nn.Conv2d(128, 128, (3, 1), padding=(1, 0)),
            torch.nn.Tanh())
        self.sigmoid = nn.Sigmoid()
        self.gruc = SepConvGRU()
        self.update_block = BasicUpdateBlock()

    def upsample_depth(self, flow, mask):
        """Upsample depth field [H/8, W/8] -> [H, W] using a learned convex
        combination of each pixel's 3x3 neighbourhood (RAFT-style)."""
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        # Softmax over the 9 neighbours makes the combination convex.
        mask = torch.softmax(mask, dim=2)
        up_flow = F.unfold(flow, [3, 3], padding=1)
        up_flow = up_flow.view(N, 1, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 1, (8 * H), (8 * W))

    def forward(self, features, iters=6):
        """Estimate disparity for a single image.

        features: tuple (x1, x2, x3) of encoder feature maps, fine to coarse.
        Returns a dict keyed by ('disp_up', itr); in eval mode only the last
        iteration is included.
        """
        (x1, x2, x3) = features
        disp_predictions = {}
        (b, c, h, w) = x3.shape
        dispFea = torch.zeros([b, 1, h, w], requires_grad=True).to(x1.device)
        net = torch.zeros([b, 256, h, w], requires_grad=True).to(x1.device)
        for itr in range(iters):
            # Even iterations inject a new encoder scale (coarse to fine);
            # odd iterations refine the current correlation. Each fuses with
            # the previous state through the SepConvGRU.
            if (itr in [0]):
                corr = self.convX31(x3)
            elif (itr in [1]):
                corrh = corr
                corr = self.convX32(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [2]):
                corrh = corr
                corr = self.convX21(x2)
                corr = self.gruc(corrh, corr)
            elif (itr in [3]):
                corrh = corr
                corr = self.convX22(corr)
                corr = self.gruc(corrh, corr)
            elif (itr in [4]):
                corrh = corr
                corr = self.convX11(x1)
                corr = self.gruc(corrh, corr)
            elif (itr in [5]):
                corrh = corr
                corr = self.convX12(corr)
                corr = self.gruc(corrh, corr)
            (net, up_mask, delta_disp) = self.update_block(net, corr, dispFea)
            dispFea = (dispFea + delta_disp)
            # Sigmoid bounds disparity to (0, 1).
            disp = self.sigmoid(dispFea)
            if self.training:
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
            elif ((iters - 1) == itr):
                disp_up = self.upsample_depth(disp, up_mask)
                disp_predictions[('disp_up', itr)] = disp_up
        return disp_predictions
class ConvBlock(nn.Module):
    """3x3 convolution (with reflection padding) followed by LeakyReLU."""

    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        self.nonlin = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        return self.nonlin(self.conv(x))
class Conv3x3(nn.Module):
    """Pad by one pixel (reflection by default, zeros otherwise) and apply a
    3x3 convolution, preserving spatial size."""

    def __init__(self, in_channels, out_channels, use_refl=True):
        super(Conv3x3, self).__init__()
        self.pad = nn.ReflectionPad2d(1) if use_refl else nn.ZeroPad2d(1)
        self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3)

    def forward(self, x):
        return self.conv(self.pad(x))
class dispHead(nn.Module):
    """Two-conv head mapping a 192-channel feature map to a 1-channel
    disparity update, preserving spatial size."""

    def __init__(self):
        super(dispHead, self).__init__()
        outD = 1  # single disparity channel
        self.covd1 = torch.nn.Sequential(
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=192, out_channels=256, kernel_size=3, stride=1, padding=0, bias=True),
            torch.nn.LeakyReLU(inplace=True))
        self.covd2 = torch.nn.Sequential(
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=256, out_channels=outD, kernel_size=3, stride=1, padding=0, bias=True))

    def forward(self, x):
        hidden = self.covd1(x)
        return self.covd2(hidden)
class BasicMotionEncoder(nn.Module):
    """Fuse correlation features with the current depth estimate.

    Output has 192 channels: 191 encoded channels concatenated with the raw
    1-channel depth input.
    """

    def __init__(self):
        super(BasicMotionEncoder, self).__init__()
        # Correlation branch: 128 -> 160 -> 128 channels.
        self.convc1 = ConvBlock(128, 160)
        self.convc2 = ConvBlock(160, 128)
        # Depth branch: 1 -> 64 (7x7) -> 32 (3x3) channels, reflection padded.
        self.convf1 = torch.nn.Sequential(
            nn.ReflectionPad2d(3),
            torch.nn.Conv2d(in_channels=1, out_channels=64, kernel_size=7, padding=0, bias=True),
            torch.nn.LeakyReLU(inplace=True))
        self.convf2 = torch.nn.Sequential(
            nn.ReflectionPad2d(1),
            torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, padding=0, bias=True),
            torch.nn.LeakyReLU(inplace=True))
        # Fusion conv leaves one channel free for the raw depth.
        self.conv = ConvBlock(128 + 32, 192 - 1)

    def forward(self, depth, corr):
        corr_feat = self.convc2(self.convc1(corr))
        depth_feat = self.convf2(self.convf1(depth))
        fused = self.conv(torch.cat([corr_feat, depth_feat], dim=1))
        return torch.cat([fused, depth], dim=1)
class BasicUpdateBlock(nn.Module):
    """One refinement step: encode (depth, corr), then predict a depth delta
    and a convex-upsampling mask.

    NOTE(review): the incoming ``net`` argument is overwritten immediately by
    the encoder output, so no recurrent state is actually carried between
    calls — this mirrors the original implementation.
    """

    def __init__(self):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder()
        self.flow_head = dispHead()
        # Produces 64*9 logits per pixel: 9 neighbour weights for each of the
        # 8x8 = 64 upsampled sub-pixels.
        self.mask = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(192, 324, 3),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(324, 64 * 9, 1, padding=0))

    def forward(self, net, corr, depth):
        net = self.encoder(depth, corr)
        delta_depth = self.flow_head(net)
        # 0.25 scales the mask logits (and their gradients).
        mask = 0.25 * self.mask(net)
        return (net, mask, delta_depth)
class ResNetMultiImageInput(models.ResNet):
    """ResNet whose first convolution accepts ``num_input_images`` stacked
    RGB frames (3 * num_input_images input channels).

    Adapted from
    https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    """

    def __init__(self, block, layers, num_classes=1000, num_input_images=1):
        super(ResNetMultiImageInput, self).__init__(block, layers)
        self.inplanes = 64
        # Only conv1 differs from stock ResNet: widened input channels.
        self.conv1 = nn.Conv2d(num_input_images * 3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Re-initialise all convs / batchnorms (conv1 was replaced above).
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
def resnet_multiimage_input(num_layers, pretrained=False, num_input_images=1):
    """Construct a ResNet taking multiple stacked input frames.

    Args:
        num_layers (int): Number of resnet layers. Must be 18 or 50.
        pretrained (bool): If True, load ImageNet weights; the conv1 kernel is
            tiled across the frame stack and rescaled to keep activations stable.
        num_input_images (int): Number of frames stacked as input.
    """
    assert num_layers in [18, 50], 'Can only run with 18 or 50 layer resnet'
    layer_cfg = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers]
    block_cls = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers]
    model = ResNetMultiImageInput(block_cls, layer_cfg, num_input_images=num_input_images)
    if pretrained:
        state = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)])
        # Average the pretrained conv1 kernel over the repeated frames.
        state['conv1.weight'] = torch.cat([state['conv1.weight']] * num_input_images, 1) / num_input_images
        model.load_state_dict(state)
    return model
class ResnetEncoder(nn.Module):
    """ResNet encoder returning the first three feature scales
    (stem relu, layer1, layer2)."""

    def __init__(self, num_layers, pretrained, num_input_images=1):
        super(ResnetEncoder, self).__init__()
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])
        resnets = {18: models.resnet18, 34: models.resnet34, 50: models.resnet50,
                   101: models.resnet101, 152: models.resnet152}
        if num_layers not in resnets:
            raise ValueError('{} is not a valid number of resnet layers'.format(num_layers))
        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)
        if num_layers > 34:
            # Bottleneck variants expand channel counts four-fold.
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        self.features = []
        # Fixed mean/std normalisation (0.45 / 0.225) applied to the input.
        x = (input_image - 0.45) / 0.225
        x = self.encoder.bn1(self.encoder.conv1(x))
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        self.features.append(self.encoder.layer2(self.features[-1]))
        return self.features
class ResnetEncoder2(nn.Module):
    """ResNet encoder returning all five feature scales
    (stem relu, layer1..layer4)."""

    def __init__(self, num_layers, pretrained, num_input_images=1):
        super(ResnetEncoder2, self).__init__()
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])
        resnets = {18: models.resnet18, 34: models.resnet34, 50: models.resnet50,
                   101: models.resnet101, 152: models.resnet152}
        if num_layers not in resnets:
            raise ValueError('{} is not a valid number of resnet layers'.format(num_layers))
        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, pretrained, num_input_images)
        else:
            self.encoder = resnets[num_layers](pretrained)
        if num_layers > 34:
            # Bottleneck variants expand channel counts four-fold.
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        self.features = []
        # Fixed mean/std normalisation (0.45 / 0.225) applied to the input.
        x = (input_image - 0.45) / 0.225
        x = self.encoder.bn1(self.encoder.conv1(x))
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        for stage in (self.encoder.layer2, self.encoder.layer3, self.encoder.layer4):
            self.features.append(stage(self.features[-1]))
        return self.features
def init_fourier_(tensor, norm='ortho'):
    """Initialise a conv weight in-place with an (inverse) DFT matrix.

    ``tensor`` is expected to have shape (2N, 2, N, kernel_size); the cosine /
    sine entries of the transform are written into the centre column of the
    kernel (real parts in output rows [0, N), imaginary in [N, 2N)).
    With ``norm='ortho'`` the whole tensor is scaled by 1/sqrt(N).
    Returns ``tensor`` for chaining.
    """
    with torch.no_grad():
        _, _, N, kernel_size = tensor.shape
        centre = kernel_size // 2
        for k in range(N):
            for n in range(N):
                angle = (2 * np.pi * n * k) / N
                tensor.data[k, 0, n, centre] = np.cos(angle)
                tensor.data[k, 1, n, centre] = -np.sin(angle)
                tensor.data[k + N, 0, n, centre] = np.sin(angle)
                tensor.data[k + N, 1, n, centre] = np.cos(angle)
        if norm == 'ortho':
            tensor.data[...] = tensor.data[...] / np.sqrt(N)
    return tensor
def init_fourier_2d(N, M, inverse=True, norm='ortho', out_tensor=None, complex_type=np.complex64):
    """Build the real-valued matrix of a 2D (inverse) DFT over an N x M grid.

    Parameters
    ----------
    N, M : number of rows and columns.
    inverse : bool (default True) — use the inverse-transform sign convention.
    norm : 'ortho' or None.
    out_tensor : optional torch.Tensor — if given, values are copied into it
        and it is returned; otherwise the raw (2NM, 2NM) numpy matrix is returned.
    complex_type : numpy complex dtype of the intermediate 1D DFT matrices.
    """
    sign = 1 if inverse else -1
    dft1mat_m = np.zeros((M, M), dtype=complex_type)
    dft1mat_n = np.zeros((N, N), dtype=complex_type)
    for l, m in itertools.product(range(M), range(M)):
        dft1mat_m[l, m] = np.exp(sign * 2 * np.pi * 1j * (m * l / M))
    for k, n in itertools.product(range(N), range(N)):
        dft1mat_n[k, n] = np.exp(sign * 2 * np.pi * 1j * (n * k / N))
    # 2D transform = Kronecker product of the 1D transforms; complex entries
    # are then split into the [[Re, -Im], [Im, Re]] real block structure.
    mat_kron = np.kron(dft1mat_n, dft1mat_m)
    mat_split = np.block([[np.real(mat_kron), -np.imag(mat_kron)],
                          [np.imag(mat_kron), np.real(mat_kron)]])
    if norm == 'ortho':
        mat_split /= np.sqrt(N * M)
    elif inverse:
        mat_split /= N * M
    if out_tensor is not None:
        out_tensor.data[...] = torch.Tensor(mat_split)
    else:
        out_tensor = mat_split
    return out_tensor
def init_noise_(tensor, init):
    """In-place initialise ``tensor``: zero it when ``init`` is falsy,
    otherwise apply the ``torch.nn.init`` initialiser named by ``init``
    (e.g. 'kaiming_uniform_'). Returns the tensor."""
    with torch.no_grad():
        if init:
            return getattr(torch.nn.init, init)(tensor)
        return tensor.zero_()
class GeneralisedIFT2Layer(nn.Module):

    def __init__(self, nrow, ncol, nch_in, nch_int=None, nch_out=None,
                 kernel_size=1, nl=None, init_fourier=True, init=None, bias=False,
                 batch_norm=False, share_tfxs=False, learnable=True):
        """Generalised domain-transform layer.

        Decomposes a learnable 2D transform into two "1D" convolutions, one per
        image axis. Can be initialised as the exact inverse Fourier transform
        when nch_in == nch_int == nch_out == 2 and init_fourier is True; adding
        ``init`` (e.g. 'kaiming_normal_') gives Fourier-plus-noise. Setting
        learnable=False freezes the kernels, turning the layer into a fixed FT.

        Parameters
        ----------
        nrow, ncol : int - number of rows / columns of the input.
        nch_in : int - input channels (e.g. real & imaginary, coils, frames).
        nch_int : int - intermediate channels after the first transform
            (defaults to nch_in).
        nch_out : int - output channels (defaults to nch_in).
        kernel_size : int - kernel size along the second axis of each 1D transform.
        nl : one of 'tanh', 'relu', 'sigmoid' or None - nonlinearity between
            the two transforms; if used, bias=True is recommended.
        init_fourier : bool - initialise kernels with the inverse Fourier transform.
        init : str or None - torch.nn.init initialiser name for noise init.
        bias : bool - add bias to each 1D transform.
        batch_norm : bool - apply BatchNorm2d after each transform.
        share_tfxs : bool - share the two transforms (only when nrow == ncol).
        learnable : bool - whether the kernels receive gradients.
        """
        super(GeneralisedIFT2Layer, self).__init__()
        self.nrow = nrow
        self.ncol = ncol
        self.nch_in = nch_in
        self.nch_int = nch_int
        self.nch_out = nch_out
        self.kernel_size = kernel_size
        self.init_fourier = init_fourier
        self.init = init
        self.nl = nl
        # Intermediate / output channel counts default to the input count.
        if (not self.nch_int):
            self.nch_int = self.nch_in
        if (not self.nch_out):
            self.nch_out = self.nch_in
        # Each "1D" transform is a conv whose kernel spans a full row (or
        # column); the output channels pack nch * length values per position.
        idft1 = torch.nn.Conv2d(self.nch_in, (self.nch_int * self.nrow), (self.nrow, kernel_size),
                                padding=(0, (kernel_size // 2)), bias=bias)
        idft2 = torch.nn.Conv2d(self.nch_int, (self.nch_out * self.ncol), (self.ncol, kernel_size),
                                padding=(0, (kernel_size // 2)), bias=bias)
        # Noise (or zero) initialisation first; Fourier values may overwrite below.
        init_noise_(idft1.weight, self.init)
        init_noise_(idft2.weight, self.init)
        if self.init_fourier:
            # Fourier init only makes sense for 2-channel (real/imag) transforms.
            if (not (self.nch_in == self.nch_int == self.nch_out == 2)):
                raise ValueError
            if self.init:
                # Normalise the noise before adding the Fourier structure so the
                # perturbation has controlled magnitude along the transform axis.
                idft1.weight.data = F.normalize(idft1.weight.data, dim=2)
                idft2.weight.data = F.normalize(idft2.weight.data, dim=2)
            init_fourier_(idft1.weight)
            init_fourier_(idft2.weight)
        self.idft1 = idft1
        self.idft2 = idft2
        # Optionally share the two transforms (requires a square input).
        if (share_tfxs and (nrow == ncol)):
            self.idft2 = self.idft1
        self.learnable = learnable
        self.set_learnable(self.learnable)
        self.batch_norm = batch_norm
        if self.batch_norm:
            self.bn1 = torch.nn.BatchNorm2d(self.nch_int)
            self.bn2 = torch.nn.BatchNorm2d(self.nch_out)

    def forward(self, X):
        """Apply both 1D transforms (rows, then columns) to X of shape
        (batch, nch_in, nrow, ncol); returns (batch, nch_out, nrow, ncol)."""
        batch_size = len(X)
        # First transform collapses the row axis into channels; reshape/permute
        # unpacks it back into a (nch_int, nrow, ncol) image, transposed so the
        # second transform acts along the other axis.
        x_t = self.idft1(X)
        x_t = x_t.reshape([batch_size, self.nch_int, self.nrow, self.ncol]).permute(0, 1, 3, 2)
        if self.batch_norm:
            x_t = self.bn1(x_t.contiguous())
        if self.nl:
            # NOTE(review): F.tanh / F.sigmoid are deprecated aliases of
            # torch.tanh / torch.sigmoid in recent PyTorch.
            if (self.nl == 'tanh'):
                x_t = F.tanh(x_t)
            elif (self.nl == 'relu'):
                x_t = F.relu(x_t)
            elif (self.nl == 'sigmoid'):
                x_t = F.sigmoid(x_t)
            else:
                raise ValueError
        # Second transform along the remaining axis, then transpose back.
        x_t = self.idft2(x_t)
        x_t = x_t.reshape([batch_size, self.nch_out, self.ncol, self.nrow]).permute(0, 1, 3, 2)
        if self.batch_norm:
            x_t = self.bn2(x_t.contiguous())
        return x_t

    def set_learnable(self, flag=True):
        # Toggle gradient flow through both transform kernels.
        self.learnable = flag
        self.idft1.weight.requires_grad = flag
        self.idft2.weight.requires_grad = flag
def get_refinement_block(model='automap_scae', in_channel=1, out_channel=1):
    """Return the image-refinement CNN named by ``model``.

    'automap_scae' is the sparse convolutional autoencoder from the AUTOMAP
    paper; 'simple' is a plain five-layer conv stack. Any other name raises
    NotImplementedError.
    """
    if model == 'automap_scae':
        return nn.Sequential(
            nn.Conv2d(in_channel, 64, 5, 1, 2), nn.ReLU(True),
            nn.Conv2d(64, 64, 5, 1, 2), nn.ReLU(True),
            nn.ConvTranspose2d(64, out_channel, 7, 1, 3))
    if model == 'simple':
        return nn.Sequential(
            nn.Conv2d(in_channel, 64, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(64, out_channel, 3, 1, 1))
    raise NotImplementedError
class AUTOMAP(nn.Module):
    """
    Pytorch implementation of AUTOMAP [1].

    Reference:
    ----------
    [1] Zhu et al., AUTOMAP, Nature 2018. <url:https://www.nature.com/articles/nature25988.pdf>
    """

    def __init__(self, input_shape, output_shape, init_fc2_fourier=False, init_fc3_fourier=False):
        """
        Parameters
        ----------
        input_shape : tuple, e.g. (2, nx, ny) — flattened into the first FC layer.
        output_shape : tuple — shape of the reconstructed output image.
        init_fc2_fourier / init_fc3_fourier : bool — initialise the
            corresponding FC layer with the 2D Fourier transform (requires
            input_shape == output_shape).
        """
        super(AUTOMAP, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.ndim = input_shape[-1]
        self.input_reshape = int(np.prod(self.input_shape))
        self.output_reshape = int(np.prod(self.output_shape))
        # Two fully connected layers performing the learned domain transform.
        self.domain_transform = nn.Linear(self.input_reshape, self.output_reshape)
        self.domain_transform2 = nn.Linear(self.output_reshape, self.output_reshape)
        if init_fc2_fourier or init_fc3_fourier:
            if input_shape != output_shape:
                raise ValueError('To initialise the kernels with Fourier transform,the input and output shapes must be the same')
        # BUGFIX: the weight tensor must be passed as out_tensor=; the original
        # code passed it as the third positional argument, which is `inverse`,
        # so the Fourier initialisation never reached the weights.
        if init_fc2_fourier:
            init_fourier_2d(input_shape[-2], input_shape[-1], out_tensor=self.domain_transform.weight)
        if init_fc3_fourier:
            init_fourier_2d(input_shape[-2], input_shape[-1], out_tensor=self.domain_transform2.weight)
        self.sparse_convolutional_autoencoder = get_refinement_block('automap_scae', output_shape[0], output_shape[0])

    def forward(self, x):
        """Expects input_shape (batch_size, 2, ndim, ndim)"""
        batch_size = len(x)
        x = x.reshape(batch_size, int(np.prod(self.input_shape)))
        # torch.tanh replaces the deprecated F.tanh alias.
        x = torch.tanh(self.domain_transform(x))
        x = torch.tanh(self.domain_transform2(x))
        x = x.reshape(-1, *self.output_shape)
        x = self.sparse_convolutional_autoencoder(x)
        return x
class dAUTOMAP(nn.Module):
    """
    Pytorch implementation of dAUTOMAP.

    Decomposes the automap kernel into 2 Generalised "1D" transforms to make
    it scalable.
    """

    def __init__(self, input_shape, output_shape, tfx_params, tfx_params2=None):
        """
        Parameters
        ----------
        input_shape / output_shape : tuples (n_channel, nrow, ncol).
        tfx_params : dict of kwargs for the first GeneralisedIFT2Layer.
        tfx_params2 : optional kwargs for the second layer (defaults to tfx_params).
        """
        super(dAUTOMAP, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        if tfx_params2 is None:
            tfx_params2 = tfx_params
        self.domain_transform = GeneralisedIFT2Layer(**tfx_params)
        self.domain_transform2 = GeneralisedIFT2Layer(**tfx_params2)
        self.refinement_block = get_refinement_block('automap_scae', input_shape[0], output_shape[0])

    def forward(self, x):
        """Assumes input to be (batch_size, 2, nrow, ncol)"""
        x_mapped = self.domain_transform(x)
        # torch.tanh replaces the deprecated F.tanh alias.
        x_mapped = torch.tanh(x_mapped)
        x_mapped2 = self.domain_transform2(x_mapped)
        x_mapped2 = torch.tanh(x_mapped2)
        out = self.refinement_block(x_mapped2)
        return out
class dAUTOMAPExt(nn.Module):
    """
    Pytorch implementation of dAUTOMAP with adjustable depth and nonlinearity.

    Decomposes the automap kernel into 2 Generalised "1D" transforms to make
    it scalable.

    Parameters
    ----------
    input_shape : tuple (n_channel, nx, ny)
    output_shape : tuple (n_channel, nx, ny)
    tfx_params : list of dict or dict. If a list, it must provide the
        parameters for each layer; if a dict, the same config is shared by
        all ``depth`` layers.
    depth : int (default: 2) — number of stacked GeneralisedIFT2Layers.
    nl : str (default: 'tanh') — name of the torch.nn.functional nonlinearity
        applied after each transform.
    """

    def __init__(self, input_shape, output_shape, tfx_params=None, depth=2, nl='tanh'):
        super(dAUTOMAPExt, self).__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.depth = depth
        self.nl = nl
        domain_transforms = []
        if isinstance(tfx_params, list):
            # Explicit per-layer configs must match the requested depth.
            if (self.depth and (self.depth != len(tfx_params))):
                raise ValueError('Depth and the length of tfx_params must be the same')
        else:
            # A single shared config is replicated for every layer.
            tfx_params = ([tfx_params] * self.depth)
        for tfx_param in tfx_params:
            domain_transform = GeneralisedIFT2Layer(**tfx_param)
            domain_transforms.append(domain_transform)
        self.domain_transforms = nn.ModuleList(domain_transforms)
        self.refinement_block = get_refinement_block('automap_scae', input_shape[0], output_shape[0])

    def forward(self, x):
        """Assumes input to be (batch_size, 2, nrow, ncol)"""
        for i in range(self.depth):
            x = self.domain_transforms[i](x)
            # NOTE(review): getattr(F, 'tanh') resolves to a deprecated alias
            # on recent PyTorch versions.
            x = getattr(F, self.nl)(x)
        out = self.refinement_block(x)
        return out
class RawVideoExtractorCV2():
    """Extract frames from a video file with OpenCV and preprocess them into a
    tensor suitable for a CLIP-style visual encoder."""

    def __init__(self, centercrop=False, size=224, framerate=(-1)):
        # centercrop flag is stored but cropping is always part of the
        # transform below; size is the square output resolution.
        self.centercrop = centercrop
        self.size = size
        self.framerate = framerate
        self.transform = self._transform(self.size)

    def _transform(self, n_px):
        # Resize -> centre-crop -> RGB -> tensor -> CLIP normalisation
        # (the mean/std constants are CLIP's published image statistics).
        return Compose([Resize(n_px, interpolation=Image.BICUBIC), CenterCrop(n_px),
                        (lambda image: image.convert('RGB')), ToTensor(),
                        Normalize((0.48145466, 0.4578275, 0.40821073),
                                  (0.26862954, 0.26130258, 0.27577711))])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None):
        """Decode ``video_file`` and return {'video': tensor of preprocessed frames}.

        sample_fp frames are taken per second of video (0 means every frame at
        native fps); start_time/end_time optionally clip to a [start, end)
        window in whole seconds.
        """
        if ((start_time is not None) or (end_time is not None)):
            assert (isinstance(start_time, int) and isinstance(end_time, int)
                    and (start_time > (-1)) and (end_time > start_time))
        assert (sample_fp > (-1))
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        # Ceiling division: duration in whole seconds.
        # NOTE(review): fps == 0 (unreadable file) would raise ZeroDivisionError here.
        total_duration = (((frameCount + fps) - 1) // fps)
        (start_sec, end_sec) = (0, total_duration)
        if (start_time is not None):
            (start_sec, end_sec) = (start_time, (end_time if (end_time <= total_duration) else total_duration))
            cap.set(cv2.CAP_PROP_POS_FRAMES, int((start_time * fps)))
        interval = 1
        if (sample_fp > 0):
            interval = (fps // sample_fp)
        else:
            sample_fp = fps
        if (interval == 0):
            interval = 1
        # Frame offsets within one second, evenly spaced by `interval`,
        # truncated to exactly sample_fp samples.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert (len(inds) >= sample_fp)
        inds = inds[:sample_fp]
        ret = True
        (images, included) = ([], [])
        for sec in np.arange(start_sec, (end_sec + 1)):
            if (not ret):
                break
            sec_base = int((sec * fps))
            for ind in inds:
                # Random-access seek to the target frame, then decode it.
                cap.set(cv2.CAP_PROP_POS_FRAMES, (sec_base + ind))
                (ret, frame) = cap.read()
                if (not ret):
                    break
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                images.append(preprocess(Image.fromarray(frame_rgb).convert('RGB')))
        cap.release()
        if (len(images) > 0):
            video_data = th.tensor(np.stack(images))
        else:
            # Placeholder tensor when no frame could be decoded.
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None):
        """Convenience wrapper: extract frames at self.framerate with the
        instance's preprocessing transform."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate,
                                           start_time=start_time, end_time=end_time)
        return image_input

    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frame stack to (num_frames, 1, C, H, W)."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view((-1), 1, tensor_size[(-3)], tensor_size[(-2)], tensor_size[(-1)])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames: 0 = keep order, 1 = reverse, 2 = random shuffle."""
        if (frame_order == 0):
            pass
        elif (frame_order == 1):
            reverse_order = np.arange((raw_video_data.size(0) - 1), (-1), (-1))
            raw_video_data = raw_video_data[(reverse_order, ...)]
        elif (frame_order == 2):
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[(random_order, ...)]
        return raw_video_data
def get_args(description='VQA Task'):
    """Parse command-line arguments for the video-QA training/evaluation run.

    Returns the argparse.Namespace after consistency checks; note that
    ``batch_size`` is divided by ``gradient_accumulation_steps`` at the end.
    """
    parser = argparse.ArgumentParser(description=description)
    # --- run-mode flags ---
    parser.add_argument('--do_pretrain', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    # --- data locations ---
    parser.add_argument('--train_csv', type=str, default='data/.train.csv', help='')
    parser.add_argument('--val_csv', type=str, default='data/.val.csv', help='')
    parser.add_argument('--data_path', type=str, default='train_ans2label.json', help='data pickle file path')
    parser.add_argument('--features_path', type=str, default='MSRVTT_Videos', help='feature path')
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    # --- optimisation hyper-parameters ---
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--batch_size_val', type=int, default=3500, help='batch size eval')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--video_dim', type=int, default=1024, help='video feature dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    # --- input clipping / sampling ---
    parser.add_argument('--max_words', type=int, default=20, help='')
    parser.add_argument('--max_frames', type=int, default=100, help='')
    parser.add_argument('--feature_framerate', type=int, default=1, help='')
    # --- loss shaping ---
    parser.add_argument('--margin', type=float, default=0.1, help='margin for loss')
    parser.add_argument('--hard_negative_rate', type=float, default=0.5, help='rate of intra negative sample')
    parser.add_argument('--negative_weighting', type=int, default=1, help='Weight the loss for intra negative')
    parser.add_argument('--n_pair', type=int, default=1, help='Num of pair to output from data loader')
    # --- model / checkpoint setup ---
    parser.add_argument('--output_dir', default=None, type=str, required=True,
                        help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--cross_model', default='cross-base', type=str, required=False, help='Cross module')
    parser.add_argument('--init_model', default=None, type=str, required=False, help='Initial model.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float,
                        help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--n_gpu', type=int, default=1, help='Changed in the execute process.')
    parser.add_argument('--cache_dir', default='', type=str,
                        help='Where do you want to store the pre-trained models downloaded from s3')
    # --- mixed precision ---
    parser.add_argument('--fp16', action='store_true',
                        help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    # --- task / dataset selection ---
    parser.add_argument('--task_type', default='retrieval', type=str, help='Point the task `retrieval` to finetune.')
    parser.add_argument('--datatype', default='msrvtt', type=str, help='Point the dataset to finetune.')
    # --- distributed training topology (filled in at runtime) ---
    parser.add_argument('--world_size', default=0, type=int, help='distribted training')
    parser.add_argument('--local_rank', default=0, type=int, help='distribted training')
    parser.add_argument('--rank', default=0, type=int, help='distribted training')
    parser.add_argument('--coef_lr', type=float, default=0.001, help='coefficient for bert branch.')
    # --- MIL options ---
    parser.add_argument('--use_mil', action='store_true', help='Whether use MIL as Miech et. al. (2020).')
    parser.add_argument('--sampled_use_mil', action='store_true', help='Whether MIL, has a high priority than use_mil.')
    # --- architecture depths ---
    parser.add_argument('--text_num_hidden_layers', type=int, default=12, help='Layer NO. of text.')
    parser.add_argument('--visual_num_hidden_layers', type=int, default=12, help='Layer NO. of visual.')
    parser.add_argument('--cross_num_hidden_layers', type=int, default=4, help='Layer NO. of cross.')
    parser.add_argument('--loose_type', action='store_true', help='Default using tight type for retrieval.')
    parser.add_argument('--expand_msrvtt_sentences', action='store_true', help='')
    # --- frame ordering / slicing ---
    parser.add_argument('--train_frame_order', type=int, default=0, choices=[0, 1, 2],
                        help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--eval_frame_order', type=int, default=0, choices=[0, 1, 2],
                        help='Frame order, 0: ordinary order; 1: reverse order; 2: random order.')
    parser.add_argument('--freeze_layer_num', type=int, default=0, help='Layer NO. of CLIP need to freeze.')
    parser.add_argument('--slice_framepos', type=int, default=0, choices=[0, 1, 2],
                        help='0: cut from head frames; 1: cut from tail frames; 2: extract frames uniformly.')
    parser.add_argument('--linear_patch', type=str, default='2d', choices=['2d', '3d'],
                        help='linear projection of flattened patches.')
    parser.add_argument('--sim_header', type=str, default='meanP',
                        choices=['meanP', 'seqLSTM', 'seqTransf', 'tightTransf', 'BTransf', 'denseTransf'],
                        help='choice a similarity header.')
    parser.add_argument('--loss', type=str, default='CrossEn', choices=['CrossEn'])
    # --- EMCL-specific hyper-parameters ---
    parser.add_argument('--K', type=int, default=16)
    parser.add_argument('--stage_num', type=int, default=5)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--lamd', type=float, default=1)
    parser.add_argument('--beta', type=float, default=1)
    parser.add_argument('--num_labels', type=int, default=1000)
    args = parser.parse_args()
    # tightTransf requires the tight (non-loose) similarity computation.
    if (args.sim_header == 'tightTransf'):
        args.loose_type = False
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    # Effective per-step batch size after gradient accumulation.
    args.batch_size = int((args.batch_size / args.gradient_accumulation_steps))
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility, record distributed rank info on
    ``args``, and set up the global run logger. Returns the mutated args.

    Assumes ``torch.distributed`` has already been initialised by the launcher.
    """
    global logger
    # Seed python, hashing, numpy and torch (all CUDA devices) identically.
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Force deterministic cuDNN kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    args.world_size = torch.distributed.get_world_size()
    torch.cuda.set_device(args.local_rank)
    args.rank = torch.distributed.get_rank()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    # Only rank 0's local process prints the full parameter dump.
    if args.local_rank == 0:
        logger.info('Effective parameters:')
        for key in sorted(args.__dict__):
            logger.info(' <<< {}: {}'.format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Choose the compute device for this process and validate batch sizes.

    Raises ValueError when either batch size is not divisible by the GPU count,
    since batches are split evenly across GPUs.
    """
    global logger
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu', local_rank)
    n_gpu = torch.cuda.device_count()
    logger.info('device: {} n_gpu: {}'.format(device, n_gpu))
    args.n_gpu = n_gpu
    # Both the train and eval batch sizes must divide evenly across GPUs.
    if args.batch_size % args.n_gpu != 0 or args.batch_size_val % args.n_gpu != 0:
        raise ValueError('Invalid batch_size/batch_size_val and n_gpu parameter: {}%{} and {}%{}, should be == 0'.format(args.batch_size, args.n_gpu, args.batch_size_val, args.n_gpu))
    return (device, n_gpu)
def init_model(args, device, n_gpu, local_rank):
    """Build the EMCL4QA model, optionally warm-started from ``--init_model`` weights."""
    # Load a checkpoint on CPU first; EMCL4QA.from_pretrained consumes the state dict.
    state_dict = torch.load(args.init_model, map_location='cpu') if args.init_model else None
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = EMCL4QA.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=state_dict, task_config=args)
    # nn.Module.to returns the module itself, so this moves and returns in one step.
    return model.to(device)
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.0):
    """Build the BertAdam optimizer with per-group LR/decay and wrap the model in DDP.

    CLIP parameters (names containing 'clip.') train at ``lr * coef_lr``; bias and
    LayerNorm parameters are excluded from weight decay.
    """
    # Unwrap a previously wrapped model so parameter names are stable.
    if hasattr(model, 'module'):
        model = model.module
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    weight_decay = 0.2

    def _skips_decay(name):
        # bias/LayerNorm parameters get no weight decay.
        return any(nd in name for nd in no_decay)

    # Bucket parameters by (decay?, belongs-to-CLIP?) in a single pass,
    # preserving named_parameters order within each bucket.
    buckets = {(d, c): [] for d in ('decay', 'no_decay') for c in ('clip', 'other')}
    for name, param in model.named_parameters():
        decay_key = 'no_decay' if _skips_decay(name) else 'decay'
        clip_key = 'clip' if 'clip.' in name else 'other'
        buckets[(decay_key, clip_key)].append(param)

    optimizer_grouped_parameters = [
        {'params': buckets[('decay', 'clip')], 'weight_decay': weight_decay, 'lr': args.lr * coef_lr},
        {'params': buckets[('decay', 'other')], 'weight_decay': weight_decay},
        {'params': buckets[('no_decay', 'clip')], 'weight_decay': 0.0, 'lr': args.lr * coef_lr},
        {'params': buckets[('no_decay', 'other')], 'weight_decay': 0.0},
    ]
    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_cosine', b1=0.9, b2=0.98, e=1e-06,
                         t_total=num_train_optimization_steps, weight_decay=weight_decay,
                         max_grad_norm=1.0)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
                                                      output_device=local_rank,
                                                      find_unused_parameters=True)
    return (optimizer, scheduler, model)
def dataloader_msrvtt_train(args, tokenizer):
    """Create the distributed MSRVTT training dataloader.

    Returns ``(dataloader, dataset_size, sampler)``; the sampler is exposed so the
    caller can reshuffle each epoch via ``sampler.set_epoch(epoch)``.
    """
    dataset = MSRVTT_TrainDataLoader(
        jsonl_path=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size // args.n_gpu,  # per-process batch
        num_workers=args.num_thread_reader,
        pin_memory=True,
        shuffle=(sampler is None),  # the DistributedSampler owns shuffling
        sampler=sampler,
        drop_last=True,
    )
    return (loader, len(dataset), sampler)
def dataloader_msrvtt_test(args, tokenizer):
    """Create the (non-distributed) MSRVTT evaluation dataloader.

    Returns ``(dataloader, dataset_size)``; evaluation keeps order (no shuffle)
    and evaluates every sample (no drop_last).
    """
    dataset = MSRVTT_DataLoader(
        jsonl_path=args.val_csv,
        train_jsonl=args.train_csv,
        ans2label_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        unfold_sentences=args.expand_msrvtt_sentences,
        frame_order=args.train_frame_order,
        slice_framepos=args.slice_framepos,
        use_num=args.num_labels,
    )
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return (loader, len(dataset))
def save_model(epoch, args, model, type_name=''):
    """Serialize the model state dict into ``args.output_dir`` and return the path."""
    # Unwrap DistributedDataParallel so saved keys carry no 'module.' prefix.
    to_save = model.module if hasattr(model, 'module') else model
    suffix = '' if type_name == '' else (type_name + '.')
    output_model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}{}'.format(suffix, epoch))
    torch.save(to_save.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Restore an EMCL4QA checkpoint for the given epoch.

    Returns the model on ``device``, or ``None`` when the checkpoint file
    does not exist.
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, 'pytorch_model.bin.{}'.format(epoch))
    # Guard clause: nothing to restore.
    if not os.path.exists(model_file):
        return None
    state_dict = torch.load(model_file, map_location='cpu')
    if args.local_rank == 0:
        logger.info('Model loaded from %s', model_file)
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
    model = EMCL4QA.from_pretrained(args.cross_model, cache_dir=cache_dir, state_dict=state_dict, task_config=args)
    model.to(device)
    return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, tokenizer=ClipTokenizer()):
    """Run one training epoch over the QA dataloader.

    Returns (mean loss over batches, updated global_step).
    NOTE(review): ``tokenizer=ClipTokenizer()`` is a mutable default evaluated once
    at import time; it is unused in this body but kept for interface compatibility.
    """
    global logger
    torch.cuda.empty_cache()
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    for (step, batch) in enumerate(train_dataloader):
        # With more than one GPU the parallel wrapper scatters tensors itself.
        if (n_gpu == 1):
            batch = tuple((t.to(device=device, non_blocking=True) for t in batch))
        (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
        ce_loss = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
        if (n_gpu > 1):
            # DataParallel returns one loss per GPU; average them.
            ce_loss = ce_loss.mean()
        if (args.gradient_accumulation_steps > 1):
            # Scale so accumulated gradients match the effective batch size.
            ce_loss = (ce_loss / args.gradient_accumulation_steps)
        loss = ce_loss
        loss.backward()
        total_loss += float(loss)
        # Only step the optimizer every gradient_accumulation_steps batches.
        if (((step + 1) % args.gradient_accumulation_steps) == 0):
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if (scheduler is not None):
                scheduler.step()
            optimizer.step()
            optimizer.zero_grad()
            # Clamp CLIP's learned temperature (logit_scale) to at most ln(100),
            # matching the cap used in the original CLIP training recipe.
            if hasattr(model, 'module'):
                torch.clamp_(model.module.clip.logit_scale.data, max=np.log(100))
            else:
                torch.clamp_(model.clip.logit_scale.data, max=np.log(100))
            global_step += 1
            if (((global_step % log_step) == 0) and (local_rank == 0)):
                logger.info('Epoch: %d/%s, Step: %d/%d, Lr: %s, CeLoss: %f, Time/step: %f', (epoch + 1), args.epochs, (step + 1), len(train_dataloader), '-'.join([str(('%.9f' % itm)) for itm in sorted(list(set(optimizer.get_lr())))]), float(ce_loss), ((time.time() - start_time) / (log_step * args.gradient_accumulation_steps)))
                start_time = time.time()
    # NOTE(review): total_loss averages the already-scaled per-batch losses.
    total_loss = (total_loss / len(train_dataloader))
    return (total_loss, global_step)
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate QA accuracy over ``test_dataloader`` and return Prec@1."""
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Unwrap any parallel wrapper and make sure the model sits on the eval device.
    model = (model.module if hasattr(model, 'module') else model).to(device)
    model.eval()
    with torch.no_grad():
        for bid, batch in enumerate(test_dataloader):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, video, video_mask, labels = batch
            output = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
            prec1, prec5 = accuracy(output, labels, topk=(1, 5))
            n_samples = input_ids.size(0)
            top1.update(prec1[0], n_samples)
            top5.update(prec5[0], n_samples)
            # Lightweight in-place progress indicator.
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
    logger.info('Video QA:')
    logger.info('\t>>> Prec@1: {top1.avg:.3f} - Prec@5: {top5.avg:.3f}'.format(top1=top1, top5=top5))
    R1 = top1.avg
    return R1
def main():
    """Entry point: set up distributed training, build the model and dataloaders,
    then run training (with per-epoch validation) or a single evaluation pass."""
    global logger
    args = get_args()
    args = set_seed_logger(args)
    (device, n_gpu) = init_device(args, args.local_rank)
    tokenizer = ClipTokenizer()
    assert (args.task_type == 'retrieval')
    # NOTE(review): hard-codes num_labels to 1500, overriding the CLI default (1000).
    args.num_labels = 1500
    model = init_model(args, device, n_gpu, args.local_rank)
    assert ((args.freeze_layer_num <= 12) and (args.freeze_layer_num >= (- 1)))
    if (hasattr(model, 'clip') and (args.freeze_layer_num > (- 1))):
        # Freeze the bottom freeze_layer_num CLIP transformer blocks. Projection
        # heads, logit_scale and the final LayerNorms always stay trainable.
        for (name, param) in model.clip.named_parameters():
            if ((name.find('ln_final.') == 0) or (name.find('text_projection') == 0) or (name.find('logit_scale') == 0) or (name.find('visual.ln_post.') == 0) or (name.find('visual.proj') == 0)):
                continue
            elif ((name.find('visual.transformer.resblocks.') == 0) or (name.find('transformer.resblocks.') == 0)):
                layer_num = int(name.split('.resblocks.')[1].split('.')[0])
                if (layer_num >= args.freeze_layer_num):
                    continue
            # NOTE(review): `name.find('conv2.')` is truthy for any name that does
            # not START with 'conv2.' (find returns -1 or a positive index), so with
            # linear_patch == '3d' almost every parameter is skipped rather than
            # frozen — looks like a latent bug; preserved as-is.
            if ((args.linear_patch == '3d') and name.find('conv2.')):
                continue
            else:
                param.requires_grad = False
    assert (args.datatype in DATALOADER_DICT)
    (test_dataloader, test_length) = DATALOADER_DICT[args.datatype]['test'](args, tokenizer)
    if (DATALOADER_DICT[args.datatype]['val'] is not None):
        (val_dataloader, val_length) = DATALOADER_DICT[args.datatype]['val'](args, tokenizer, subset='val')
    else:
        # Fall back to the test split when no validation split is registered.
        (val_dataloader, val_length) = (test_dataloader, test_length)
    if (args.local_rank == 0):
        logger.info('***** Running test *****')
        logger.info(' Num examples = %d', test_length)
        logger.info(' Batch size = %d', args.batch_size_val)
        logger.info(' Num steps = %d', len(test_dataloader))
        logger.info('***** Running val *****')
        logger.info(' Num examples = %d', val_length)
    if args.do_train:
        (train_dataloader, train_length, train_sampler) = DATALOADER_DICT[args.datatype]['train'](args, tokenizer)
        # NOTE(review): int() wraps only the numerator, so this is float division;
        # the intent is ceil(len/accum) * epochs and BertAdam tolerates a float.
        num_train_optimization_steps = ((int(((len(train_dataloader) + args.gradient_accumulation_steps) - 1)) / args.gradient_accumulation_steps) * args.epochs)
        coef_lr = args.coef_lr
        (optimizer, scheduler, model) = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if (args.local_rank == 0):
            logger.info('***** Running training *****')
            logger.info(' Num examples = %d', train_length)
            logger.info(' Batch size = %d', args.batch_size)
            logger.info(' Num steps = %d', (num_train_optimization_steps * args.gradient_accumulation_steps))
        best_score = 1e-05
        best_output_model_file = 'None'
        global_step = 0
        for epoch in range(args.epochs):
            # Dead code: the trailing `and 0` deliberately disables this
            # pre-training evaluation pass.
            if ((epoch == 0) and (args.local_rank == 0) and 0):
                logger.info('Eval first')
                output_model_file = None
                R1 = eval_epoch(args, model, test_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the R1 is: {:.4f}'.format(best_output_model_file, best_score))
            # Reshuffle the distributed sampler each epoch.
            train_sampler.set_epoch(epoch)
            (tr_loss, global_step) = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=args.local_rank, tokenizer=tokenizer)
            if (args.local_rank == 0):
                logger.info('Epoch %d/%s Finished, Train Loss: %f', (epoch + 1), args.epochs, tr_loss)
                # NOTE(review): no checkpoint is saved here (output_model_file stays
                # None), so best_output_model_file is informational only.
                output_model_file = None
                logger.info('Eval on val dataset')
                R1 = eval_epoch(args, model, val_dataloader, device, n_gpu)
                if (best_score <= R1):
                    best_score = R1
                    best_output_model_file = output_model_file
                logger.info('The best model is: {}, the R1 is: {:.4f}'.format(best_output_model_file, best_score))
    elif args.do_eval:
        if (args.local_rank == 0):
            eval_epoch(args, model, test_dataloader, device, n_gpu)
class AverageMeter(object):
    """Tracks the most recent value and a running (sample-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # last value seen
        self.avg = 0    # running mean over all samples
        self.sum = 0    # weighted sum of values
        self.count = 0  # total number of samples

    def update(self, val, n=1):
        """Record *val* observed over *n* samples and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Compute precision@k (as a percentage) for each k in *topk*.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of cutoffs to evaluate.
    Returns:
        List of 1-element tensors, one precision value per requested k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the top-maxk predictions per sample, transposed to (maxk, batch).
        pred = output.topk(maxk, 1, True, True)[1].t()
        # hits[i][j] is True when the i-th ranked prediction for sample j is correct.
        hits = pred.eq(target.view(1, -1).expand_as(pred)).contiguous()
        results = []
        for k in topk:
            num_correct = hits[:k].view(-1).float().sum(0, keepdim=True)
            results.append(num_correct.mul_(100.0 / batch_size))
        return results
def url_to_filename(url: str, etag: str=None) -> str:
    """Hash *url* (and optionally *etag*) into a repeatable cache filename.

    The name is the SHA-256 hex digest of the URL; when *etag* is given, its
    digest is appended after a period so different versions cache separately.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name += '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def filename_to_url(filename: str, cache_dir: Union[(str, Path)]=None) -> Tuple[(str, str)]:
    """Return the (url, etag) recorded for a cached file.

    The metadata lives in a ``<filename>.json`` sidecar next to the cached file.
    Raises FileNotFoundError when either the file or its sidecar is missing.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise FileNotFoundError('file {} not found'.format(cache_path))
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise FileNotFoundError('file {} not found'.format(meta_path))
    with open(meta_path) as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
def cached_path(url_or_filename: Union[(str, Path)], cache_dir: Union[(str, Path)]=None) -> str:
    """Resolve *url_or_filename* to a local file path.

    http/https/s3 URLs are downloaded into the cache (reusing an existing copy)
    and the cached path is returned; existing local paths are returned unchanged.
    Raises FileNotFoundError for a missing local path and ValueError for anything
    that is neither a URL nor a path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: download (or reuse) a cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if scheme == '':
        # Looks like a plain path, but nothing exists there.
        raise FileNotFoundError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
def split_s3_path(url: str) -> Tuple[(str, str)]:
    """Split an ``s3://bucket/key`` URL into ``(bucket, key)``.

    Raises ValueError when the URL lacks a bucket or a key component.
    """
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError('bad s3 path {}'.format(url))
    key = parsed.path
    # Strip exactly one leading slash from the key.
    if key.startswith('/'):
        key = key[1:]
    return (parsed.netloc, key)
def s3_request(func: Callable):
    """Decorator turning S3 404 ClientErrors into FileNotFoundError with the URL."""
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Surface a missing object as the standard file-not-found error;
            # re-raise anything else untouched.
            if int(exc.response['Error']['Code']) == 404:
                raise FileNotFoundError('file {} not found'.format(url))
            raise
    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Return the ETag of the S3 object at *url* (None when S3 reports none)."""
    bucket_name, s3_path = split_s3_path(url)
    obj = boto3.resource('s3').Object(bucket_name, s3_path)
    return obj.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Stream the S3 object at *url* directly into the open *temp_file*."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource('s3').Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Download *url* into *temp_file* in 1 KiB chunks with a byte progress bar."""
    response = requests.get(url, stream=True)
    length = response.headers.get('Content-Length')
    # total may be None when the server sends no Content-Length.
    progress = tqdm(unit='B', total=int(length) if length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: Union[(str, Path)]=None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is derived from the URL plus its ETag (HTTP) or S3 ETag, so a
    changed remote file gets a fresh cache entry. A ``<file>.json`` sidecar
    records the url/etag metadata for filename_to_url().
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    # Fetch the ETag first; it becomes part of the cache filename.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if (response.status_code != 200):
            raise IOError('HEAD request failed for url {} with status code {}'.format(url, response.status_code))
        etag = response.headers.get('ETag')
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (not os.path.exists(cache_path)):
        # Download into a temp file first, then copy into the cache so a partial
        # download never leaves a corrupt cache entry behind.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # Flush and rewind so the copy below sees the full content.
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line; trailing whitespace (including
    the newline) is stripped from each item.
    """
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
def get_file_extension(path: str, dot=True, lower: bool=True):
    """Return the extension of *path*.

    Args:
        path: file path or bare filename.
        dot: keep the leading dot when True.
        lower: lowercase the extension when True.
    """
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
class CrossEn(nn.Module):
    """Cross-entropy over a similarity matrix.

    Row i's diagonal entry is treated as the positive logit; the loss is the
    mean negative log-softmax of the diagonal.
    """

    def __init__(self, config=None):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix):
        # Per-row log-probabilities; the diagonal holds the matched pairs.
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        positives = th.diag(log_probs)
        # Average negative log-likelihood of the positives.
        return (-positives).mean()
class InfoNceLoss(nn.Module):
    """Noise-contrastive estimation loss over a square similarity matrix.

    Positives sit on the diagonal; cross-entropy is applied along both rows
    and columns and the two losses are summed.
    """

    def __init__(self):
        super().__init__()
        self.loss = th.nn.CrossEntropyLoss(reduction='mean')

    def forward(self, x):
        batch = x.size()[0]
        labels = th.arange(batch)
        if x.is_cuda:
            labels = labels.cuda()
        row_loss = self.loss(x, labels)
        col_loss = self.loss(th.transpose(x, 0, 1), labels)
        return row_loss + col_loss
class MaxMarginRankingLoss(nn.Module):
    """Max-margin ranking loss over a similarity matrix.

    Each diagonal entry (positive pair) is ranked against every entry of its
    row and of its column; with ``fix_norm`` the positive-vs-itself pairs are
    excluded from the average.
    """

    def __init__(self, margin=1, fix_norm=True):
        super().__init__()
        self.fix_norm = fix_norm
        self.loss = th.nn.MarginRankingLoss(margin)
        self.margin = margin

    def forward(self, x):
        n = x.size()[0]
        # Positive scores: the diagonal broadcast across each row, flattened,
        # then duplicated for the row-wise and column-wise comparisons.
        pos = th.diag(x).unsqueeze(1).expand(n, n).contiguous().view(-1, 1)
        pos = th.cat((pos, pos), 0)
        # Negative scores: all entries row-major, then all entries column-major.
        neg = th.cat((x.view(-1, 1), x.transpose(0, 1).contiguous().view(-1, 1)), 0)
        max_margin = F.relu(self.margin - (pos - neg))
        if self.fix_norm:
            # Mask out the diagonal (positive vs itself) pairs before averaging.
            keep = th.ones(x.shape) - th.eye(x.shape[0])
            flat = th.cat((keep.view(-1, 1), keep.transpose(0, 1).contiguous().view(-1, 1)), 0)
            keep_idx = th.nonzero(flat.flatten()).flatten()
            if pos.is_cuda:
                keep_idx = keep_idx.cuda()
            pos_kept = th.index_select(pos, dim=0, index=keep_idx)
            neg_kept = th.index_select(neg, dim=0, index=keep_idx)
            max_margin = F.relu(self.margin - (pos_kept - neg_kept))
        return max_margin.mean()
def warmup_cosine(x, warmup=0.002):
    """Learning-rate multiplier: linear warmup followed by half-cosine decay.

    Args:
        x: training progress in [0, 1] (fraction of total steps).
        warmup: fraction of training spent ramping linearly from 0 to 1.
    Returns:
        A multiplier in [0, 1].
    """
    if x >= warmup:
        # Cosine anneal over the full progress axis: ~1 near x=0, 0 at x=1.
        return 0.5 * (1.0 + math.cos(math.pi * x))
    # Linear ramp from 0 to 1 across the warmup fraction.
    return x / warmup