def extract_features_wrapper(paths, path2gt, model='vggish', save_as=False):
    """Wrapper function for extracting features (MusiCNN, VGGish or OpenL3) per batch.
       If a save_as string argument is passed, the features will be saved in
       the specified file.
    """
    if model == 'vggish':
        feature_extractor = extract_vggish_features
    elif model == 'openl3' or model == 'musicnn':
        feature_extractor = extract_other_features
    else:
        raise NotImplementedError('Current implementation only supports MusiCNN, VGGish and OpenL3 features')

    batch_size = config['batch_size']
    first_batch = True
    for batch_id in tqdm(range(ceil(len(paths) / batch_size))):
        batch_paths = paths[batch_id * batch_size:(batch_id + 1) * batch_size]
        [x, y, refs] = feature_extractor(batch_paths, path2gt, model)
        if first_batch:
            [X, Y, IDS] = [x, y, refs]
            first_batch = False
        else:
            X = np.concatenate((X, x), axis=0)
            Y = np.concatenate((Y, y), axis=0)
            IDS = np.concatenate((IDS, refs), axis=0)

    if save_as:
        audio_representations_folder = DATA_FOLDER + 'audio_representations/'
        if not os.path.exists(audio_representations_folder):
            os.makedirs(audio_representations_folder)
        np.savez(audio_representations_folder + save_as, X=X, Y=Y, IDS=IDS)
        print('Audio features stored: ', save_as)

    return [X, Y, IDS]
def load_path2gt(paths_file, config):
    """Given the path, construct the ground truth vectors.
       This function heavily relies on path2gt_datasets(.),
       where the relation between the path and the ground truth
       is defined.
    """
    paths = list()
    path2gt = dict()
    path2onehot = dict()
    pf = open(paths_file)
    for path in pf.readlines():
        path = path.rstrip('\n')
        paths.append(path)
        label = path2gt_datasets(path, config['dataset'])
        path2gt[path] = label
        path2onehot[path] = label2onehot(label, config['num_classes_dataset'])
    return paths, path2gt, path2onehot
def label2onehot(label, num_classes):
    """Convert class label to one hot vector.
       Example: label2onehot(label=2, num_classes=5) > array([0., 0., 1., 0., 0.])
    """
    onehot = np.zeros(num_classes)
    onehot[label] = 1
    return onehot
def path2gt_datasets(path, dataset):
    """Given the audio path, it returns the ground truth label.
       Define HERE a new dataset to employ this code with other data.
    """
    if dataset == 'GTZAN':
        if 'blues' in path:
            return 0
        elif 'classical' in path:
            return 1
        elif 'country' in path:
            return 2
        elif 'disco' in path:
            return 3
        elif 'hiphop' in path:
            return 4
        elif 'jazz' in path:
            return 5
        elif 'metal' in path:
            return 6
        elif 'pop' in path:
            return 7
        elif 'reggae' in path:
            return 8
        elif 'rock' in path:
            return 9
        else:
            print('Did not find the corresponding ground truth (' + str(path) + ')!')
    else:
        print('Did not find the implementation of ' + str(dataset) + ' dataset!')
def matrix_visualization(matrix, title=None):
    """Visualize 2D matrices like spectrograms or feature maps.
    """
    plt.figure()
    plt.imshow(np.flipud(matrix.T), interpolation=None)
    plt.colorbar()
    if title is not None:
        plt.title(title)
    plt.show()
def wavefile_to_waveform(wav_file, features_type):
    data, sr = sf.read(wav_file)
    if features_type == 'vggish':
        # Re-encode as 16-bit PCM via a temporary file, as expected by the VGGish pre-processing,
        # then normalize the int16 samples to [-1.0, +1.0].
        tmp_name = str(int(np.random.rand(1) * 1000000)) + '.wav'
        sf.write(tmp_name, data, sr, subtype='PCM_16')
        sr, wav_data = wavfile.read(tmp_name)
        os.remove(tmp_name)
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        data = wav_data / 32768.0

    # Repeat the signal until it is at least one second long, then keep exactly one second.
    src_repeat = data
    while src_repeat.shape[0] < sr:
        src_repeat = np.concatenate((src_repeat, data), axis=0)
    data = src_repeat[:sr]

    return data, sr
def waveform_to_examples(data, sample_rate):
    """Converts audio waveform into an array of examples for VGGish.

    Args:
      data: np.array of either one dimension (mono) or two dimensions
        (multi-channel, with the outer dimension representing channels).
        Each sample is generally expected to lie in the range [-1.0, +1.0],
        although this is not required.
      sample_rate: Sample rate of data.

    Returns:
      3-D np.array of shape [num_examples, num_frames, num_bands] which represents
      a sequence of examples, each of which contains a patch of log mel
      spectrogram, covering num_frames frames of audio and num_bands mel frequency
      bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.
    """
    # Convert to mono.
    if len(data.shape) > 1:
        data = np.mean(data, axis=1)
    # Resample to the rate assumed by VGGish.
    if sample_rate != vggish_params.SAMPLE_RATE:
        data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)

    # Compute log mel spectrogram features.
    log_mel = mel_features.log_mel_spectrogram(
        data,
        audio_sample_rate=vggish_params.SAMPLE_RATE,
        log_offset=vggish_params.LOG_OFFSET,
        window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,
        hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,
        num_mel_bins=vggish_params.NUM_MEL_BINS,
        lower_edge_hertz=vggish_params.MEL_MIN_HZ,
        upper_edge_hertz=vggish_params.MEL_MAX_HZ)

    # Frame the features into fixed-length examples.
    features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS
    example_window_length = int(round(vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))
    example_hop_length = int(round(vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))
    log_mel_examples = mel_features.frame(
        log_mel,
        window_length=example_window_length,
        hop_length=example_hop_length)
    return log_mel_examples
def wavfile_to_examples(wav_file):
    """Convenience wrapper around waveform_to_examples() for a common WAV format.

    Args:
      wav_file: String path to a file, or a file-like object. The file
        is assumed to contain WAV audio data with signed 16-bit PCM samples.

    Returns:
      See waveform_to_examples.
    """
    samples, sr = wavefile_to_waveform(wav_file, 'vggish')
    return waveform_to_examples(samples, sr)
def define_vggish_slim(training=False):
    """Defines the VGGish TensorFlow model.

    All ops are created in the current default graph, under the scope 'vggish/'.

    The input is a placeholder named 'vggish/input_features' of type float32 and
    shape [batch_size, num_frames, num_bands] where batch_size is variable and
    num_frames and num_bands are constants, and [num_frames, num_bands] represents
    a log-mel-scale spectrogram patch covering num_bands frequency bands and
    num_frames time frames (where each frame step is usually 10ms). This is
    produced by computing the stabilized log(mel-spectrogram + params.LOG_OFFSET).
    The output is an op named 'vggish/embedding' which produces the activations of
    a 128-D embedding layer, which is usually the penultimate layer when used as
    part of a full model with a final classifier layer.

    Args:
      training: If true, all parameters are marked trainable.

    Returns:
      The op 'vggish/embeddings'.
    """
    # Defaults: weights initialized from N(0, INIT_STDDEV), biases to zero, ReLU activations.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=params.INIT_STDDEV),
                        biases_initializer=tf.zeros_initializer(),
                        activation_fn=tf.nn.relu,
                        trainable=training), \
         slim.arg_scope([slim.conv2d], kernel_size=[3, 3], stride=1, padding='SAME'), \
         slim.arg_scope([slim.max_pool2d], kernel_size=[2, 2], stride=2, padding='SAME'), \
         tf.variable_scope('vggish'):
        # Input: a batch of 2-D log-mel-spectrogram patches.
        features = tf.placeholder(tf.float32,
                                  shape=(None, params.NUM_FRAMES, params.NUM_BANDS),
                                  name='input_features')
        # Reshape to 4-D so that we can convolve a batch with conv2d().
        net = tf.reshape(features, [-1, params.NUM_FRAMES, params.NUM_BANDS, 1])

        # The VGG stack of alternating convolutions and max-pools.
        net = slim.conv2d(net, 64, scope='conv1')
        net = slim.max_pool2d(net, scope='pool1')
        net = slim.conv2d(net, 128, scope='conv2')
        net = slim.max_pool2d(net, scope='pool2')
        net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')
        net = slim.max_pool2d(net, scope='pool3')
        net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')
        net = slim.max_pool2d(net, scope='pool4')

        # Flatten before entering the fully-connected layers.
        net = slim.flatten(net)
        net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1')
        # The 128-D embedding layer.
        net = slim.fully_connected(net, params.EMBEDDING_SIZE, scope='fc2')
        return tf.identity(net, name='embedding')
def load_vggish_slim_checkpoint(session, checkpoint_path):
    """Loads a pre-trained VGGish-compatible checkpoint.

    This function can be used as an initialization function (referred to as
    init_fn in TensorFlow documentation) which is called in a Session after
    initializing all variables. When used as an init_fn, this will load
    a pre-trained checkpoint that is compatible with the VGGish model
    definition. Only variables defined by VGGish will be loaded.

    Args:
      session: an active TensorFlow session.
      checkpoint_path: path to a file containing a checkpoint that is
        compatible with the VGGish model definition.
    """
    # Get the list of names of all VGGish variables by defining the model in a temporary graph.
    with tf.Graph().as_default():
        define_vggish_slim(training=False)
        vggish_var_names = [v.name for v in tf.global_variables()]

    # Get the list of all currently existing variables that match the VGGish variable names.
    vggish_vars = [v for v in tf.global_variables() if v.name in vggish_var_names]

    # Use a Saver to restore just those variables.
    saver = tf.train.Saver(vggish_vars, name='vggish_load_pretrained', write_version=1)
    saver.restore(session, checkpoint_path)
def data_loader(data_name, miss_rate):
    """Loads datasets and introduces missingness.

    Args:
      - data_name: letter, spam, or mnist
      - miss_rate: the probability of missing components

    Returns:
      data_x: original data
      miss_data_x: data with missing values
      data_m: indicator matrix for missing components
    """
    # Load data
    if data_name in ['letter', 'spam']:
        file_name = 'data/' + data_name + '.csv'
        data_x = np.loadtxt(file_name, delimiter=',', skiprows=1)
    elif data_name == 'mnist':
        (data_x, _), _ = mnist.load_data()
        data_x = np.reshape(np.asarray(data_x), [60000, 28 * 28]).astype(float)

    # Parameters
    no, dim = data_x.shape

    # Introduce missing data
    data_m = binary_sampler(1 - miss_rate, no, dim)
    miss_data_x = data_x.copy()
    miss_data_x[data_m == 0] = np.nan

    return data_x, miss_data_x, data_m
def main(args):
    """Main function for UCI letter and spam datasets.

    Args:
      - data_name: letter or spam
      - miss_rate: probability of missing components
      - batch_size: batch size
      - hint_rate: hint rate
      - alpha: hyperparameter
      - iterations: iterations

    Returns:
      - imputed_data_x: imputed data
      - rmse: Root Mean Squared Error
    """
    data_name = args.data_name
    miss_rate = args.miss_rate

    gain_parameters = {'batch_size': args.batch_size,
                       'hint_rate': args.hint_rate,
                       'alpha': args.alpha,
                       'iterations': args.iterations}

    # Load data and introduce missingness
    ori_data_x, miss_data_x, data_m = data_loader(data_name, miss_rate)

    # Impute missing data
    imputed_data_x = gain(miss_data_x, gain_parameters)

    # Report the RMSE performance
    rmse = rmse_loss(ori_data_x, imputed_data_x, data_m)

    print()
    print('RMSE Performance: ' + str(np.round(rmse, 4)))

    return imputed_data_x, rmse
def vime_self(x_unlab, p_m, alpha, parameters):
    """Self-supervised learning part in VIME.

    Args:
      x_unlab: unlabeled feature
      p_m: corruption probability
      alpha: hyper-parameter to control the weights of feature and mask losses
      parameters: epochs, batch_size

    Returns:
      encoder: Representation learning block
    """
    # Parameters
    _, dim = x_unlab.shape
    epochs = parameters['epochs']
    batch_size = parameters['batch_size']

    # Build the model: a shared encoder followed by mask and feature estimators
    inputs = Input(shape=(dim,))
    h = Dense(int(dim), activation='relu')(inputs)
    output_1 = Dense(dim, activation='sigmoid', name='mask')(h)
    output_2 = Dense(dim, activation='sigmoid', name='feature')(h)

    model = Model(inputs=inputs, outputs=[output_1, output_2])
    model.compile(optimizer='rmsprop',
                  loss={'mask': 'binary_crossentropy', 'feature': 'mean_squared_error'},
                  loss_weights={'mask': 1, 'feature': alpha})

    # Generate corrupted samples for the pretext task
    m_unlab = mask_generator(p_m, x_unlab)
    m_label, x_tilde = pretext_generator(m_unlab, x_unlab)

    # Fit the model on the pretext task
    model.fit(x_tilde, {'mask': m_label, 'feature': x_unlab},
              epochs=epochs, batch_size=batch_size)

    # Extract the encoder part of the trained model
    layer_name = model.layers[1].name
    layer_output = model.get_layer(layer_name).output
    encoder = models.Model(inputs=model.input, outputs=layer_output)

    return encoder
def MinMaxScaler(data):
    """Min Max normalizer.

    Args:
      - data: original data

    Returns:
      - norm_data: normalized data
    """
    numerator = data - np.min(data, 0)
    denominator = np.max(data, 0) - np.min(data, 0)
    norm_data = numerator / (denominator + 1e-07)
    return norm_data
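# Illustrative check (added, not part of the original snippet): MinMaxScaler normalizes each column
# independently, so a toy array behaves as follows (up to the 1e-07 epsilon in the denominator):
#   MinMaxScaler(np.array([[1., 10.], [3., 30.], [2., 20.]]))
#   -> approximately [[0., 0.], [1., 1.], [0.5, 0.5]]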
def sine_data_generation(no, seq_len, dim):
    """Sine data generation.

    Args:
      - no: the number of samples
      - seq_len: sequence length of the time-series
      - dim: feature dimensions

    Returns:
      - data: generated data
    """
    # Initialize the output
    data = list()

    # Generate sine data
    for i in range(no):
        # Initialize each time-series
        temp = list()
        # For each feature
        for k in range(dim):
            # Randomly drawn frequency and phase
            freq = np.random.uniform(0, 0.1)
            phase = np.random.uniform(0, 0.1)
            # Generate the sine signal based on the frequency and phase
            temp_data = [np.sin(freq * j + phase) for j in range(seq_len)]
            temp.append(temp_data)

        # Align row/column and normalize to [0, 1]
        temp = np.transpose(np.asarray(temp))
        temp = (temp + 1) * 0.5
        # Stack the generated data
        data.append(temp)

    return data
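# Illustrative usage (added): sine_data_generation returns a list of `no` arrays of shape (seq_len, dim),
# each rescaled into [0, 1] by the (temp + 1) * 0.5 step, e.g.
#   data = sine_data_generation(no=10, seq_len=24, dim=5)
#   np.asarray(data).shape  # -> (10, 24, 5)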
def real_data_loading(data_name, seq_len):
    """Load and preprocess real-world datasets.

    Args:
      - data_name: stock or energy
      - seq_len: sequence length

    Returns:
      - data: preprocessed data.
    """
    assert data_name in ['stock', 'energy']

    if data_name == 'stock':
        ori_data = np.loadtxt('data/stock_data.csv', delimiter=',', skiprows=1)
    elif data_name == 'energy':
        ori_data = np.loadtxt('data/energy_data.csv', delimiter=',', skiprows=1)

    # Flip the data to make it chronological
    ori_data = ori_data[::-1]
    # Normalize the data
    ori_data = MinMaxScaler(ori_data)

    # Cut the series into overlapping sequences of length seq_len
    temp_data = []
    for i in range(0, len(ori_data) - seq_len):
        _x = ori_data[i:i + seq_len]
        temp_data.append(_x)

    # Mix the dataset (to make it similar to i.i.d)
    idx = np.random.permutation(len(temp_data))
    data = []
    for i in range(len(temp_data)):
        data.append(temp_data[idx[i]])

    return data
def main(args):
    """Main function for timeGAN experiments.

    Args:
      - data_name: sine, stock, or energy
      - seq_len: sequence length
      - Network parameters (should be optimized for different datasets)
        - module: gru, lstm, or lstmLN
        - hidden_dim: hidden dimensions
        - num_layer: number of layers
        - iteration: number of training iterations
        - batch_size: the number of samples in each batch
      - metric_iteration: number of iterations for metric computation

    Returns:
      - ori_data: original data
      - generated_data: generated synthetic data
      - metric_results: discriminative and predictive scores
    """
    # Data loading
    if args.data_name in ['stock', 'energy']:
        ori_data = real_data_loading(args.data_name, args.seq_len)
    elif args.data_name == 'sine':
        # Set the number of samples and feature dimensions
        no, dim = 10000, 5
        ori_data = sine_data_generation(no, args.seq_len, dim)

    print(args.data_name + ' dataset is ready.')

    # Synthetic data generation by TimeGAN
    parameters = dict()
    parameters['module'] = args.module
    parameters['hidden_dim'] = args.hidden_dim
    parameters['num_layer'] = args.num_layer
    parameters['iterations'] = args.iteration
    parameters['batch_size'] = args.batch_size

    generated_data = timegan(ori_data, parameters)
    print('Finish Synthetic Data Generation')

    # Performance metrics
    metric_results = dict()

    # 1. Discriminative score
    discriminative_score = list()
    for _ in range(args.metric_iteration):
        temp_disc = discriminative_score_metrics(ori_data, generated_data)
        discriminative_score.append(temp_disc)
    metric_results['discriminative'] = np.mean(discriminative_score)

    # 2. Predictive score
    predictive_score = list()
    for tt in range(args.metric_iteration):
        temp_pred = predictive_score_metrics(ori_data, generated_data)
        predictive_score.append(temp_pred)
    metric_results['predictive'] = np.mean(predictive_score)

    # 3. Visualization (PCA and tSNE)
    visualization(ori_data, generated_data, 'pca')
    visualization(ori_data, generated_data, 'tsne')

    # Print the discriminative and predictive scores
    print(metric_results)

    return ori_data, generated_data, metric_results
def discriminative_score_metrics(ori_data, generated_data):
    """Use post-hoc RNN to classify original data and synthetic data.

    Args:
      - ori_data: original data
      - generated_data: generated synthetic data

    Returns:
      - discriminative_score: np.abs(classification accuracy - 0.5)
    """
    # Initialization on the graph
    tf.reset_default_graph()

    # Basic parameters
    no, seq_len, dim = np.asarray(ori_data).shape

    # Set maximum sequence length and each sequence length
    ori_time, ori_max_seq_len = extract_time(ori_data)
    # Sequence lengths of the generated data (the flattened snippet passed ori_data here by mistake).
    generated_time, generated_max_seq_len = extract_time(generated_data)
    max_seq_len = max([ori_max_seq_len, generated_max_seq_len])

    # Network parameters
    hidden_dim = int(dim / 2)
    iterations = 2000
    batch_size = 128

    # Input placeholders for real/synthetic time-series and their sequence lengths
    X = tf.placeholder(tf.float32, [None, max_seq_len, dim], name='myinput_x')
    X_hat = tf.placeholder(tf.float32, [None, max_seq_len, dim], name='myinput_x_hat')
    T = tf.placeholder(tf.int32, [None], name='myinput_t')
    T_hat = tf.placeholder(tf.int32, [None], name='myinput_t_hat')

    def discriminator(x, t):
        """Simple discriminator function.

        Args:
          - x: time-series data
          - t: time information

        Returns:
          - y_hat_logit: logits of the discriminator output
          - y_hat: discriminator output
          - d_vars: discriminator variables
        """
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as vs:
            d_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_dim, activation=tf.nn.tanh, name='d_cell')
            d_outputs, d_last_states = tf.nn.dynamic_rnn(d_cell, x, dtype=tf.float32, sequence_length=t)
            y_hat_logit = tf.contrib.layers.fully_connected(d_last_states, 1, activation_fn=None)
            y_hat = tf.nn.sigmoid(y_hat_logit)
            d_vars = [v for v in tf.all_variables() if v.name.startswith(vs.name)]
        return y_hat_logit, y_hat, d_vars

    y_logit_real, y_pred_real, d_vars = discriminator(X, T)
    y_logit_fake, y_pred_fake, _ = discriminator(X_hat, T_hat)

    # Discriminator loss
    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y_logit_real, labels=tf.ones_like(y_logit_real)))
    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=y_logit_fake, labels=tf.zeros_like(y_logit_fake)))
    d_loss = d_loss_real + d_loss_fake

    # Optimizer
    d_solver = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars)

    # Start session and initialize
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Train/test division for both original and generated data
    (train_x, train_x_hat, test_x, test_x_hat,
     train_t, train_t_hat, test_t, test_t_hat) = train_test_divide(ori_data, generated_data,
                                                                   ori_time, generated_time)

    # Training step
    for itt in range(iterations):
        # Batch setting
        X_mb, T_mb = batch_generator(train_x, train_t, batch_size)
        X_hat_mb, T_hat_mb = batch_generator(train_x_hat, train_t_hat, batch_size)

        # Train the discriminator
        _, step_d_loss = sess.run([d_solver, d_loss],
                                  feed_dict={X: X_mb, T: T_mb, X_hat: X_hat_mb, T_hat: T_hat_mb})

    # Test performance on the testing set
    y_pred_real_curr, y_pred_fake_curr = sess.run([y_pred_real, y_pred_fake],
                                                  feed_dict={X: test_x, T: test_t,
                                                             X_hat: test_x_hat, T_hat: test_t_hat})

    y_pred_final = np.squeeze(np.concatenate((y_pred_real_curr, y_pred_fake_curr), axis=0))
    y_label_final = np.concatenate((np.ones([len(y_pred_real_curr)]),
                                    np.zeros([len(y_pred_fake_curr)])), axis=0)

    # Compute the accuracy and the discriminative score
    acc = accuracy_score(y_label_final, (y_pred_final > 0.5))
    discriminative_score = np.abs(0.5 - acc)

    return discriminative_score
class ML_ISTA(nn.Module): def __init__(self, T): super(ML_ISTA, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True) self.strd3 = 2 self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True) self.strd4 = 1 self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True) self.strd5 = 1 self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True) self.strd6 = 1 self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True) self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True) self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.Wclass = nn.Linear(512, 10) self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data) self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data) self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data) self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data) def forward(self, x): gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3)) for _ in range(self.T): gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1) gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1)) gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2)) gamma3 = F.relu(((gamma3 - (self.c3 * F.conv2d((F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3)) gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4)) gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2) gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2) gammaGoal = gamma6 gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) return out
class ML_FISTA(nn.Module): def __init__(self, T): super(ML_FISTA, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True) self.strd3 = 2 self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True) self.strd4 = 1 self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True) self.strd5 = 1 self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True) self.strd6 = 1 self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True) self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True) self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.Wclass = nn.Linear(512, 10) self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data) self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data) self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data) self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data) def forward(self, x): t = 1 t_prv = t gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3)) gamma3_prv = gamma3 for _ in range(self.T): t_prv = t t = float(((1 + np.sqrt((1 + (4 * (t_prv ** 2))))) / 2)) Z = (gamma3 + (((t_prv - 1) / t) * (gamma3 - gamma3_prv))) gamma3_prv = gamma3 gamma2 = F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1) gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1)) gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2)) gamma3 = F.relu(((Z - (self.c3 * F.conv2d((F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3)) gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4)) gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2) gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2) gammaGoal = gamma6 gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) return out
class ML_LISTA_NET(nn.Module): def __init__(self, T): super(ML_LISTA_NET, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True) self.strd3 = 2 self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True) self.strd4 = 1 self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True) self.strd5 = 1 self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True) self.strd6 = 1 self.B1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True) self.B2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True) self.B3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True) self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True) self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.Wclass = nn.Linear(512, 10) self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data) self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data) self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data) self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data) self.B1.data = ((0.1 / np.sqrt((3 * 16))) * self.B1.data) self.B2.data = ((0.1 / np.sqrt((32 * 16))) * self.B2.data) self.B3.data = ((0.1 / np.sqrt((64 * 16))) * self.B3.data) def forward(self, x): gamma1 = F.relu((F.conv2d(x, self.B1, stride=self.strd1, padding=1) + self.b1)) gamma2 = F.relu((F.conv2d(gamma1, self.B2, stride=self.strd2, padding=1) + self.b2)) gamma3 = F.relu((F.conv2d(gamma2, self.B3, stride=self.strd3, padding=1) + self.b3)) for _ in range(self.T): gamma2 = F.conv_transpose2d(gamma3, self.B3, stride=self.strd3, padding=1) gamma1 = F.conv_transpose2d(gamma2, self.B2, stride=self.strd2, padding=1) gamma1 = F.relu((((gamma1 - F.conv2d(F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1), self.W1, stride=self.strd1, padding=1)) + F.conv2d(x, self.B1, stride=self.strd1, padding=1)) + self.b1)) gamma2 = F.relu((((gamma2 - F.conv2d(F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1), self.W2, stride=self.strd2, padding=1)) + F.conv2d(gamma1, self.B2, stride=self.strd2, padding=1)) + self.b2)) gamma3 = F.relu((((gamma3 - F.conv2d(F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1), self.W3, stride=self.strd3, padding=1)) + F.conv2d(gamma2, self.B3, stride=self.strd3, padding=1)) + self.b3)) gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4)) gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2) gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2) gammaGoal = gamma6 gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) return out
class LBP_NET(nn.Module): def __init__(self, T): super(LBP_NET, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True) self.strd3 = 2 self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True) self.strd4 = 1 self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True) self.strd5 = 1 self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True) self.strd6 = 1 self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True) self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True) self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.Wclass = nn.Linear(512, 10) self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data) self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data) self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data) self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data) def forward(self, x): if (self.T == 0): gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3)) else: gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1)) for _ in range(self.T): gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2)) for _ in range(self.T): gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3)) for _ in range(self.T): gamma3 = F.relu(((gamma3 - (self.c3 * F.conv2d((F.conv_transpose2d(gamma3, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3)) gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4)) gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2) gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2) gammaGoal = gamma6 gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) return out
class All_Free(nn.Module): def __init__(self): super(All_Free, self).__init__() m1 = 32 m2 = 64 m3 = 128 self.W1_1 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_2 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_3 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_4 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_5 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_6 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.W1_7 = nn.Parameter(((0.1 / np.sqrt((3 * 16))) * torch.randn(32, 3, 4, 4)), requires_grad=True) self.strd1 = 2 self.W2_1 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_2 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_3 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_4 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_5 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_6 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.W2_7 = nn.Parameter(((0.1 / np.sqrt((m1 * 16))) * torch.randn(64, 32, 4, 4)), requires_grad=True) self.strd2 = 2 self.W3_1 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_2 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_3 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_4 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_5 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_6 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.W3_7 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(128, 64, 4, 4)), requires_grad=True) self.strd3 = 2 self.b1_1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_2 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_3 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_4 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_5 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_6 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_7 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b2_1 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_3 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_4 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_5 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_6 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_7 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b3_1 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_2 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_4 = nn.Parameter(torch.zeros(1, 
m3, 1, 1), requires_grad=True) self.b3_5 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_6 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_7 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True) self.strd4 = 1 self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True) self.strd5 = 1 self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True) self.strd6 = 1 self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data) self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data) self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data) self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True) self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True) self.Wclass = nn.Linear(512, 10) def forward(self, x): gamma1 = F.relu((F.conv2d(x, self.W1_1, stride=self.strd1, padding=1) + self.b1_1)) gamma2 = F.relu((F.conv2d(gamma1, self.W2_1, stride=self.strd2, padding=1) + self.b2_1)) gamma3 = F.relu((F.conv2d(gamma2, self.W3_1, stride=self.strd3, padding=1) + self.b3_1)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_1, stride=self.strd1, padding=1) - x), self.W1_2, stride=self.strd1, padding=1)) + self.b1_2)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_1, stride=self.strd2, padding=1) - gamma1), self.W2_2, stride=self.strd2, padding=1)) + self.b2_2)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_1, stride=self.strd3, padding=1) - gamma2), self.W3_2, stride=self.strd3, padding=1)) + self.b3_2)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_2, stride=self.strd1, padding=1) - x), self.W1_3, stride=self.strd1, padding=1)) + self.b1_3)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_2, stride=self.strd2, padding=1) - gamma1), self.W2_3, stride=self.strd2, padding=1)) + self.b2_3)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_2, stride=self.strd3, padding=1) - gamma2), self.W3_3, stride=self.strd3, padding=1)) + self.b3_3)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_3, stride=self.strd1, padding=1) - x), self.W1_4, stride=self.strd1, padding=1)) + self.b1_4)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_3, stride=self.strd2, padding=1) - gamma1), self.W2_4, stride=self.strd2, padding=1)) + self.b2_4)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_3, stride=self.strd3, padding=1) - gamma2), self.W3_4, stride=self.strd3, padding=1)) + self.b3_4)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_4, stride=self.strd1, padding=1) - x), self.W1_5, stride=self.strd1, padding=1)) + self.b1_5)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_4, stride=self.strd2, padding=1) - gamma1), self.W2_5, stride=self.strd2, padding=1)) + self.b2_5)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_4, stride=self.strd3, padding=1) - gamma2), self.W3_5, stride=self.strd3, padding=1)) + self.b3_5)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_5, stride=self.strd1, padding=1) - x), self.W1_6, stride=self.strd1, padding=1)) + self.b1_6)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_5, stride=self.strd2, padding=1) - gamma1), self.W2_6, 
stride=self.strd2, padding=1)) + self.b2_6)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_5, stride=self.strd3, padding=1) - gamma2), self.W3_6, stride=self.strd3, padding=1)) + self.b3_6)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_6, stride=self.strd1, padding=1) - x), self.W1_7, stride=self.strd1, padding=1)) + self.b1_7)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_6, stride=self.strd2, padding=1) - gamma1), self.W2_7, stride=self.strd2, padding=1)) + self.b2_7)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_6, stride=self.strd3, padding=1) - gamma2), self.W3_7, stride=self.strd3, padding=1)) + self.b3_7)) gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4)) gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2) gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2) gammaGoal = gamma6 gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) return out
class ML_ISTA_NET(nn.Module):

    def __init__(self, m1, m2, m3, T):
        super(ML_ISTA_NET, self).__init__()
        self.T = T

        # Convolutional dictionaries (one per layer) and their strides
        self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True)
        self.strd3 = 1

        # Learned step sizes and biases (soft-thresholds)
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True)

        # Classifier on top of the deepest representation
        self.Wclass = nn.Linear(m3, 10)

        # Dictionary initialization scaling
        self.W1.data = (0.1 / np.sqrt(36)) * self.W1.data
        self.W2.data = (0.1 / np.sqrt(m1 * 36)) * self.W2.data
        self.W3.data = (0.1 / np.sqrt(m2 * 16)) * self.W3.data

    def forward(self, x, all_out=False):
        # First pass: feed-forward encoding
        gamma1 = F.relu(self.c1 * F.conv2d(x, self.W1, stride=self.strd1) + self.b1)
        gamma2 = F.relu(self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2) + self.b2)
        gamma3 = F.relu(self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3) + self.b3)

        # Unrolled multi-layer ISTA iterations
        for _ in range(self.T):
            # Backward pass from the deepest code
            gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2)

            # Gradient step followed by thresholding at every layer
            gamma1 = F.relu(gamma1 - self.c1 * F.conv2d(F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) - x,
                                                        self.W1, stride=self.strd1) + self.b1)
            gamma2 = F.relu(gamma2 - self.c2 * F.conv2d(F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) - gamma1,
                                                        self.W2, stride=self.strd2) + self.b2)
            gamma3 = F.relu(gamma3 - self.c3 * F.conv2d(F.conv_transpose2d(gamma3, self.W3, stride=self.strd3) - gamma2,
                                                        self.W3, stride=self.strd3) + self.b3)

        # Classify the flattened deepest representation
        gamma = gamma3.view(gamma3.shape[0], gamma3.shape[1] * gamma3.shape[2] * gamma3.shape[3])
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)

        if all_out:
            gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2)
            x_Rec = F.conv_transpose2d(gamma1, self.W1, stride=self.strd1)
            return out, gamma, x_Rec.detach()
        else:
            return out
class ML_FISTA_NET(nn.Module): def __init__(self, m1, m2, m3, T): super(ML_FISTA_NET, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True) self.strd3 = 1 self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.Wclass = nn.Linear(m3, 10) self.W1.data = ((0.1 / np.sqrt(36)) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((m1 * 36))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((m2 * 16))) * self.W3.data) def forward(self, x, all_out=False): t = 1 t_prv = t gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3)) gamma3_prv = gamma3 for _ in range(self.T): t_prv = t t = float(((1 + np.sqrt((1 + (4 * (t_prv ** 2))))) / 2)) Z = (gamma3 + (((t_prv - 1) / t) * (gamma3 - gamma3_prv))) gamma3_prv = gamma3 gamma2 = F.conv_transpose2d(Z, self.W3, stride=self.strd3) gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) - x), self.W1, stride=self.strd1))) + self.b1)) gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) - gamma1), self.W2, stride=self.strd2))) + self.b2)) gamma3 = F.relu(((Z - (self.c3 * F.conv2d((F.conv_transpose2d(Z, self.W3, stride=self.strd3) - gamma2), self.W3, stride=self.strd3))) + self.b3)) gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) if all_out: gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3) gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) x_Rec = F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) return (out, gamma, x_Rec.detach()) else: return out
class ML_LISTA_NET(nn.Module): def __init__(self, m1, m2, m3, T): super(ML_LISTA_NET, self).__init__() self.T = T self.B1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True) self.B2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True) self.B3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True) self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True) self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True) self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.strd1 = 2 self.strd2 = 2 self.strd3 = 1 self.Wclass = nn.Linear(m3, 10) self.W1.data = ((0.1 / np.sqrt(36)) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((m1 * 36))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((m2 * 16))) * self.W3.data) self.B1.data = ((0.1 / np.sqrt(36)) * self.B1.data) self.B2.data = ((0.1 / np.sqrt((m1 * 36))) * self.B2.data) self.B3.data = ((0.1 / np.sqrt((m2 * 16))) * self.B3.data) def forward(self, x, all_out=False): gamma1 = F.relu((F.conv2d(x, self.B1, stride=self.strd1) + self.b1)) gamma2 = F.relu((F.conv2d(gamma1, self.B2, stride=self.strd2) + self.b2)) gamma3 = F.relu((F.conv2d(gamma2, self.B3, stride=self.strd3) + self.b3)) for _ in range(self.T): gamma2 = F.conv_transpose2d(gamma3, self.B3, stride=self.strd3) gamma1 = F.conv_transpose2d(gamma2, self.B2, stride=self.strd2) gamma1 = F.relu((((gamma1 - F.conv2d(F.conv_transpose2d(gamma1, self.W1, stride=self.strd1), self.W1, stride=self.strd1)) + F.conv2d(x, self.B1, stride=self.strd1)) + self.b1)) gamma2 = F.relu((((gamma2 - F.conv2d(F.conv_transpose2d(gamma2, self.W2, stride=self.strd2), self.W2, stride=self.strd2)) + F.conv2d(gamma1, self.B2, stride=self.strd2)) + self.b2)) gamma3 = F.relu((((gamma3 - F.conv2d(F.conv_transpose2d(gamma3, self.W3, stride=self.strd3), self.W3, stride=self.strd3)) + F.conv2d(gamma2, self.B3, stride=self.strd3)) + self.b3)) gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) if all_out: return (out, gamma) else: return out
class LBP_NET(nn.Module): def __init__(self, m1, m2, m3, T): super(LBP_NET, self).__init__() self.T = T self.W1 = nn.Parameter(torch.randn(m1, 1, 6, 6), requires_grad=True) self.strd1 = 2 self.W2 = nn.Parameter(torch.randn(m2, m1, 6, 6), requires_grad=True) self.strd2 = 2 self.W3 = nn.Parameter(torch.randn(m3, m2, 4, 4), requires_grad=True) self.strd3 = 1 self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True) self.b1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.Wclass = nn.Linear(m3, 10) self.W1.data = ((0.1 / np.sqrt(36)) * self.W1.data) self.W2.data = ((0.1 / np.sqrt((m1 * 36))) * self.W2.data) self.W3.data = ((0.1 / np.sqrt((m2 * 16))) * self.W3.data) def forward(self, x, all_out=False): if (self.T == 0): gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3)) else: gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1)) + self.b1)) for _ in range(self.T): gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) - x), self.W1, stride=self.strd1))) + self.b1)) gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2)) + self.b2)) for _ in range(self.T): gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) - gamma1), self.W2, stride=self.strd2))) + self.b2)) gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3)) + self.b3)) for _ in range(self.T): gamma3 = F.relu(((gamma3 - (self.c3 * F.conv2d((F.conv_transpose2d(gamma3, self.W3, stride=self.strd3) - gamma2), self.W3, stride=self.strd3))) + self.b3)) gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) if all_out: gamma2 = F.conv_transpose2d(gamma3, self.W3, stride=self.strd3) gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2) x_Rec = F.conv_transpose2d(gamma1, self.W1, stride=self.strd1) return (out, gamma, x_Rec.detach()) else: return out
class All_Free(nn.Module): def __init__(self, m1, m2, m3): super(All_Free, self).__init__() self.W1_1 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_2 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_3 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_4 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_5 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_6 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.W1_7 = nn.Parameter(((0.1 / np.sqrt(36)) * torch.randn(m1, 1, 6, 6)), requires_grad=True) self.strd1 = 2 self.W2_1 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_2 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_3 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_4 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_5 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_6 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.W2_7 = nn.Parameter(((0.1 / np.sqrt((m1 * 36))) * torch.randn(m2, m1, 6, 6)), requires_grad=True) self.strd2 = 2 self.W3_1 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_2 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_3 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_4 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_5 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_6 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.W3_7 = nn.Parameter(((0.1 / np.sqrt((m2 * 16))) * torch.randn(m3, m2, 4, 4)), requires_grad=True) self.strd3 = 1 self.b1_1 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_2 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_3 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_4 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_5 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_6 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b1_7 = nn.Parameter(torch.zeros(1, m1, 1, 1), requires_grad=True) self.b2_1 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_2 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_3 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_4 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_5 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_6 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b2_7 = nn.Parameter(torch.zeros(1, m2, 1, 1), requires_grad=True) self.b3_1 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_2 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_3 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_4 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_5 = 
nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_6 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.b3_7 = nn.Parameter(torch.zeros(1, m3, 1, 1), requires_grad=True) self.Wclass = nn.Linear(m3, 10) def forward(self, x, all_out=False): gamma1 = F.relu((F.conv2d(x, self.W1_1, stride=self.strd1) + self.b1_1)) gamma2 = F.relu((F.conv2d(gamma1, self.W2_1, stride=self.strd2) + self.b2_1)) gamma3 = F.relu((F.conv2d(gamma2, self.W3_1, stride=self.strd3) + self.b3_1)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_1, stride=self.strd1) - x), self.W1_2, stride=self.strd1)) + self.b1_2)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_1, stride=self.strd2) - gamma1), self.W2_2, stride=self.strd2)) + self.b2_2)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_1, stride=self.strd3) - gamma2), self.W3_2, stride=self.strd3)) + self.b3_2)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_2, stride=self.strd1) - x), self.W1_3, stride=self.strd1)) + self.b1_3)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_2, stride=self.strd2) - gamma1), self.W2_3, stride=self.strd2)) + self.b2_3)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_2, stride=self.strd3) - gamma2), self.W3_3, stride=self.strd3)) + self.b3_3)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_3, stride=self.strd1) - x), self.W1_4, stride=self.strd1)) + self.b1_4)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_3, stride=self.strd2) - gamma1), self.W2_4, stride=self.strd2)) + self.b2_4)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_3, stride=self.strd3) - gamma2), self.W3_4, stride=self.strd3)) + self.b3_4)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_4, stride=self.strd1) - x), self.W1_5, stride=self.strd1)) + self.b1_5)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_4, stride=self.strd2) - gamma1), self.W2_5, stride=self.strd2)) + self.b2_5)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_4, stride=self.strd3) - gamma2), self.W3_5, stride=self.strd3)) + self.b3_5)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_5, stride=self.strd1) - x), self.W1_6, stride=self.strd1)) + self.b1_6)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_5, stride=self.strd2) - gamma1), self.W2_6, stride=self.strd2)) + self.b2_6)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_5, stride=self.strd3) - gamma2), self.W3_6, stride=self.strd3)) + self.b3_6)) gamma1 = F.relu(((gamma1 - F.conv2d((F.conv_transpose2d(gamma1, self.W1_6, stride=self.strd1) - x), self.W1_7, stride=self.strd1)) + self.b1_7)) gamma2 = F.relu(((gamma2 - F.conv2d((F.conv_transpose2d(gamma2, self.W2_6, stride=self.strd2) - gamma1), self.W2_7, stride=self.strd2)) + self.b2_7)) gamma3 = F.relu(((gamma3 - F.conv2d((F.conv_transpose2d(gamma3, self.W3_6, stride=self.strd3) - gamma2), self.W3_7, stride=self.strd3)) + self.b3_7)) gamma = gamma3.view(gamma3.shape[0], ((gamma3.shape[1] * gamma3.shape[2]) * gamma3.shape[3])) out = self.Wclass(gamma) out = F.log_softmax(out, dim=1) if all_out: return (out, gamma) else: return out
def permutation_test(tokens, key, n, k, vocab_size, n_runs=100):
    # Recreate the watermark key sequence xi from the seeded Mersenne Twister.
    rng = mersenne_rng(key)
    xi = np.array([rng.rand() for _ in range(n * vocab_size)], dtype=np.float32).reshape(n, vocab_size)
    test_result = detect(tokens, n, k, xi)

    # Compare against detection scores obtained with random (null) keys.
    p_val = 0
    for run in range(n_runs):
        xi_alternative = np.random.rand(n, vocab_size).astype(np.float32)
        null_result = detect(tokens, n, k, xi_alternative)

        # Assign p-value
        p_val += (null_result <= test_result)

    return (p_val + 1.0) / (n_runs + 1.0)
def detect(tokens, n, k, xi, gamma=0.0):
    m = len(tokens)
    n = len(xi)

    # Alignment cost between every length-k window of tokens and every cyclic shift of the key xi.
    A = np.empty((m - (k - 1), n))
    for i in range(m - (k - 1)):
        for j in range(n):
            A[i][j] = levenshtein(tokens[i:i + k], xi[(j + np.arange(k)) % n], gamma)

    return np.min(A)
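# Note (added): levenshtein(token_window, key_window, gamma) is assumed to be provided elsewhere in the
# original codebase (an alignment-cost helper); detect simply returns the smallest cost over all
# length-k token windows and all cyclic offsets of the key xi.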
def main(args):
    with open(args.document, 'r') as f:
        text = f.read()

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    tokens = tokenizer.encode(text, return_tensors='pt', truncation=True, max_length=2048).numpy()[0]

    t0 = time.time()
    pval = permutation_test(tokens, args.key, args.n, len(tokens), len(tokenizer))

    print('p-value: ', pval)
    print(f'(elapsed time: {time.time() - t0}s)')
class mersenne_rng(object): def __init__(self, seed=5489): self.state = ([0] * 624) self.f = 1812433253 self.m = 397 self.u = 11 self.s = 7 self.b = 2636928640 self.t = 15 self.c = 4022730752 self.l = 18 self.index = 624 self.lower_mask = ((1 << 31) - 1) self.upper_mask = (1 << 31) self.state[0] = seed for i in range(1, 624): self.state[i] = self.int_32(((self.f * (self.state[(i - 1)] ^ (self.state[(i - 1)] >> 30))) + i)) def twist(self): for i in range(624): temp = self.int_32(((self.state[i] & self.upper_mask) + (self.state[((i + 1) % 624)] & self.lower_mask))) temp_shift = (temp >> 1) if ((temp % 2) != 0): temp_shift = (temp_shift ^ 2567483615) self.state[i] = (self.state[((i + self.m) % 624)] ^ temp_shift) self.index = 0 def int_32(self, number): return int((4294967295 & number)) def randint(self): if (self.index >= 624): self.twist() y = self.state[self.index] y = (y ^ (y >> self.u)) y = (y ^ ((y << self.s) & self.b)) y = (y ^ ((y << self.t) & self.c)) y = (y ^ (y >> self.l)) self.index += 1 return self.int_32(y) def rand(self): return (self.randint() * (1.0 / 4294967296.0)) def randperm(self, n): p = list(range(n)) for i in range((n - 1), 0, (- 1)): j = (self.randint() % i) (p[i], p[j]) = (p[j], p[i]) return p
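# Quick sanity check for the pure-Python MT19937 above: the stream is fully determined by the seed
# (which is exactly what `permutation_test` relies on when it rebuilds the key), draws from `rand()`
# lie in [0, 1), and `randperm` returns a valid permutation. Illustrative usage only.
rng = mersenne_rng(seed=42)
draws = [rng.rand() for _ in range(5)]
assert all(0.0 <= d < 1.0 for d in draws)
assert sorted(rng.randperm(10)) == list(range(10))
assert [mersenne_rng(seed=42).rand() for _ in range(5)] == draws   # re-seeding reproduces the stream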
def substitution_attack(tokens, p, vocab_size, distribution=None): if (distribution is None): distribution = (lambda x: (torch.ones(size=(len(tokens), vocab_size)) / vocab_size)) idx = torch.randperm(len(tokens))[:int((p * len(tokens)))] new_probs = distribution(tokens) samples = torch.multinomial(new_probs, 1).flatten() tokens[idx] = samples[idx] return tokens
def deletion_attack(tokens, p): idx = torch.randperm(len(tokens))[:int((p * len(tokens)))] keep = torch.ones(len(tokens), dtype=torch.bool) keep[idx] = False tokens = tokens[keep] return tokens
def insertion_attack(tokens, p, vocab_size, distribution=None): if (distribution is None): distribution = (lambda x: (torch.ones(size=(len(tokens), vocab_size)) / vocab_size)) idx = torch.randperm(len(tokens))[:int((p * len(tokens)))] new_probs = distribution(tokens) samples = torch.multinomial(new_probs, 1) for i in idx.sort(descending=True).values: tokens = torch.cat([tokens[:i], samples[i], tokens[i:]]) tokens[i] = samples[i] return tokens
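# Illustrative round trip for the three robustness attacks above, using a toy token sequence and the
# default uniform replacement distribution. The vocabulary size and corruption rate p are made up.
import torch

tokens = torch.randint(0, 50, size=(100,))
corrupted = substitution_attack(tokens.clone(), p=0.25, vocab_size=50)   # same length, ~25% resampled
shorter = deletion_attack(tokens.clone(), p=0.25)                        # ~25% of tokens dropped
longer = insertion_attack(tokens.clone(), p=0.25, vocab_size=50)         # ~25% extra tokens inserted
assert (len(corrupted), len(shorter), len(longer)) == (100, 75, 125)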
def permutation_test(tokens, vocab_size, n, k, seed, test_stat, n_runs=100, max_seed=100000): generator = torch.Generator() generator.manual_seed(int(seed)) test_result = test_stat(tokens=tokens, n=n, k=k, generator=generator, vocab_size=vocab_size) p_val = 0 for run in range(n_runs): pi = torch.randperm(vocab_size) tokens = torch.argsort(pi)[tokens] seed = torch.randint(high=max_seed, size=(1,)).item() generator.manual_seed(int(seed)) null_result = test_stat(tokens=tokens, n=n, k=k, generator=generator, vocab_size=vocab_size, null=True) p_val += ((null_result <= test_result).float() / n_runs) return p_val
def fast_permutation_test(tokens, vocab_size, n, k, seed, test_stat, null_results): generator = torch.Generator() generator.manual_seed(int(seed)) test_result = test_stat(tokens=tokens, n=n, k=k, generator=generator, vocab_size=vocab_size) p_val = (torch.searchsorted(null_results, test_result, right=True) / len(null_results)) return p_val
def phi(tokens, n, k, generator, key_func, vocab_size, dist, null=False, normalize=False): if null: tokens = torch.unique(tokens, return_inverse=True, sorted=False)[1] eff_vocab_size = (torch.max(tokens) + 1) else: eff_vocab_size = vocab_size (xi, pi) = key_func(generator, n, vocab_size, eff_vocab_size) tokens = torch.argsort(pi)[tokens] if normalize: tokens = (tokens.float() / vocab_size) A = adjacency(tokens, xi, dist, k) closest = torch.min(A, axis=1)[0] return torch.min(closest)
def adjacency(tokens, xi, dist, k): m = len(tokens) n = len(xi) A = torch.empty(size=((m - (k - 1)), n)) for i in range((m - (k - 1))): for j in range(n): A[i][j] = dist(tokens[i:(i + k)], xi[((j + torch.arange(k)) % n)]) return A
def gumbel_key_func(generator, n, vocab_size, eff_vocab_size=None): if (eff_vocab_size is None): eff_vocab_size = vocab_size pi = torch.arange(eff_vocab_size) xi = torch.rand((n, eff_vocab_size), generator=generator) return (xi, pi)
def gumbel_sampling(probs, pi, xi): return torch.argmax((xi ** (1 / torch.gather(probs, 1, pi))), axis=1).unsqueeze((- 1))
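# Tiny worked example of the exponential-minimum ("Gumbel") decoding rule above: the next token is
# argmax_i xi_i ** (1 / p_i), which is distributed according to p when xi is uniform on [0, 1).
# `pi` is given an explicit batch dimension here so the `torch.gather` call lines up with `probs`;
# an identity permutation and made-up probabilities keep the example minimal.
import torch

probs = torch.tensor([[0.1, 0.2, 0.3, 0.25, 0.15]])   # (batch=1, vocab=5)
pi = torch.arange(5).unsqueeze(0)                      # identity permutation, (1, 5)
xi = torch.rand(1, 5)                                  # watermark key slice for this decoding step
next_token = gumbel_sampling(probs, pi, xi)            # -> shape (1, 1)
assert next_token.shape == (1, 1) and 0 <= int(next_token) < 5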
def gumbel_score(tokens, xi): xi_samp = torch.gather(xi, (- 1), tokens.unsqueeze((- 1))).squeeze() return (- torch.sum(torch.log((1 / (1 - xi_samp)))))
def gumbel_edit_score(tokens, xi, gamma): return gumbel_levenshtein(tokens.numpy(), xi.numpy(), gamma)
class Categories(): '\n Work with aliases from ISO 15924.\n https://en.wikipedia.org/wiki/ISO_15924#List_of_codes\n ' fpath = os.path.join(DATA_LOCATION, 'categories.json') @classmethod def _get_ranges(cls, categories): '\n :return: iter: (start code, end code)\n :rtype: list\n ' with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) for category in categories: if (category not in data['aliases']): raise ValueError('Invalid category: {}'.format(category)) for point in data['points']: if (point[2] in categories): (yield point[:2]) @classmethod def get_alphabet(cls, categories): '\n :return: set of chars in alphabet by categories list\n :rtype: set\n ' alphabet = set() for (start, end) in cls._get_ranges(categories): chars = (chr(code) for code in range(start, (end + 1))) alphabet.update(chars) return alphabet @classmethod def detect(cls, char): '\n :return: category\n :rtype: str\n ' with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) try: category = unicodedata.name(char).split()[0] except (TypeError, ValueError): pass else: if (category in data['aliases']): return category code = ord(char) for point in data['points']: if (point[0] <= code <= point[1]): return point[2] @classmethod def get_all(cls): with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) return set(data['aliases'])
class Languages(): fpath = os.path.join(DATA_LOCATION, 'languages.json') @classmethod def get_alphabet(cls, languages): '\n :return: set of chars in alphabet by languages list\n :rtype: set\n ' with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) alphabet = set() for lang in languages: if (lang not in data): raise ValueError('Invalid language code: {}'.format(lang)) alphabet.update(data[lang]) return alphabet @classmethod def detect(cls, char): '\n :return: set of languages which alphabet contains passed char.\n :rtype: set\n ' with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) languages = set() for (lang, alphabet) in data.items(): if (char in alphabet): languages.add(lang) return languages @classmethod def get_all(cls): with open(cls.fpath, encoding='utf-8') as f: data = json.load(f) return set(data.keys())
class Homoglyphs(): def __init__(self, categories=None, languages=None, alphabet=None, strategy=STRATEGY_IGNORE, ascii_strategy=STRATEGY_IGNORE, ascii_range=ASCII_RANGE): if (strategy not in (STRATEGY_LOAD, STRATEGY_IGNORE, STRATEGY_REMOVE)): raise ValueError('Invalid strategy') self.strategy = strategy self.ascii_strategy = ascii_strategy self.ascii_range = ascii_range if ((not categories) and (not languages) and (not alphabet)): categories = ('LATIN', 'COMMON') self.categories = set((categories or [])) self.languages = set((languages or [])) self.alphabet = set((alphabet or [])) if self.categories: alphabet = Categories.get_alphabet(self.categories) self.alphabet.update(alphabet) if self.languages: alphabet = Languages.get_alphabet(self.languages) self.alphabet.update(alphabet) self.table = self.get_table(self.alphabet) @staticmethod def get_table(alphabet): table = defaultdict(set) with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f: data = json.load(f) for char in alphabet: if (char in data): for homoglyph in data[char]: if (homoglyph in alphabet): table[char].add(homoglyph) return table @staticmethod def get_restricted_table(source_alphabet, target_alphabet): table = defaultdict(set) with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f: data = json.load(f) for char in source_alphabet: if (char in data): for homoglyph in data[char]: if (homoglyph in target_alphabet): table[char].add(homoglyph) return table @staticmethod def uniq_and_sort(data): result = list(set(data)) result.sort(key=(lambda x: ((- len(x)), x))) return result def _update_alphabet(self, char): langs = Languages.detect(char) if langs: self.languages.update(langs) alphabet = Languages.get_alphabet(langs) self.alphabet.update(alphabet) else: category = Categories.detect(char) if (category is None): return False self.categories.add(category) alphabet = Categories.get_alphabet([category]) self.alphabet.update(alphabet) self.table = self.get_table(self.alphabet) return True def _get_char_variants(self, char): if (char not in self.alphabet): if (self.strategy == STRATEGY_LOAD): if (not self._update_alphabet(char)): return [] elif (self.strategy == STRATEGY_IGNORE): return [char] elif (self.strategy == STRATEGY_REMOVE): return [] alt_chars = self.table.get(char, set()) if alt_chars: alt_chars2 = [self.table.get(alt_char, set()) for alt_char in alt_chars] alt_chars.update(*alt_chars2) alt_chars.add(char) return self.uniq_and_sort(alt_chars) def _get_combinations(self, text, ascii=False): variations = [] for char in text: alt_chars = self._get_char_variants(char) if ascii: alt_chars = [char for char in alt_chars if (ord(char) in self.ascii_range)] if ((not alt_chars) and (self.ascii_strategy == STRATEGY_IGNORE)): return if alt_chars: variations.append(alt_chars) if variations: for variant in product(*variations): (yield ''.join(variant)) def get_combinations(self, text): return list(self._get_combinations(text)) def _to_ascii(self, text): for variant in self._get_combinations(text, ascii=True): if (max(map(ord, variant)) in self.ascii_range): (yield variant) def to_ascii(self, text): return self.uniq_and_sort(self._to_ascii(text))
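# Illustrative usage of the vendored homoglyph machinery above. It assumes the bundled JSON tables
# (categories.json, languages.json, confusables_sept2022.json) are present under DATA_LOCATION, so
# treat it as a sketch rather than a self-contained test; the input strings are arbitrary examples.
hg = Homoglyphs(categories=('LATIN', 'COMMON'), strategy=STRATEGY_LOAD, ascii_strategy=STRATEGY_REMOVE)
print(hg.get_combinations('q'))    # confusable variants of 'q' within the loaded alphabets
print(hg.to_ascii('ρay'))          # candidate ASCII spellings for a string with a Greek lookalike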
def normalization_strategy_lookup(strategy_name: str) -> object: if (strategy_name == 'unicode'): return UnicodeSanitizer() elif (strategy_name == 'homoglyphs'): return HomoglyphCanonizer() elif (strategy_name == 'truecase'): return TrueCaser()
class HomoglyphCanonizer(): 'Attempts to detect homoglyph attacks and find a consistent canon.\n\n This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).\n ' def __init__(self): self.homoglyphs = None def __call__(self, homoglyphed_str: str) -> str: (target_category, all_categories) = self._categorize_text(homoglyphed_str) homoglyph_table = self._select_canon_category_and_load(target_category, all_categories) return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str) def _categorize_text(self, text: str) -> dict: iso_categories = defaultdict(int) for char in text: iso_categories[Categories.detect(char)] += 1 target_category = max(iso_categories, key=iso_categories.get) all_categories = tuple(iso_categories) return (target_category, all_categories) @cache def _select_canon_category_and_load(self, target_category: str, all_categories: tuple[str]) -> dict: homoglyph_table = Homoglyphs(categories=(target_category, 'COMMON')) source_alphabet = Categories.get_alphabet(all_categories) restricted_table = homoglyph_table.get_restricted_table(source_alphabet, homoglyph_table.alphabet) return restricted_table def _sanitize_text(self, target_category: str, homoglyph_table: dict, homoglyphed_str: str) -> str: sanitized_text = '' for char in homoglyphed_str: cat = Categories.detect(char) if ((target_category in cat) or ('COMMON' in cat) or (len(cat) == 0)): sanitized_text += char else: sanitized_text += list(homoglyph_table[char])[0] return sanitized_text
class UnicodeSanitizer(): 'Regex-based unicode sanitzer. Has different levels of granularity.\n\n * ruleset="whitespaces" - attempts to remove only whitespace unicode characters\n * ruleset="IDN.blacklist" - does its best to remove unusual unicode based on Network.IDN.blacklist characters\n * ruleset="ascii" - brute-forces all text into ascii\n\n This is unlikely to be a comprehensive list.\n\n You can find a more comprehensive discussion at https://www.unicode.org/reports/tr36/\n and https://www.unicode.org/faq/security.html\n ' def __init__(self, ruleset='whitespaces'): if (ruleset == 'whitespaces'): 'Documentation:\n \xa0: Non-breaking space\n \u1680: Ogham space mark\n \u180e: Mongolian vowel separator\n \u2000-\u200b: Various space characters, including en space, em space, thin space, hair space, zero-width space, and zero-width non-joiner\n \u200c\u200d: Zero-width non-joiner and zero-width joiner\n \u200e,\u200f: Left-to-right-mark, Right-to-left-mark\n \u2060: Word joiner\n \u2063: Invisible separator\n \u202f: Narrow non-breaking space\n \u205f: Medium mathematical space\n \u3000: Ideographic space\n \ufeff: Zero-width non-breaking space\n ᅠ: Halfwidth hangul filler\n \ufff9\ufffa\ufffb: Interlinear annotation characters\n ︀-️: Variation selectors\n \u202a-\u202f: Embedding characters\n ㅤ: Korean hangul filler.\n\n Note that these characters are not always superfluous whitespace characters!\n ' self.pattern = re.compile('[\\u00A0\\u1680\\u180E\\u2000-\\u200B\\u200C\\u200D\\u200E\\u200F\\u2060\\u2063\\u202F\\u205F\\u3000\\uFEFF\\uFFA0\\uFFF9\\uFFFA\\uFFFB\\uFE00\\uFE01\\uFE02\\uFE03\\uFE04\\uFE05\\uFE06\\uFE07\\uFE08\\uFE09\\uFE0A\\uFE0B\\uFE0C\\uFE0D\\uFE0E\\uFE0F\\u3164\\u202A\\u202B\\u202C\\u202D\\u202E\\u202F]') elif (ruleset == 'IDN.blacklist'): 'Documentation:\n [\xa0\u1680\u180e\u2000-\u200b\u202f\u205f\u2060\u2063\ufeff]: Matches any whitespace characters in the Unicode character\n set that are included in the IDN blacklist.\n \ufff9-\ufffb: Matches characters that are not defined in Unicode but are used as language tags in various legacy encodings.\n These characters are not allowed in domain names.\n \ud800-\udb7f: Matches the first part of a surrogate pair. Surrogate pairs are used to represent characters in the Unicode character\n set that cannot be represented by a single 16-bit value. The first part of a surrogate pair is in the range U+D800 to U+DBFF,\n and the second part is in the range U+DC00 to U+DFFF.\n \udb80-\udbff][\udc00-\udfff]?: Matches the second part of a surrogate pair. The second part of a surrogate pair is in the range U+DC00\n to U+DFFF, and is optional.\n [\udb40\udc20-\udb40\udc7f][\udc00-\udfff]: Matches certain invalid UTF-16 sequences which should not appear in IDNs.\n ' self.pattern = re.compile('[\\u00A0\\u1680\\u180E\\u2000-\\u200B\\u202F\\u205F\\u2060\\u2063\\uFEFF\\uFFF9-\\uFFFB\\uD800-\\uDB7F\\uDB80-\\uDBFF][\\uDC00-\\uDFFF]?|[\\uDB40\\uDC20-\\uDB40\\uDC7F][\\uDC00-\\uDFFF]') else: 'Documentation:\n This is a simple restriction to "no-unicode", using only ascii characters. Control characters are included.\n ' self.pattern = re.compile('[^\\x00-\\x7F]+') def __call__(self, text: str) -> str: text = unicodedata.normalize('NFC', text) text = self.pattern.sub(' ', text) text = re.sub(' +', ' ', text) text = ''.join((c for c in text if (unicodedata.category(c) != 'Cc'))) return text
class TrueCaser(): 'True-casing is a capitalization normalization that returns text to its original capitalization.\n\n This defends against attacks that wRIte TeXt lIkE spOngBoB.\n\n Here, a simple POS-tagger is used.\n ' uppercase_pos = ['PROPN'] def __init__(self, backend='spacy'): if (backend == 'spacy'): import spacy self.nlp = spacy.load('en_core_web_sm') self.normalize_fn = self._spacy_truecasing else: from nltk import pos_tag, word_tokenize import nltk nltk.download('punkt') nltk.download('averaged_perceptron_tagger') nltk.download('universal_tagset') self.normalize_fn = self._nltk_truecasing def __call__(self, random_capitalized_string: str) -> str: truecased_str = self.normalize_fn(random_capitalized_string) return truecased_str def _spacy_truecasing(self, random_capitalized_string: str): doc = self.nlp(random_capitalized_string.lower()) POS = self.uppercase_pos truecased_str = ''.join([(w.text_with_ws.capitalize() if ((w.pos_ in POS) or w.is_sent_start) else w.text_with_ws) for w in doc]) return truecased_str def _nltk_truecasing(self, random_capitalized_string: str): from nltk import pos_tag, word_tokenize import nltk nltk.download('punkt') nltk.download('averaged_perceptron_tagger') nltk.download('universal_tagset') POS = ['NNP', 'NNPS'] tagged_text = pos_tag(word_tokenize(random_capitalized_string.lower())) truecased_str = ' '.join([(w.capitalize() if (p in POS) else w) for (w, p) in tagged_text]) return truecased_str
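# Sketch of how the normalizers above are composed before watermark detection (the detector simply
# applies them in sequence). Each backend pulls in its own heavy dependency (spacy or nltk models,
# the homoglyph JSON tables), so this is illustrative only; the attacked string is made up.
attacked_text = 'soMe wATeRmArKed\u200b tEXt'   # random capitalisation plus a zero-width space
for name in ('unicode', 'homoglyphs', 'truecase'):
    attacked_text = normalization_strategy_lookup(name)(attacked_text)
print(attacked_text)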
class WatermarkBase(): def __init__(self, vocab: list[int]=None, gamma: float=0.5, delta: float=2.0, seeding_scheme: str='simple_1', hash_key: int=15485863, select_green_tokens: bool=True): self.vocab = vocab self.vocab_size = len(vocab) self.gamma = gamma self.delta = delta self.seeding_scheme = seeding_scheme self.rng = None self.hash_key = hash_key self.select_green_tokens = select_green_tokens def _seed_rng(self, input_ids: torch.LongTensor, seeding_scheme: str=None) -> None: if (seeding_scheme is None): seeding_scheme = self.seeding_scheme if (seeding_scheme == 'simple_1'): assert (input_ids.shape[(- 1)] >= 1), f'seeding_scheme={seeding_scheme} requires at least a 1 token prefix sequence to seed rng' prev_token = input_ids[(- 1)].item() self.rng.manual_seed((self.hash_key * prev_token)) else: raise NotImplementedError(f'Unexpected seeding_scheme: {seeding_scheme}') return def _get_greenlist_ids(self, input_ids: torch.LongTensor) -> list[int]: self._seed_rng(input_ids) greenlist_size = int((self.vocab_size * self.gamma)) vocab_permutation = torch.randperm(self.vocab_size, device=input_ids.device, generator=self.rng) if self.select_green_tokens: greenlist_ids = vocab_permutation[:greenlist_size] else: greenlist_ids = vocab_permutation[(self.vocab_size - greenlist_size):] return greenlist_ids
class WatermarkLogitsProcessor(WatermarkBase, LogitsProcessor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _calc_greenlist_mask(self, scores: torch.FloatTensor, greenlist_token_ids) -> torch.BoolTensor: green_tokens_mask = torch.zeros_like(scores) for b_idx in range(len(greenlist_token_ids)): green_tokens_mask[b_idx][greenlist_token_ids[b_idx]] = 1 final_mask = green_tokens_mask.bool() return final_mask def _bias_greenlist_logits(self, scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float) -> torch.Tensor: scores[greenlist_mask] = (scores[greenlist_mask] + greenlist_bias) return scores def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: if (self.rng is None): self.rng = torch.Generator(device=input_ids.device) batched_greenlist_ids = [None for _ in range(input_ids.shape[0])] for b_idx in range(input_ids.shape[0]): greenlist_ids = self._get_greenlist_ids(input_ids[b_idx]) batched_greenlist_ids[b_idx] = greenlist_ids green_tokens_mask = self._calc_greenlist_mask(scores=scores, greenlist_token_ids=batched_greenlist_ids) scores = self._bias_greenlist_logits(scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta) return scores
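# Sketch of wiring the logits processor above into Hugging Face generation. The model name and the
# gamma/delta values are placeholders; the essential part is passing the processor through a
# LogitsProcessorList so every decoding step biases the green-list logits by `delta`.
from transformers import AutoModelForCausalLM, AutoTokenizer, LogitsProcessorList

tok = AutoTokenizer.from_pretrained('facebook/opt-125m')
model = AutoModelForCausalLM.from_pretrained('facebook/opt-125m')
wm_processor = WatermarkLogitsProcessor(vocab=list(tok.get_vocab().values()), gamma=0.25, delta=2.0)
inputs = tok('The quick brown fox', return_tensors='pt')
out = model.generate(**inputs, max_new_tokens=60, do_sample=True,
                     logits_processor=LogitsProcessorList([wm_processor]))
watermarked_text = tok.decode(out[0], skip_special_tokens=True)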
class WatermarkDetector(WatermarkBase): def __init__(self, *args, device: torch.device=None, tokenizer: Tokenizer=None, z_threshold: float=4.0, normalizers: list[str]=['unicode'], ignore_repeated_bigrams: bool=False, **kwargs): super().__init__(*args, **kwargs) assert device, 'Must pass device' assert tokenizer, 'Need an instance of the generating tokenizer to perform detection' self.tokenizer = tokenizer self.device = device self.z_threshold = z_threshold self.rng = torch.Generator(device=self.device) if (self.seeding_scheme == 'simple_1'): self.min_prefix_len = 1 else: raise NotImplementedError(f'Unexpected seeding_scheme: {self.seeding_scheme}') self.normalizers = [] for normalization_strategy in normalizers: self.normalizers.append(normalization_strategy_lookup(normalization_strategy)) self.ignore_repeated_bigrams = ignore_repeated_bigrams if self.ignore_repeated_bigrams: assert (self.seeding_scheme == 'simple_1'), 'No repeated bigram credit variant assumes the single token seeding scheme.' def _compute_z_score(self, observed_count, T): expected_count = self.gamma numer = (observed_count - (expected_count * T)) denom = sqrt(((T * expected_count) * (1 - expected_count))) z = (numer / denom) return z def _compute_p_value(self, z): p_value = scipy.stats.norm.sf(z) return p_value def _score_sequence(self, input_ids: Tensor, return_num_tokens_scored: bool=True, return_num_green_tokens: bool=True, return_green_fraction: bool=True, return_green_token_mask: bool=False, return_z_score: bool=True, return_p_value: bool=True): if self.ignore_repeated_bigrams: assert (return_green_token_mask == False), "Can't return the green/red mask when ignoring repeats." bigram_table = {} token_bigram_generator = ngrams(input_ids.cpu().tolist(), 2) freq = collections.Counter(token_bigram_generator) num_tokens_scored = len(freq.keys()) for (idx, bigram) in enumerate(freq.keys()): prefix = torch.tensor([bigram[0]], device=self.device) greenlist_ids = self._get_greenlist_ids(prefix) bigram_table[bigram] = (True if (bigram[1] in greenlist_ids) else False) green_token_count = sum(bigram_table.values()) else: num_tokens_scored = (len(input_ids) - self.min_prefix_len) if (num_tokens_scored < 1): raise ValueError(f'Must have at least {1} token to score after the first min_prefix_len={self.min_prefix_len} tokens required by the seeding scheme.') (green_token_count, green_token_mask) = (0, []) for idx in range(self.min_prefix_len, len(input_ids)): curr_token = input_ids[idx] greenlist_ids = self._get_greenlist_ids(input_ids[:idx]) if (curr_token in greenlist_ids): green_token_count += 1 green_token_mask.append(True) else: green_token_mask.append(False) score_dict = dict() if return_num_tokens_scored: score_dict.update(dict(num_tokens_scored=num_tokens_scored)) if return_num_green_tokens: score_dict.update(dict(num_green_tokens=green_token_count)) if return_green_fraction: score_dict.update(dict(green_fraction=(green_token_count / num_tokens_scored))) if return_z_score: score_dict.update(dict(z_score=self._compute_z_score(green_token_count, num_tokens_scored))) if return_p_value: z_score = score_dict.get('z_score') if (z_score is None): z_score = self._compute_z_score(green_token_count, num_tokens_scored) score_dict.update(dict(p_value=self._compute_p_value(z_score))) if return_green_token_mask: score_dict.update(dict(green_token_mask=green_token_mask)) return score_dict def detect(self, text: str=None, tokenized_text: list[int]=None, return_prediction: bool=True, return_scores: bool=True, z_threshold: float=None, 
**kwargs) -> dict: assert ((text is not None) ^ (tokenized_text is not None)), 'Must pass either the raw or tokenized string' if return_prediction: kwargs['return_p_value'] = True for normalizer in self.normalizers: text = normalizer(text) if (len(self.normalizers) > 0): print(f'''Text after normalization: {text} ''') if (tokenized_text is None): assert (self.tokenizer is not None), ('Watermark detection on raw string ', 'requires an instance of the tokenizer ', 'that was used at generation time.') tokenized_text = self.tokenizer(text, return_tensors='pt', add_special_tokens=False)['input_ids'][0].to(self.device) if (tokenized_text[0] == self.tokenizer.bos_token_id): tokenized_text = tokenized_text[1:] elif ((self.tokenizer is not None) and (tokenized_text[0] == self.tokenizer.bos_token_id)): tokenized_text = tokenized_text[1:] output_dict = {} score_dict = self._score_sequence(tokenized_text, **kwargs) if return_scores: output_dict.update(score_dict) if return_prediction: z_threshold = (z_threshold if z_threshold else self.z_threshold) assert (z_threshold is not None), 'Need a threshold in order to decide outcome of detection test' output_dict['prediction'] = (score_dict['z_score'] > z_threshold) if output_dict['prediction']: output_dict['confidence'] = (1 - score_dict['p_value']) return output_dict
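# Matching detection sketch for the generation example above: the detector must be constructed with
# the same vocab, gamma, seeding scheme and hash key used at generation time (defaults here), and it
# reuses `tok`, `model` and `watermarked_text` from that snippet.
detector = WatermarkDetector(vocab=list(tok.get_vocab().values()), gamma=0.25, delta=2.0,
                             device=model.device, tokenizer=tok, z_threshold=4.0, normalizers=[])
result = detector.detect(watermarked_text)
print(result['z_score'], result['prediction'])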
def transform_key_func(generator, n, vocab_size, eff_vocab_size=None): pi = torch.randperm(vocab_size, generator=generator) xi = torch.rand((n, 1), generator=generator) return (xi, pi)
def transform_sampling(probs, pi, xi): cdf = torch.cumsum(torch.gather(probs, 1, pi), 1) return torch.gather(pi, 1, torch.searchsorted(cdf, xi))
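# Tiny worked example of the inverse-transform ("ITS") decoding rule above: permute the vocabulary
# with the secret `pi`, build the CDF of the permuted probabilities, and emit the first permuted
# token whose CDF value reaches the uniform key `xi`. `pi` is given a batch dimension so the gather
# calls line up with `probs`; the probabilities and the key value are made up.
import torch

probs = torch.tensor([[0.1, 0.2, 0.3, 0.25, 0.15]])   # (batch=1, vocab=5)
pi = torch.randperm(5).unsqueeze(0)                    # secret permutation of the vocabulary
xi = torch.tensor([[0.42]])                            # fixed key value for reproducibility
next_token = transform_sampling(probs, pi, xi)         # -> shape (1, 1), a token id in [0, 5)
assert 0 <= int(next_token) < 5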
def transform_score(tokens, xi): return torch.pow(torch.linalg.norm((tokens - xi.squeeze()), ord=1), 1)
def transform_edit_score(tokens, xi, gamma=1): return transform_levenshtein(tokens.numpy(), xi.squeeze().numpy(), gamma)
class BaseArgs(): '\n Arguments for data, model, and checkpoints.\n ' def __init__(self): (self.is_train, self.split) = (None, None) self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) self.parser.add_argument('--n_workers', type=int, default=8, help='number of threads') self.parser.add_argument('--gpus', type=str, default='0', help='visible GPU ids, separated by comma') self.parser.add_argument('--dset_dir', type=str, default=os.path.join(os.environ['HOME'], 'slowbro')) self.parser.add_argument('--dset_name', type=str, default='moving_mnist') self.parser.add_argument('--image_size', type=int, nargs='+', default=[64, 64]) self.parser.add_argument('--n_frames_input', type=int, default=10) self.parser.add_argument('--n_frames_output', type=int, default=10) self.parser.add_argument('--num_objects', type=int, nargs='+', default=[2], help='Max number of digits in Moving MNIST videos.') self.parser.add_argument('--model', type=str, default='crop', help='Model name') self.parser.add_argument('--n_components', type=int, default=2) self.parser.add_argument('--image_latent_size', type=int, default=256, help='Output size of image encoder') self.parser.add_argument('--content_latent_size', type=int, default=128, help='Size of content vector') self.parser.add_argument('--pose_latent_size', type=int, default=3, help='Size of pose vector') self.parser.add_argument('--hidden_size', type=int, default=64, help='Hidden size of LSTM') self.parser.add_argument('--ngf', type=int, default=8, help='number of channels in encoder and decoder') self.parser.add_argument('--stn_scale_prior', type=float, default=3, help='The scale of the spatial transformer prior.') self.parser.add_argument('--independent_components', type=int, default=0, help='Baseline: (if set to 1) independent prediction of each component.') self.parser.add_argument('--ckpt_dir', type=str, default=os.path.join(os.environ['HOME'], 'slowbro', 'ckpt'), help='the directory that contains all checkpoints') self.parser.add_argument('--ckpt_name', type=str, default='ckpt', help='checkpoint name') self.parser.add_argument('--log_every', type=int, default=400, help='log every x steps') self.parser.add_argument('--save_every', type=int, default=50, help='save every x epochs') self.parser.add_argument('--evaluate_every', type=int, default=(- 1), help='evaluate on val set every x epochs') def parse(self): opt = self.parser.parse_args() assert ((opt.n_frames_input > 0) and (opt.n_frames_output > 0)) (opt.is_train, opt.split) = (self.is_train, self.split) opt.dset_path = os.path.join(opt.dset_dir, opt.dset_name) if opt.is_train: ckpt_name = '{:s}_NC{:d}_lr{:.01e}_bt{:d}_{:s}'.format(opt.model, opt.n_components, opt.lr_init, opt.batch_size, opt.ckpt_name) else: ckpt_name = opt.ckpt_name opt.ckpt_path = os.path.join(opt.ckpt_dir, opt.dset_name, ckpt_name) if (opt.dset_name == 'moving_mnist'): opt.n_channels = 1 opt.image_size = (64, 64) elif (opt.dset_name == 'bouncing_balls'): opt.n_channels = 1 opt.image_size = (128, 128) else: raise NotImplementedError if (opt.model == 'crop'): opt.pose_latent_size = 3 else: raise NotImplementedError log = ['Arguments: '] for (k, v) in sorted(vars(opt).items()): log.append('{}: {}'.format(k, v)) return (opt, log)
class TestArgs(BaseArgs): '\n Arguments for testing.\n ' def __init__(self): super(TestArgs, self).__init__() self.is_train = False self.split = 'val' self.parser.add_argument('--batch_size', type=int, default=1, help='batch size') self.parser.add_argument('--which_epochs', type=int, nargs='+', default=[(- 1)], help='which epochs to evaluate, -1 to load latest checkpoint') self.parser.add_argument('--save_visuals', type=int, default=0, help='Save results to tensorboard') self.parser.add_argument('--save_all_results', type=int, default=0, help='Save results to tensorboard')
class TrainArgs(BaseArgs): '\n Arguments specific for training.\n ' def __init__(self): super(TrainArgs, self).__init__() self.is_train = True self.split = 'train' self.parser.add_argument('--batch_size', type=int, default=4, help='batch size per gpu') self.parser.add_argument('--n_epochs', type=int, default=50, help='total # of epochs') self.parser.add_argument('--n_iters', type=int, default=0, help='total # of iterations') self.parser.add_argument('--start_epoch', type=int, default=0, help='starting epoch') self.parser.add_argument('--lr_init', type=float, default=0.001, help='initial learning rate') self.parser.add_argument('--lr_decay', type=int, default=1, choices=[0, 1], help='whether to decay learning rate') self.parser.add_argument('--load_ckpt_dir', type=str, default='', help='directory of checkpoint') self.parser.add_argument('--load_ckpt_epoch', type=int, default=0, help='epoch to load checkpoint') self.parser.add_argument('--when_to_predict_only', type=float, default=0, help='when to set predict_loss_only to True.')
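# Illustrative invocation of the parsers above. `parse()` consumes sys.argv, so example flags are
# injected explicitly here; all values are made up, and the derived checkpoint path is shown only
# to indicate the naming scheme built in `BaseArgs.parse`.
import sys

sys.argv = ['train.py', '--dset_name', 'moving_mnist', '--model', 'crop',
            '--batch_size', '8', '--lr_init', '1e-3', '--ckpt_name', 'demo']
opt, log = TrainArgs().parse()
print(opt.ckpt_path)   # e.g. <ckpt_dir>/moving_mnist/crop_NC2_lr1.0e-03_bt8_demo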
def make_dataset(root, is_train): if is_train: folder = 'balls_n4_t60_ex50000' else: folder = 'balls_n4_t60_ex2000' dataset = np.load(os.path.join(root, folder, 'dataset_info.npy')) return dataset
class BouncingBalls(data.Dataset): '\n Bouncing balls dataset.\n ' def __init__(self, root, is_train, n_frames_input, n_frames_output, image_size, transform=None, return_positions=False): super(BouncingBalls, self).__init__() self.n_frames = (n_frames_input + n_frames_output) self.dataset = make_dataset(root, is_train) self.size = image_size self.scale = (self.size / 800) self.radius = int((60 * self.scale)) self.root = root self.is_train = is_train self.n_frames_input = n_frames_input self.n_frames_output = n_frames_output self.transform = transform self.return_positions = return_positions def __getitem__(self, idx): traj = self.dataset[idx] (vid_len, n_balls) = traj.shape[:2] if self.is_train: start = random.randint(0, (vid_len - self.n_frames)) else: start = 0 n_channels = 1 images = np.zeros([self.n_frames, self.size, self.size, n_channels], np.uint8) positions = [] for fid in range(self.n_frames): xy = [] for bid in range(n_balls): ball = traj[((start + fid), bid)] (x, y) = (int(round((self.scale * ball[0]))), int(round((self.scale * ball[1])))) images[fid] = cv2.circle(images[fid], (x, y), int((self.radius * ball[3])), 255, (- 1)) xy.append([(x / self.size), (y / self.size)]) positions.append(xy) if (self.transform is not None): images = self.transform(images) input = images[:self.n_frames_input] if (self.n_frames_output > 0): output = images[self.n_frames_input:] else: output = [] if (not self.return_positions): return (input, output) else: positions = np.array(positions) return (input, output, positions) def __len__(self): return len(self.dataset)
def get_data_loader(opt): if (opt.dset_name == 'moving_mnist'): transform = transforms.Compose([vtransforms.ToTensor()]) dset = MovingMNIST(opt.dset_path, opt.is_train, opt.n_frames_input, opt.n_frames_output, opt.num_objects, transform) elif (opt.dset_name == 'bouncing_balls'): transform = transforms.Compose([vtransforms.Scale(opt.image_size), vtransforms.ToTensor()]) dset = BouncingBalls(opt.dset_path, opt.is_train, opt.n_frames_input, opt.n_frames_output, opt.image_size[0], transform) else: raise NotImplementedError dloader = data.DataLoader(dset, batch_size=opt.batch_size, shuffle=opt.is_train, num_workers=opt.n_workers, pin_memory=True) return dloader
def get_model(opt): if (opt.model == 'crop'): model = DDPAE(opt) else: raise NotImplementedError model.setup_training() model.initialize_weights() return model
class ImageDecoder(nn.Module): '\n Decode images from vectors. Similar structure as DCGAN.\n ' def __init__(self, input_size, n_channels, ngf, n_layers, activation='tanh'): super(ImageDecoder, self).__init__() ngf = (ngf * (2 ** (n_layers - 2))) layers = [nn.ConvTranspose2d(input_size, ngf, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True)] for i in range(1, (n_layers - 1)): layers += [nn.ConvTranspose2d(ngf, (ngf // 2), 4, 2, 1, bias=False), nn.BatchNorm2d((ngf // 2)), nn.ReLU(True)] ngf = (ngf // 2) layers += [nn.ConvTranspose2d(ngf, n_channels, 4, 2, 1, bias=False)] if (activation == 'tanh'): layers += [nn.Tanh()] elif (activation == 'sigmoid'): layers += [nn.Sigmoid()] else: raise NotImplementedError self.main = nn.Sequential(*layers) def forward(self, x): if (len(x.size()) == 2): x = x.view(*x.size(), 1, 1) x = self.main(x) return x
class ImageEncoder(nn.Module): '\n Encodes images. Similar structure as DCGAN.\n ' def __init__(self, n_channels, output_size, ngf, n_layers): super(ImageEncoder, self).__init__() layers = [nn.Conv2d(n_channels, ngf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True)] for i in range(1, (n_layers - 1)): layers += [nn.Conv2d(ngf, (ngf * 2), 4, 2, 1, bias=False), nn.BatchNorm2d((ngf * 2)), nn.LeakyReLU(0.2, inplace=True)] ngf *= 2 layers += [nn.Conv2d(ngf, output_size, 4, 1, 0, bias=False)] self.main = nn.Sequential(*layers) def forward(self, x): x = self.main(x) x = x.squeeze(3).squeeze(2) return x
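# Shape sanity check for the DCGAN-style encoder/decoder pair above, with made-up hyperparameters:
# five layers map a 64x64 single-channel image down to a flat 256-d code and back up to 64x64.
import torch

imgs = torch.randn(2, 1, 64, 64)                                     # (batch, channels, h, w)
enc = ImageEncoder(n_channels=1, output_size=256, ngf=8, n_layers=5)
dec = ImageDecoder(input_size=256, n_channels=1, ngf=8, n_layers=5, activation='sigmoid')
latent = enc(imgs)                                                   # -> (2, 256)
recon = dec(latent)                                                  # -> (2, 1, 64, 64)
assert latent.shape == (2, 256) and recon.shape == (2, 1, 64, 64)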
def build(is_train, tb_dir=None): '\n Parse arguments, setup logger and tensorboardX directory.\n ' (opt, log) = (args.TrainArgs().parse() if is_train else args.TestArgs().parse()) os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus os.makedirs(opt.ckpt_path, exist_ok=True) torch.manual_seed(666) torch.cuda.manual_seed_all(666) np.random.seed(666) random.seed(666) logger = Logger(opt.ckpt_path, opt.split) if (tb_dir is not None): tb_path = os.path.join(opt.ckpt_path, tb_dir) vis = Visualizer(tb_path) else: vis = None logger.print(log) return (opt, logger, vis)
class Logger(): '\n Logger to write logs to file.\n ' def __init__(self, ckpt_path, name='train'): self.logger = logging.getLogger() self.logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(message)s', datefmt=blue('[%Y-%m-%d,%H:%M:%S]')) fh = logging.FileHandler(os.path.join(ckpt_path, '{}.log'.format(name)), 'w') fh.setLevel(logging.INFO) fh.setFormatter(formatter) self.logger.addHandler(fh) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) ch.setFormatter(formatter) self.logger.addHandler(ch) def print(self, log): if isinstance(log, list): self.logger.info('\n - '.join(log)) else: self.logger.info(log)
def to_numpy(array): '\n :param array: Variable, GPU tensor, or CPU tensor\n :return: numpy\n ' if isinstance(array, np.ndarray): return array if isinstance(array, torch.autograd.Variable): array = array.data if array.is_cuda: array = array.cpu() return array.numpy()
def blue(string): return (('\x1b[94m' + string) + '\x1b[0m')
def prompt_yes_no(question): '\n Prompt user to type yes or no.\n ' i = input((question + ' [y/n]: ')) if ((len(i) > 0) and ((i[0] == 'y') or (i[0] == 'Y'))): return True else: return False
class Visualizer(): def __init__(self, tb_path): self.tb_path = tb_path if os.path.exists(tb_path): if prompt_yes_no('{} already exists. Proceed?'.format(tb_path)): os.system('rm -r {}'.format(tb_path)) else: exit(0) self.writer = SummaryWriter(tb_path) def add_scalar(self, scalar_dict, global_step=None): for (tag, scalar) in scalar_dict.items(): if isinstance(scalar, dict): self.writer.add_scalars(tag, scalar, global_step) elif (isinstance(scalar, list) or isinstance(scalar, np.ndarray)): continue else: self.writer.add_scalar(tag, scalar, global_step) def add_images(self, image_dict, global_step=None, prefix=None): for (tag, images) in image_dict.items(): if (prefix is not None): tag = '{}/{}'.format(prefix, tag) images = torch.clamp(images, (- 1), 1) images = vutils.make_grid(images, nrow=images.size(0), normalize=True, range=((- 1), 1)) self.writer.add_image(tag, images, global_step)
def main(): TARGET_DIR = 'depth_benchmark' (K_RAW, K_DEPTH) = (DATA_PATHS['kitti_raw'], DATA_PATHS['kitti_depth']) print(f'-> Exporting Kitti Benchmark from "{K_DEPTH}" to "{K_RAW}"...') ROOT = (K_RAW / TARGET_DIR) ROOT.mkdir(exist_ok=True) for seq in kr.SEQS: (ROOT / seq).mkdir(exist_ok=True) for mode in ('train', 'val'): for dir in tqdm(sorted((K_DEPTH / mode).iterdir())): seq = next((s for s in kr.SEQS if dir.stem.startswith(s))) shutil.copytree(dir, ((ROOT / seq) / dir.stem), dirs_exist_ok=True)
def process_dataset(src_dir: Path, dst_dir: Path, use_hints: bool=True, use_benchmark: bool=True, overwrite: bool=False) -> None: 'Process the entire Kitti Raw Sync datasets.' (HINTS_DIR, BENCHMARK_DIR) = ('depth_hints', 'depth_benchmark') if (not (path := (dst_dir / 'splits')).is_dir()): shutil.copytree((src_dir / 'splits'), path) for seq in kr.SEQS: src_path = (src_dir / seq) dst_path = (dst_dir / seq) export_calibration(src_path, dst_path, overwrite) process_sequence(src_path, dst_path, overwrite) if use_hints: (src_hints, dst_hints) = ((src_dir / HINTS_DIR), (dst_dir / HINTS_DIR)) for src_scene in sorted(src_hints.iterdir()): dst_scene = (dst_hints / src_scene.name) process_sequence(src_scene, dst_scene, overwrite) if use_benchmark: (src_benchmark, dst_benchmark) = ((src_dir / BENCHMARK_DIR), (dst_dir / BENCHMARK_DIR)) for src_scene in sorted(src_benchmark.iterdir()): dst_scene = (dst_benchmark / src_scene.name) process_sequence(src_scene, dst_scene, overwrite)
def process_sequence(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None: 'Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26.' print(f'-> Processing sequence "{src_dir}"') for src_path in sorted(src_dir.iterdir()): if src_path.is_file(): continue dst_path = (dst_dir / src_path.name) process_drive(src_path, dst_path, overwrite)
def process_drive(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None: 'Process a full Kitti Raw drive: e.g. kitti_raw_sync/2011_09_26/2011_09_26_drive_0005.' print(f' -> Processing drive "{src_dir}"') for src_path in sorted(src_dir.iterdir()): dst_path = (dst_dir / src_path.name) process_dir(src_path, dst_path, overwrite)
def process_dir(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None: 'Processes a data directory within a given drive.\n\n Cases:\n - Base dataset: images_00, images_01, velodyne_points, oxts (/data & /timestamps for each)\n - Depth hints: images_02, images_03\n - Depth benchmark: groundtruth/image_02, groundtruth/image_03\n ' print(f' -> Processing dir "{src_dir}"') if ('depth_hints' in str(src_dir)): if ((not overwrite) and dst_dir.is_dir()): print(f' -> Skipping dir "{dst_dir}"') return export_hints(src_dir, dst_dir) elif ('depth_benchmark' in str(src_dir)): for src_path in sorted((src_dir / 'groundtruth').iterdir()): dst_path = ((dst_dir / 'groundtruth') / src_path.name) if ((not overwrite) and dst_path.is_dir()): print(f' -> Skipping dir "{dst_path}"') continue export_images(src_path, dst_path) else: for src_path in sorted(src_dir.iterdir()): dst_path = (dst_dir / src_path.name) if src_path.is_file(): if (not dst_path.is_file()): shutil.copy(src_path, dst_path) else: assert (src_path.stem == 'data') file = next(src_path.iterdir(), None) if (file is None): dst_path.mkdir(exist_ok=True, parents=True) print(f' -> Skipping empty dir "{dst_path}"') continue ext = file.suffix if ((not overwrite) and dst_path.is_dir()): print(f' -> Skipping dir "{dst_path}"') continue if (ext == '.png'): export_images(src_path, dst_path) elif (ext == '.bin'): export_velodyne(src_path, dst_path) elif (ext == '.txt'): export_oxts(src_path, dst_path)
def export_calibration(src_seq: Path, dst_seq: Path, overwrite: bool=False) -> None: 'Exports sequence calibration information as a LabelDatabase of arrays.' dst_dir = (dst_seq / 'calibration') if ((not overwrite) and dst_dir.is_dir()): print(f' -> Skipping calib "{dst_dir}"') return else: print(f' -> Processing calib "{dst_dir}"') (cam2cam, imu2velo, velo2cam) = kr.load_calib(src_seq.stem) data = {'cam2cam': cam2cam, 'imu2velo': imu2velo, 'velo2cam': velo2cam} data = {f'{k1}/{k2}': v2 for (k1, v1) in data.items() for (k2, v2) in v1.items()} write_label_database(data, dst_dir)
def export_images(src_dir: Path, dst_dir: Path) -> None: 'Export images as an ImageDatabase.' image_paths = {file.stem: file for file in sorted(src_dir.iterdir())} write_image_database(image_paths, dst_dir)
def export_oxts(src_dir: Path, dst_dir: Path) -> None: 'Export OXTS dicts as a LabelDatabase.' data = {file.stem: kr.load_oxts(file) for file in sorted(src_dir.iterdir())} write_label_database(data, dst_dir)
def export_velodyne(src_dir: Path, dst_dir: Path) -> None: 'Export Velodyne points as a LabelDatabase of arrays.' data = {file.stem: kr.load_velo(file) for file in sorted(src_dir.iterdir())} write_label_database(data, dst_dir)
def export_hints(src_dir: Path, dst_dir: Path) -> None: 'Export depth hints as a LabelDatabase of arrays.' data = {file.stem: np.load(file) for file in sorted(src_dir.iterdir())} write_array_database(data, dst_dir)
def save(file: Path, **kwargs) -> None: 'Save a list of arrays as a npz file.' print(f''' -> Saving to "{file}"...''') np.savez_compressed(file, **kwargs)
def export_kitti(depth_split: str, mode: str, use_velo_depth: bool=False, save_stem: Optional[str]=None, overwrite: bool=False) -> None: "Export the ground truth LiDAR depth images for a given Kitti test split.\n\n :param depth_split: (str) Kitti depth split to load.\n :param mode: (str) Split mode to use. {'train', 'val', 'test'}\n :param use_velo_depth: (bool) If `True`, load the raw velodyne depth. Only used for legacy reasons!\n :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).\n :param overwrite: (bool) If `True`, overwrite existing exported files.\n " print(f''' -> Exporting ground truth depths for KITTI "{depth_split}/{mode}"...''') split_file = kr.get_split_file(depth_split, mode='test') lines = [line.split() for line in kr.load_split(split_file)] items = [{'seq': l[0], 'cam': (2 if (l[2] == 'l') else 3), 'stem': int(l[1])} for l in lines] save_file = (split_file.parent / f'{save_stem}.npz') if ((not overwrite) and save_file.is_file()): raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite') (depths, Ks) = ([], []) for d in tqdm(items): (cam2cam, _, velo2cam) = kr.load_calib(d['seq'].split('/')[0]) if use_velo_depth: file = kr.get_velodyne_file(d['seq'], d['stem']) depth = kr.load_depth_velodyne(file, velo2cam, cam2cam, cam=d['cam'], use_velo_depth=use_velo_depth) else: file = kr.get_depth_file(d['seq'], f"image_0{d['cam']}", d['stem']) depth = kr.load_depth(file) depths.append(depth) Ks.append(cam2cam[f"K_0{d['cam']}"]) depths = np.array(depths, dtype=object) save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None: 'Save a list of arrays as a npz file.' print(f'-> Saving to "{file}"...') np.savez_compressed(file, **kwargs)
def export_syns(mode, save_stem: Optional[str]=None, overwrite: bool=False) -> None: 'Export the ground truth LiDAR depth images for SYNS.\n\n :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).\n :param overwrite: (bool) If `True`, overwrite existing exported files.\n ' print(f'-> Exporting ground truth depths for SYNS "{mode}"...') dataset = SYNSPatchesDataset(mode, use_depth=True, use_edges=True, as_torch=False) save_file = (dataset.split_file.parent / f'{save_stem}.npz') if ((not overwrite) and save_file.is_file()): raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite') (depths, edges, Ks, cats, subcats) = ([], [], [], [], []) for (_, y, m) in tqdm(dataset): depths.append(y['depth'].squeeze()) Ks.append(y['K']) edges.append(y['edges'].squeeze()) cats.append(m['cat']) subcats.append(m['subcat']) save(save_file, depth=depths, K=Ks, edge=edges, cat=cats, subcat=subcats)
def main(): device = ops.get_device('cpu') root = MODEL_ROOTS[(- 1)] (exp, ckpt_name) = ('benchmark', 'last') files = sorted((root / exp).glob(f'**/{ckpt_name}.ckpt')) for f in files: n = str(f).replace(str(root), '') is_training = (f.parent / 'training').is_file() is_finished = (f.parent / 'finished').is_file() try: ckpt = torch.load(f, map_location=device) print(f"{n} - Epoch: {ckpt['epoch']} - Training: {is_training} - Finished: {is_finished}") except EOFError: print(f'CORRUPTED! {f} - Training: {is_training} - Finished: {is_finished}')
def load_dfs(files: dict[(str, Sequence[Path])]): df = pd.json_normalize([io.load_yaml(f) for fs in files.values() for f in fs]) df.index = [f'{k}' for (k, fs) in files.items() for (i, _) in enumerate(fs)] return df
def load_dfs(files: dict[(str, Sequence[Path])]): dfs = [pd.json_normalize(io.load_yaml(f)) for fs in files.values() for f in fs] df = pd.concat(dfs) models = [f'{k}' for (k, fs) in files.items() for _ in fs] df.index = pd.MultiIndex.from_product([models, dfs[0].index], names=['Model', 'Item']) return df
def save_metrics(file: Path, metrics: Sequence[Metrics]): 'Helper to save metrics. If any strings are present, save metrics separately. Otherwise save means.' print(f''' -> Saving results to "{file}"...''') file.parent.mkdir(exist_ok=True, parents=True) use_mean = all((isinstance(v, float) for v in metrics[0].values())) if use_mean: metrics = {k: float(np.array([m[k] for m in metrics]).mean()) for k in metrics[0]} write_yaml(file, metrics, mkdir=True)
def compute_eval_metrics(preds: NDArray, mode: str, cfg_file: Path) -> Sequence[Metrics]: 'Compute evaluation metrics from network predictions.\n Predictions must be unscaled (see `compute_eval_preds`).\n\n :param preds: (NDArray) (b, h, w) Precomputed unscaled network predictions.\n :param mode: (str) Evaluation mode, which determines prediction scaling. {stereo, mono}\n :param cfg_file: (Path) Path to YAML config file.\n :return: (list[Metrics]) Metrics computed for each dataset item.\n ' cfg = load_yaml(cfg_file) (cfg_ds, cfg_args) = (cfg['dataset'], cfg['args']) target_stem = cfg_ds.pop('target_stem', f"targets_{cfg.get('mode', 'test')}") ds = parsers.get_ds(cfg_ds) target_file = (ds.split_file.parent / f'{target_stem}.npz') print(f''' -> Loading targets from "{target_file}"...''') data = np.load(target_file, allow_pickle=True) evaluator = MonoDepthEvaluator(mode=mode, **cfg_args) metrics = evaluator.run(preds, data) return metrics
def save_preds(file: Path, preds: NDArray) -> None: 'Helper to save network predictions to a NPZ file. Required for submission to the challenge.' file.parent.mkdir(exist_ok=True, parents=True) print(f'-> Saving network predictions to "{file}"...') np.savez_compressed(file, pred=preds)
def compute_eval_preds(ckpt_file: Union[(str, Path)], cfg: dict, overwrite: bool=False) -> NDArray: 'Compute network predictions required for evaluation.\n\n The confing in `cfg_dataset` is equivalent to that used by the `Trainer`.\n Note that in most cases, additional outputs, such as depth or edges can be omitted.\n Furthermore, image `size` is determined by the pretrained checkpoint.\n\n The config stored in `ckpt_file` is used to automatically determine:\n - Image size for network input.\n - Initial disparity scaling range.\n\n NOTE: The output disparities are NOT in metric depth. They are just scaled to the range expected by the network\n during training. We still need to apply fixed scaling (stereo) or median scaling (mono). This is done in the\n evaluation script by the `DepthEvaluator`.\n\n :param ckpt_file: (Path) Path to pretrained model checkpoint. Path can be absolute or relative to `MODEL_ROOTS`.\n :param cfg: (dict) Loaded YAML dataset config.\n :return: (ndarray) (b, h, w) Array containing unscaled network predictions for each dataset item.\n ' device = ops.get_device() ckpt_file = find_model_file(ckpt_file) if ((not (ckpt_file.parent / 'finished').is_file()) and (not overwrite)): print(f'-> Training for "{ckpt_file}" has not finished...') print('-> Set `--overwrite 1` to run this evaluation anyway...') exit() hparams_file = str((ckpt_file.parents[1] / 'hparams.yaml')) print(f''' -> Loading model weights from "{ckpt_file}"...''') mod = MonoDepthModule.load_from_checkpoint(ckpt_file, hparams_file=hparams_file, strict=False).eval() mod.freeze() cfg.update({'size': mod.cfg['dataset']['size'], 'as_torch': True, 'use_aug': False, 'log_time': False}) ds = parsers.get_ds(cfg) dl = DataLoader(ds, batch_size=12, num_workers=4, collate_fn=ds.collate_fn, pin_memory=True) print(f''' -> Computing predictions...''') preds = predict_depths(mod.nets['depth'].to(device), dl, device=device, min=mod.min_depth, max=mod.max_depth, use_stereo_blend=False) preds = ops.to_numpy(preds).squeeze() return preds
def load_dfs(d): df = pd.json_normalize([load_yaml(f) for fs in d.values() for f in fs]) df.index = [f'{m}' for (m, fs) in d.items() for (i, _) in enumerate(fs)] return df
def main(): pd.set_option('display.max_rows', None, 'display.max_columns', None) root = MODEL_ROOTS[(- 1)] exp = 'benchmark' split = 'eigen_benchmark' mode = '*' ckpt_name = 'best' res = 'results' fname = f'kitti_{split}_{ckpt_name}_{mode}.yaml' metric_type = ([(- 1), (- 1), (- 1), (- 1), (+ 1), (+ 1), (+ 1), (- 1), (+ 1), (+ 1), (+ 1), (+ 1)] if (split == 'eigen') else [(- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (+ 1), (+ 1), (+ 1), (+ 1)]) models = [] if (not models): fs = sorted(root.glob(f'{exp}/**/{res}/{fname}')) models = sorted({file.parents[2].stem for file in fs}) print('Evaluation Models:', models) eval_files = {m: sorted(root.glob(f'{exp}/{m}/**/{res}/{fname}')) for m in models} df = load_dfs(eval_files) df2 = df.groupby(level=0) df_mean = df2.agg('mean').reindex(models) df_mean.columns.name = 'Mean' df_std = df2.agg('std').reindex(models) df_std.columns.name = 'StdDev' print(TableFormatter.from_df(df_mean, metrics=metric_type).to_latex(precision=4))
def main(): parser = ArgumentParser(description='Monocular depth trainer.') parser.add_argument('--cfg-file', '-c', required=True, type=Path, help='Path to YAML config file to load.') parser.add_argument('--cfg-default', '-d', default=None, type=Path, help='Default YAML config file to overwrite.') parser.add_argument('--ckpt-dir', '-o', default=MODEL_ROOTS[(- 1)], type=Path, help='Root path to store checkpoint in.') parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.') parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.') parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.') args = parser.parse_args() fs = ([f, args.cfg_file] if (f := args.cfg_default) else [args.cfg_file]) cfg: MonoDepthCfg = io.load_merge_yaml(*fs) logger = pl.loggers.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=f'{args.version:03}', default_hp_metric=False) monitor = cfg['trainer'].get('monitor', 'AbsRel') mode = ('max' if ('Acc' in monitor) else 'min') cb_ckpt = plc.ModelCheckpoint(dirpath=Path(logger.log_dir, 'models'), filename='best', auto_insert_metric_name=False, monitor=f'val_metrics/{monitor}', mode=mode, save_last=True, save_top_k=1, verbose=True) cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.RichProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger()] if cfg['trainer'].get('swa'): cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.5, annealing_epochs=5, swa_lrs=None)) if cfg['trainer'].get('early_stopping'): cbks.append(plc.EarlyStopping(monitor=f'val_metrics/{monitor}', mode=mode, patience=5)) pl.seed_everything(args.seed) if (path := cfg['trainer'].get('load_ckpt')): path = find_model_file(path) print(f'Loading model from checkpoint: {path}') model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True) else: model = MonoDepthModule(cfg) resume_path = None if cfg['trainer'].get('resume_training'): print('Resuming training...') if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file(): resume_path = path else: print(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...') trainer = pl.Trainer(gpus=1, auto_select_gpus=True, max_epochs=cfg['trainer']['max_epochs'], limit_train_batches=1.0, limit_val_batches=200, accumulate_grad_batches=cfg['trainer'].get('accumulate_grad_batches', None), log_every_n_steps=cfg['trainer'].get('log_every_n_steps', 100), benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), logger=logger, callbacks=cbks, enable_model_summary=False) trainer.fit(model, ckpt_path=resume_path)
def main(): parser = ArgumentParser(description='Monocular depth trainer.') parser.add_argument('--cfg-file', '-c', required=True, type=Path, help='Path to YAML config file to load.') parser.add_argument('--cfg-default', '-d', default=None, type=Path, help='Default YAML config file to overwrite.') parser.add_argument('--ckpt-dir', '-o', default=Path('/tmp'), type=Path, help='Root path to store checkpoint in.') parser.add_argument('--name', '-n', required=True, type=str, help='Model name for use during saving.') parser.add_argument('--version', '-v', default=0, type=int, help='Model version number for use during saving.') parser.add_argument('--seed', '-s', default=42, type=int, help='Random generator seed.') args = parser.parse_args() fs = ([f, args.cfg_file] if (f := args.cfg_default) else [args.cfg_file]) cfg: MonoDepthCfg = io.load_merge_yaml(*fs) logger = pl.loggers.TensorBoardLogger(save_dir=args.ckpt_dir, name=args.name, version=f'{args.version:03}', default_hp_metric=False) monitor = cfg['trainer'].get('monitor', 'AbsRel') mode = ('max' if ('Acc' in monitor) else 'min') cb_ckpt = plc.ModelCheckpoint(dirpath=Path(logger.log_dir, 'models'), filename='best', auto_insert_metric_name=False, monitor=f'val_metrics/{monitor}', mode=mode, save_last=True, save_top_k=1, verbose=True) cbks = [cb_ckpt, plc.LearningRateMonitor(logging_interval='epoch'), plc.RichModelSummary(max_depth=2), cb.TQDMProgressBar(), cb.TrainingManager(Path(cb_ckpt.dirpath)), cb.DetectAnomaly(), HeavyLogger()] if cfg['trainer'].get('swa'): cbks.append(plc.StochasticWeightAveraging(swa_epoch_start=0.5, annealing_epochs=5, swa_lrs=None)) if cfg['trainer'].get('early_stopping'): cbks.append(plc.EarlyStopping(monitor=f'val_metrics/{monitor}', mode=mode, patience=5)) pl.seed_everything(args.seed) if (path := cfg['trainer'].get('load_ckpt')): path = find_model_file(path) print(f'Loading model from checkpoint: {path}') model = MonoDepthModule.load_from_checkpoint(path, cfg=cfg, strict=True) else: model = MonoDepthModule(cfg) resume_path = None if cfg['trainer'].get('resume_training'): print('Resuming training...') if (path := Path(cb_ckpt.dirpath, 'last.ckpt')).is_file(): resume_path = path else: print(f'No previous checkpoint found in "{path.parent}". Beginning training from scratch...') num_batches = 10 max_epochs = 50 trainer = pl.Trainer(gpus=1, auto_select_gpus=False, max_epochs=max_epochs, limit_train_batches=num_batches, limit_val_batches=num_batches, log_every_n_steps=num_batches, benchmark=cfg['trainer'].get('benchmark', False), precision=cfg['trainer'].get('precision', 32), gradient_clip_val=cfg['trainer'].get('gradient_clip_val', None), logger=logger, callbacks=cbks, enable_model_summary=False) trainer.fit(model, ckpt_path=resume_path)
def get_augmentations(strong=True): if strong: tfm = TrivialAugmentWide() else: tfm = ka.ColorJitter(brightness=(0.8, 1.2), contrast=(0.8, 1.2), saturation=(0.8, 1.2), hue=((- 0.1), 0.1), p=1.0, same_on_batch=True, keepdim=True) return tfm
class BaseDataset(ABC, Dataset): 'Base dataset class that all others should inherit from.\n\n The idea is to provide a common structure and format for data to follow. Additionally, provide some nice\n functionality and automation for the more boring stuff. Datasets are defined as providing the following dictionaries\n for each item:\n - x: Inputs to the network (typically \'imgs\').\n - y: Additional data required for loss computation (e.g. \'labels\') or for logging (e.g. non-augmented images).\n - meta: Metadata for the given item, typically for logging.\n\n BaseDataset will automatically add the following fields to \'meta\':\n - items: Item number (i.e. argument to \'__getitem__\').\n - errors: If \'retry_exc\' and NOT silent, log the exception messages caught.\n - aug: If \'use_aug\', child class should add a list of the aug performed.\n\n The additional features/utilities provided include:\n - A logger to be used for logging.\n - A timer which, if enabled, times load/augment for an item. Can also be used in the child class.\n - Functionality to automatically \'retry\' if the current item fails to load. This aims to replace "hacky"\n methods for manually filtering/blacklisting items, whilst being easy to enable & customize.\n - This functionality if wrapped in __getitem__, meaning that child classes only need to provide a \'load\' method,\n which loads the data and sorts it in the corresponding (x, y, meta) dicts.\n - Tools for visualizing/playing the dataset to inspect and sanity check it.\n\n Attributes:\n :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.\n :param use_aug: (bool) If `True`, call \'self.augment\' during __getitem__.\n :param log_time: (bool) If `True`, log time taken to load/augment each item.\n\n Utilities:\n :attr logger: (Logger) Logger with parent CogvisDataset to use for logging.\n :attr timer: (MultiLevelTimer) If \'log_timings\', timer to use for timing blocks.\n\n Methods:\n :method __len__: (abstract) Number of items in dataset.\n :method __getitem__: Retrieve a given item in the dataset. Should not be modified.\n :method load: (abstract) Load a single raw dataset item.\n :method augment: (override) Apply augmentations to a single dataset item. Default: No-op.\n :method transform: (override) Apply common transforms to a single dataset item. Default: No-op.\n :method to_torch: (override) Convert (x, y, meta) to torch. Default: Convert and permute (>=3D).\n :method collate_fn: (override) Collate a batch in a DataLoader. Default: PyTorch base collate.\n :method create_axs: (override) Create matplotlib axes to display a dataset item. Default: Single axis.\n :method show: (abstract) Display a single dataset item.\n :method play: Iterate over dataset and display each item.\n ' def __init__(self, as_torch: bool=True, use_aug: bool=False, log_time: bool=True): self.logger.debug('Initializing BaseDataset') self.as_torch = as_torch self.use_aug = use_aug self.log_time = log_time self.timer = (MultiLevelTimer(name=self.__class__.__qualname__, as_ms=True, precision=4) if self.log_time else nullcontext) if self.use_aug: self.logger.info(f'Dataset augmentations ENABLED') if self.log_time: self.logger.info(f'Logging dataset loading times...') def __init_subclass__(cls, retry_exc=None, silent=False, max_retries=10, use_blacklist=False, **kwargs): 'Subclass initializer. We wrap the subclass init to replace kwargs.' 
        super().__init_subclass__(**kwargs)
        cls.logger = get_logger(f'BaseDataset.{cls.__qualname__}')
        cls.__init__ = delegates(cls.__base__.__init__)(cls.__init__)
        cls.__getitem__ = retry_new_on_error(cls.__getitem__, exc=retry_exc, silent=silent,
                                             max=max_retries, use_blacklist=use_blacklist)

    def __repr__(self) -> str:
        sig = inspect.signature(self.__init__)
        kw = {k: getattr(self, k) for k in sig.parameters if hasattr(self, k)}
        kw = ', '.join(f'{k}={v}' for k, v in kw.items())
        return f'{self.__class__.__qualname__}({kw})'

    @abstractmethod
    def __len__(self) -> int:
        """Number of items in the dataset."""

    def __getitem__(self, item: int) -> BatchData:
        """Generic dataset __getitem__. Loads, augments, times and converts data to torch (if required)."""
        self.logger.debug(f'Loading item {item}...')
        x, y, m = {}, {}, {'items': str(item)}

        with self.timer('Total'):
            with self.timer('Load'):
                x, y, m = self.load(item, x, y, m)

            if self.use_aug:
                m['augs'] = ''
                with self.timer('Augment'):
                    x, y, m = self.augment(x, y, m)

            with self.timer('Transform'):
                x, y, m = self.transform(x, y, m)

            if self.as_torch:
                with self.timer('ToTorch'):
                    x, y, m = self.to_torch(x, y, m)

        if self.log_time:
            m['data_timer'] = self.timer.copy()
            self.logger.debug(str(self.timer))
            self.timer.reset()

        return x, y, m

    @abstractmethod
    def load(self, item: int, x: dict, y: dict, m: dict) -> BatchData:
        """Load data for a single 'item'. MUST return (x, y, m)."""

    def augment(self, x: dict, y: dict, m: dict) -> BatchData:
        """Augment a loaded item. Default is a no-op."""
        return x, y, m

    def transform(self, x: dict, y: dict, m: dict) -> BatchData:
        """Transform a loaded item. Default is a no-op."""
        return x, y, m

    def to_torch(self, x: dict, y: dict, m: dict) -> BatchData:
        """Convert (x, y, m) to torch Tensors. Default converts to torch and permutes >=3D tensors."""
        return ops.to_torch((x, y, m))

    @classmethod
    def collate_fn(cls, batch: Sequence[BatchData]):
        """Function to collate multiple dataset items. By default uses the PyTorch collator."""
        return default_collate(batch)

    def create_axs(self) -> Axes:
        """Create the axis structure required for plotting. Assumes data will be in numpy format."""
        _, ax = plt.subplots()
        return ax

    @abstractmethod
    def show(self, x: dict, y: dict, m: dict, axs: Optional[Axes] = None) -> None:
        """Show a single dataset item. Should call 'create_axs' if 'axs' is None."""

    def play(self, fps: float = 30, skip: int = 1, reverse: bool = False, fullscreen: bool = False,
             axs: Optional[Axes] = None) -> None:
        """Iterate through dataset at the required fps and show each item."""
        if self.as_torch:
            raise ValueError('Dataset must not be in torch format when playing.')

        axs = self.create_axs() if axs is None else axs
        fig = plt.gcf()
        if fullscreen:
            fig.canvas.manager.full_screen_toggle()

        items = range(len(self) - 1, 0, -skip) if reverse else range(0, len(self), skip)
        for i in items:
            x, y, m = self[i]
            axs.cla() if isinstance(axs, plt.Axes) else [ax.cla() for ax in axs.flatten()]
            self.show(x, y, m, axs)
            fig.suptitle(str(i))
            plt.pause(1 / fps)
        plt.show(block=False)
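# Illustrative sketch (not part of the original source): a minimal BaseDataset subclass showing
# the expected contract, i.e. implement __len__, load and show (augment/transform are optional).
# All paths, keys and data below are hypothetical dummies; retry_exc/max_retries demonstrate
# the class keyword arguments consumed by __init_subclass__.
import numpy as np


class RandomImageDataset(BaseDataset, retry_exc=FileNotFoundError, max_retries=5):
    def __init__(self, n_items: int = 100, **kwargs):
        super().__init__(**kwargs)
        self.n_items = n_items

    def __len__(self) -> int:
        return self.n_items

    def load(self, item, x, y, m):
        x['imgs'] = np.random.rand(64, 64, 3).astype(np.float32)  # dummy (h, w, 3) image
        y['labels'] = item % 10                                   # dummy class label
        m['path'] = f'/fake/path/{item:06}.png'                   # hypothetical metadata
        return x, y, m

    def show(self, x, y, m, axs=None):
        # Assumes numpy format, i.e. the dataset was created with as_torch=False (as `play` requires).
        axs = self.create_axs() if axs is None else axs
        axs.imshow(x['imgs'])
        axs.set_title(f"label={y['labels']}")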
@register('kitti_lmdb')
class KittiRawLMDBDataset(KittiRawDataset):
    """Kitti Depth based on the kitti_raw_sync dataset.

    LMDB variant of KittiRawDataset. This is designed to be a drop-in replacement that can help with IO load.
    As such, we only need to provide wrappers around the loading functions in the same format as the original dataset.

    The _databases are loaded as required and added to a cached dict.

    Attributes:
    :param split: (str) Kitti depth split to use (eigen, eigen_zhou, eigen_full, benchmark, odom).
    :param mode: (str) Dataset mode (core, val, test).
    :param size: (Sequence[int]) Target image training size as (w, h).
    :param supp_idxs: (int | Sequence[int]) Indexes of the support images to load.
    :param use_depth: (bool) If `True`, load ground truth LiDAR depth maps.
    :param use_hints: (bool) If `True`, load precomputed fused SGBM depth maps.
    :param use_benchmark: (bool) If `True`, load corrected ground truth depth maps.
    :param use_strong_aug: (bool) If `False`, use only colour jittering augmentations.
    :param as_torch: (bool) If `True`, convert (x, y, meta) to torch.
    :param use_aug: (bool) If `True`, call 'self.augment' during __getitem__.
    :param log_time: (bool) If `True`, log time taken to load/augment each item.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.image_dbs = {}
        self.depth_dbs = {}
        self.poses_dbs = {}
        self.hints_dbs = {}
        self.calib_dbs = {}
        self.preload()

    def preload(self) -> None:
        """Create all LMDBs required by the dataset split."""
        drives = {item['seq'] for item in self.items}
        for d in drives:
            self.image_dbs[f'{d}/image_02'] = kr.load_images(*d.split('/'), 'image_02')
            self.image_dbs[f'{d}/image_03'] = kr.load_images(*d.split('/'), 'image_03')

        if self.use_hints:
            for d in drives:
                self.hints_dbs[f'{d}/image_02'] = kr.load_hints(*d.split('/'), 'image_02')
                self.hints_dbs[f'{d}/image_03'] = kr.load_hints(*d.split('/'), 'image_03')

        if self.use_depth:
            if self.use_benchmark:
                for d in drives:
                    self.depth_dbs[f'{d}/image_02'] = kr.load_depths(*d.split('/'), 'image_02')
                    self.depth_dbs[f'{d}/image_03'] = kr.load_depths(*d.split('/'), 'image_03')
            else:
                seqs = {seq.split('/')[0] for seq in drives}
                self.calib_dbs = {s: kr.load_calib(s) for s in seqs}
                for d in drives:
                    s, d2 = d.split('/')
                    self.depth_dbs[d] = kr.load_velo_depths(s, d2, self.calib_dbs[s])

    def parse_items(self) -> tuple[Path, list[KittiRawItem]]:
        """Helper to parse each dataset item as a sequence, camera and file number."""
        file = kr.get_split_file(self.depth_split, self.mode)
        lines = [line.split() for line in io.readlines(file)]
        items = [{'seq': line[0], 'cam': self.side2cam[line[2]], 'stem': int(line[1])} for line in lines]
        return file, items

    def load_image(self, data: KittiRawItem, offset: int = 0) -> Image:
        """Load and resize a single image.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :param offset: (int) Additional offset to apply to the item number.
        :return: (Image) (self.w, self.h) Loaded PIL image.
        """
        k = f"{data['stem'] + offset:010}"
        kdb = f"{data['seq']}/{data['cam']}"

        db = self.image_dbs[kdb]
        if k not in db:
            raise FileNotFoundError(f'Could not find specified file "{kdb}/{k}" with "offset={offset!r}"')

        image = db[k].resize(self.size, resample=Image.BILINEAR)
        return image

    def load_depth(self, data: KittiRawItem) -> np.ndarray:
        """Load ground truth LiDAR depth.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :return: (ndarray) (h, w, 1) Loaded depth map. NOTE: Shape can vary for each item.
        """
        if self.use_benchmark:
            k = f"{data['stem']:010}"
            kdb = f"{data['seq']}/{data['cam']}"
            depth = self.depth_dbs[kdb][k]
        else:
            k = (f"{data['stem']:010}", int(data['cam'][-2:]))
            kdb = data['seq']
            depth = self.depth_dbs[kdb][k]

        depth = skit.resize(depth, (self.h_full, self.w_full), order=0, preserve_range=True, mode='constant')
        return depth[..., None]

    def load_hint(self, data: KittiRawItem) -> np.ndarray:
        """Load a precomputed fusion of SGBM predictions.

        :param data: (KittiRawItem) Data representing the item's sequence, camera and number.
        :return: (ndarray) (h, w, 1) (320, 1024) Loaded fused SGBM depth map.
        """
        k = f"{data['stem']:010}"
        kdb = f"{data['seq']}/{data['cam']}"
        depth = cv2.resize(self.hints_dbs[kdb][k], dsize=self.size, interpolation=cv2.INTER_NEAREST)
        return depth[..., None]
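# Hypothetical usage sketch (not part of the original source): instantiating the LMDB-backed
# dataset with the attributes listed in its docstring and browsing it via the BaseDataset API.
# The exact KittiRawDataset constructor signature is an assumption, not confirmed by this file.
dataset = KittiRawLMDBDataset(
    split='eigen_zhou', mode='core', size=(1024, 320), supp_idxs=[-1, 1],
    use_depth=True, use_hints=False, use_benchmark=False,
    as_torch=False, use_aug=False, log_time=True,
)
print(len(dataset), repr(dataset))
x, y, m = dataset[0]          # load a single item via BaseDataset.__getitem__
dataset.play(fps=10, skip=5)  # visually sanity-check the split (requires as_torch=False)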