code
stringlengths
17
6.64M
def test_dfa_models(model_architectures):
    """Smoke-check every registered DFA architecture at its declared input size."""
    for arch, input_size in model_architectures:
        check_model(getattr(models.dfa, arch), input_size)
def test_usf_models(model_architectures):
    """Smoke-check every registered USF architecture at its declared input size."""
    for arch, input_size in model_architectures:
        check_model(getattr(models.usf, arch), input_size)
def test_brsf_models(model_architectures):
    """Smoke-check every registered BRSF architecture at its declared input size."""
    for arch, input_size in model_architectures:
        check_model(getattr(models.brsf, arch), input_size)
def test_frsf_models(model_architectures):
    """Smoke-check every registered FRSF architecture at its declared input size."""
    for arch, input_size in model_architectures:
        check_model(getattr(models.frsf, arch), input_size)
def test_biomodule_convert(dummy_net_constructor, mode_types):
    """BioModule wraps a fresh net in every mode; 'dfa' additionally requires output_dim."""
    for mode in mode_types:
        net = dummy_net_constructor()
        if mode != 'dfa':
            _ = BioModule(net, mode)
            continue
        # dfa without output_dim must be rejected up front.
        with pytest.raises(ValueError, match='Model `output_dim` is required for Direct Feedback Alignment \\(dfa\\) mode'):
            BioModule(net, mode)
        _ = BioModule(net, mode, output_dim=10)
def test_module_converter_convert_dummy_net(dummy_net_constructor, mode_types):
    """Converted layers are counted correctly and get FRESH weights (no copy).

    BUG FIX: the original wrote `assert not np.testing.assert_array_almost_equal(...)`.
    That helper returns None and raises AssertionError on mismatch, so the old
    assertion passed exactly when the weights were EQUAL — the opposite of the
    intended property (compare the copy_weights=True sibling test, which checks
    equality). Assert the mismatch explicitly via pytest.raises.
    """
    for mode in mode_types:
        dummy_net = dummy_net_constructor()
        layers_to_convert = {str(type(dummy_net.conv1)): 1, str(type(dummy_net.fc)): 1}
        w1 = dummy_net.conv1.weight.data
        w2 = dummy_net.fc.weight.data
        output_dim = None
        converter = ModuleConverter(mode=mode)
        if mode == 'dfa':
            output_dim = 10
        converted = converter.convert(dummy_net, output_dim=output_dim)
        for layer, count in converter.replaced_layers_counts.items():
            assert layers_to_convert[layer] == count
        # Fresh weights: the comparison helper must raise because arrays differ.
        with pytest.raises(AssertionError):
            np.testing.assert_array_almost_equal(w1, converted.conv1.weight.data)
        with pytest.raises(AssertionError):
            np.testing.assert_array_almost_equal(w2, converted.fc.weight.data)
def test_module_converter_convert_dummy_net_copy_weights(dummy_net_constructor, mode_types):
    """With copy_weights=True the converted layers must keep the original weights."""
    for mode in mode_types:
        net = dummy_net_constructor()
        expected_counts = {str(type(net.conv1)): 1, str(type(net.fc)): 1}
        conv_w = net.conv1.weight.data
        fc_w = net.fc.weight.data
        output_dim = 10 if mode == 'dfa' else None
        converter = ModuleConverter(mode=mode)
        converted = converter.convert(net, copy_weights=True, output_dim=output_dim)
        for layer_name, count in converter.replaced_layers_counts.items():
            assert expected_counts[layer_name] == count
        np.testing.assert_array_almost_equal(conv_w, converted.conv1.weight.data)
        np.testing.assert_array_almost_equal(fc_w, converted.fc.weight.data)
def test_module_converter_convert_dummy_net_layer_config(dummy_net_constructor, mode_types):
    """layer_config options (here: kaiming init) must propagate to converted layers."""
    for mode in mode_types:
        net = dummy_net_constructor()
        expected_counts = {str(type(net.conv1)): 1, str(type(net.fc)): 1}
        conv_w = net.conv1.weight.data
        fc_w = net.fc.weight.data
        output_dim = 10 if mode == 'dfa' else None
        converter = ModuleConverter(mode=mode)
        layer_config = {'options': {'init': 'kaiming'}}
        converted = converter.convert(net, copy_weights=True, output_dim=output_dim, layer_config=layer_config)
        for layer_name, count in converter.replaced_layers_counts.items():
            assert expected_counts[layer_name] == count
        np.testing.assert_array_almost_equal(conv_w, converted.conv1.weight.data)
        np.testing.assert_array_almost_equal(fc_w, converted.fc.weight.data)
        assert converted.conv1.init == 'kaiming'
        assert converted.fc.init == 'kaiming'
def EmbedWord2Vec(walks, dimension):
    """Train a skip-gram Word2Vec on the walk corpus; return (node ids, vectors).

    Uses the legacy gensim keyword names (`size`, `iter`), matching the rest of
    the file.
    """
    start = time.time()
    print('Creating embeddings.')
    model = Word2Vec(walks, size=dimension, window=5, min_count=0, sg=1, workers=32, iter=1)
    ids = model.wv.index2word
    vectors = model.wv.vectors
    print('Embedding generation runtime: ', (time.time() - start))
    return (ids, vectors)
def EmbedPoincare(relations, epochs, dimension):
    """Train a Poincare embedding over the relation pairs; return (ids, vectors)."""
    model = PoincareModel(relations, size=dimension, workers=32)
    model.train(epochs)
    return (model.index2entity, model.vectors)
def TraverseAndSelect(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
    """Hyperedge-level random walks (TAS).

    For each hyperedge, run `num_walks` walks of `length` steps recording
    hyperedge indices. NOTE(review): the walk buffer is reset per hyperedge,
    not per walk, so the walks of one hyperedge are concatenated into a single
    sequence — preserved as original behavior.
    """
    walks = []
    for he_idx in hyperedges:
        he = hyperedges[he_idx]
        combined_walk = []
        for _ in range(num_walks):
            vertex = random.choice(he['members'])
            cur_idx = he_idx
            cur_he = he
            for _ in range(length):
                # Jump probability shrinks with the vertex's hyperedge degree.
                p_jump = (float(alpha) / len(vertexMemberships[vertex])) + beta
                if random.random() < p_jump:
                    vertex = random.choice(cur_he['members'])
                combined_walk.append(str(cur_idx))
                cur_idx = random.choice(vertexMemberships[vertex])
                cur_he = hyperedges[cur_idx]
        walks.append(combined_walk)
    return walks
def SubsampleAndTraverse(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
    """Vertex-level random walks (SAT).

    Mirrors TraverseAndSelect but records vertex ids, and the jump probability
    depends on the current hyperedge's size. NOTE(review): the starting vertex
    and the walk buffer are chosen once per hyperedge (outside the num_walks
    loop) — preserved as original behavior.
    """
    walks = []
    for he_idx in hyperedges:
        he = hyperedges[he_idx]
        combined_walk = []
        vertex = random.choice(he['members'])
        for _ in range(num_walks):
            cur_idx = he_idx
            cur_he = he
            for _ in range(length):
                p_jump = (float(alpha) / len(cur_he['members'])) + beta
                if random.random() < p_jump:
                    cur_idx = random.choice(vertexMemberships[vertex])
                    cur_he = hyperedges[cur_idx]
                combined_walk.append(str(vertex))
                vertex = random.choice(cur_he['members'])
        walks.append(combined_walk)
    return walks
def getFeaturesTrainingData():
    """Build shuffled (features, labels) arrays for every vertex in the global graph G.

    Relies on module-level globals: G, vertex_features, vertex_labels,
    feature_dimension, shuffle.

    CLEANUP: removed the unused locals `i` and `vertex_embedding_list`.
    """
    lists = []
    labels = []
    for vertex in G.nodes:
        lists.append({'f': vertex_features[vertex].tolist()})
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    # Shuffle rows and labels together to keep them aligned.
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    X_Features = np.asarray(X_arr)
    Y_Features = np.asarray(Y_arr)
    return (X_Features, Y_Features)
def getTrainingData():
    """Assemble the Deep Hyperedges training matrix.

    Each row concatenates: hyperedge embedding | up to max_groupsize member
    vertex embeddings | feature vector; labels are one-hot categories.
    Relies on module-level globals: hyperedges, vertex_embeddings, vertex_ids,
    hyperedge_embeddings, hyperedge_ids, vertex_features, num_categories,
    hyperedge_embedding_dimension, vertex_embedding_dimension, max_groupsize,
    feature_dimension, shuffle.
    """
    processed = 0  # progress counter over member vertices
    lists = []
    labels = []
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            processed += 1
            if (processed % 100000) == 0:
                print(processed)
            # BUG FIX: was a bare `except:` — list.index raises ValueError when
            # the vertex has no embedding; catch only that, don't hide real bugs.
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        x = np.zeros((hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize):] = hlist['f']
        # Pack each member embedding into its fixed-width slot after the hyperedge part.
        for slot, embedding in enumerate(np_vertex_embeddings):
            start = hyperedge_embedding_dimension + (slot * embedding.shape[0])
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def getMLPTrainingData():
    """Build (X, Y) for the MLP baseline: hyperedge embedding ++ feature vector."""
    lists = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        lists.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for entry in lists:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = entry['h']
        x[hyperedge_embedding_dimension:] = entry['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def getDSTrainingData():
    """Build (X, Y) for the Deep Sets baseline: member embeddings ++ feature vector.

    CLEANUP: removed a stray no-op `lists.append` expression (the method object
    was referenced but never called).
    """
    processed = 0  # progress counter over member vertices
    lists = []
    labels = []
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            processed += 1
            if (processed % 100000) == 0:
                print(processed)
            # BUG FIX: was a bare `except:` — list.index raises ValueError when
            # the vertex has no embedding; catch only that.
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list, 'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        x = np.zeros(((vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def hyperedgesTrain(X_train, Y_train, num_epochs):
    """Reset the Deep Hyperedges model from its saved weights, then fit."""
    weights_path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(weights_path)
    deephyperedges_transductive_model.fit(X_train, Y_train, epochs=num_epochs,
                                          batch_size=batch_size, shuffle=True,
                                          validation_split=0, verbose=0)
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train, num_epochs):
    """Reset the MLP baseline from its saved weights, then fit."""
    weights_path = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    MLP_transductive_model.load_weights(weights_path)
    MLP_transductive_model.fit(X_MLP_transductive_train, Y_MLP_transductive_train,
                               epochs=num_epochs, batch_size=batch_size,
                               shuffle=True, validation_split=0, verbose=0)
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train, num_epochs):
    """Reset the Deep Sets baseline from its saved weights, then fit."""
    weights_path = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(weights_path)
    deepsets_transductive_model.fit(X_deepset_transductive_train, Y_deepset_transductive_train,
                                    epochs=num_epochs, batch_size=batch_size,
                                    shuffle=True, validation_split=0, verbose=0)
def testModel(model, X_tst, Y_tst):
    """Predict on the test split and print a per-class classification report."""
    from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Neural Networks', 'Case Based', 'Reinforcement Learning', 'Probabilistic Methods', 'Genetic Algorithms', 'Rule Learning', 'Theory']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    finals_pred = []
    finals_test = []
    # Manual argmax: keep the first index of the maximum score (strict >).
    for p in y_pred:
        best_val = 0
        best_idx = 0
        for idx, score in enumerate(p):
            if score > best_val:
                best_val = score
                best_idx = idx
        finals_pred.append(best_idx)
    # Decode one-hot ground-truth rows back into class indices.
    for row in Y_tst:
        for idx, val in enumerate(row):
            if val == 1:
                finals_test.append(idx)
    report = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
    reports.append(report)
    print(report)
def RunAllTests(percentTraining, num_times, num_epochs):
    """Repeatedly resample a train/test split, train Deep Hyperedges, and report."""
    for iteration in range(num_times):
        print('percent: ', percentTraining, ', iteration: ', (iteration + 1), ', model: deep hyperedges')
        X, Y = getTrainingData()
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, train_size=percentTraining, test_size=(1 - percentTraining))
        hyperedgesTrain(X_train, Y_train, num_epochs)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
def getFeaturesTrainingData():
    """Build shuffled (features, labels) arrays for every vertex in the global graph G.

    Relies on module-level globals: G, vertex_features, vertex_labels,
    feature_dimension, shuffle.

    CLEANUP: removed the unused locals `i` and `vertex_embedding_list`.
    """
    lists = []
    labels = []
    for vertex in G.nodes:
        lists.append({'f': vertex_features[vertex].tolist()})
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    # Shuffle rows and labels together to keep them aligned.
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    X_Features = np.asarray(X_arr)
    Y_Features = np.asarray(Y_arr)
    return (X_Features, Y_Features)
def getTrainingData():
    """Assemble the Deep Hyperedges training matrix.

    Each row concatenates: hyperedge embedding | up to max_groupsize member
    vertex embeddings | feature vector; labels are one-hot categories.
    Relies on module-level globals: hyperedges, vertex_embeddings, vertex_ids,
    hyperedge_embeddings, hyperedge_ids, vertex_features, num_categories,
    hyperedge_embedding_dimension, vertex_embedding_dimension, max_groupsize,
    feature_dimension, shuffle.
    """
    processed = 0  # progress counter over member vertices
    lists = []
    labels = []
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            processed += 1
            if (processed % 100000) == 0:
                print(processed)
            # BUG FIX: was a bare `except:` — list.index raises ValueError when
            # the vertex has no embedding; catch only that, don't hide real bugs.
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        x = np.zeros((hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize):] = hlist['f']
        # Pack each member embedding into its fixed-width slot after the hyperedge part.
        for slot, embedding in enumerate(np_vertex_embeddings):
            start = hyperedge_embedding_dimension + (slot * embedding.shape[0])
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def getMLPTrainingData():
    """Build (X, Y) for the MLP baseline: hyperedge embedding ++ feature vector."""
    lists = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        lists.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for entry in lists:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = entry['h']
        x[hyperedge_embedding_dimension:] = entry['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def getDSTrainingData():
    """Build (X, Y) for the Deep Sets baseline: member embeddings ++ feature vector.

    CLEANUP: removed a stray no-op `lists.append` expression (the method object
    was referenced but never called).
    """
    processed = 0  # progress counter over member vertices
    lists = []
    labels = []
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            processed += 1
            if (processed % 100000) == 0:
                print(processed)
            # BUG FIX: was a bare `except:` — list.index raises ValueError when
            # the vertex has no embedding; catch only that.
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except ValueError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list, 'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        x = np.zeros(((vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return (np.asarray(X_arr), np.asarray(Y_arr))
def hyperedgesTrain(X_train, Y_train):
    """Reset the Deep Hyperedges model from its saved weights, then fit.

    Reads `num_epochs`, `batch_size` and `dataset_name` from module globals.
    """
    weights_path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(weights_path)
    deephyperedges_transductive_model.fit(X_train, Y_train, epochs=num_epochs,
                                          batch_size=batch_size, shuffle=True,
                                          validation_split=0, verbose=0)
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train):
    """Reset the MLP baseline from its saved weights, then fit.

    Reads `num_epochs`, `batch_size` and `dataset_name` from module globals.
    """
    weights_path = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    MLP_transductive_model.load_weights(weights_path)
    MLP_transductive_model.fit(X_MLP_transductive_train, Y_MLP_transductive_train,
                               epochs=num_epochs, batch_size=batch_size,
                               shuffle=True, validation_split=0, verbose=0)
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train):
    """Reset the Deep Sets baseline from its saved weights, then fit.

    Reads `num_epochs`, `batch_size` and `dataset_name` from module globals.
    """
    weights_path = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(weights_path)
    deepsets_transductive_model.fit(X_deepset_transductive_train, Y_deepset_transductive_train,
                                    epochs=num_epochs, batch_size=batch_size,
                                    shuffle=True, validation_split=0, verbose=0)
def testModel(model, X_tst, Y_tst):
    """Predict on the test split, print a classification report and accuracy.

    CLEANUP: removed the duplicated `target_names = target_names = ...`
    assignment.
    """
    from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Type-1 Diabetes', 'Type-2 Diabetes', 'Type-3 Diabetes']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    finals_pred = []
    finals_test = []
    # Manual argmax: keep the first index of the maximum score (strict >).
    for p in y_pred:
        best_val = 0
        best_idx = 0
        for idx, score in enumerate(p):
            if score > best_val:
                best_val = score
                best_idx = idx
        finals_pred.append(best_idx)
    # Decode one-hot ground-truth rows back into class indices.
    for row in Y_tst:
        for idx, val in enumerate(row):
            if val == 1:
                finals_test.append(idx)
    report = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
    print(report)
    reports.append(report)
    print(accuracy_score(finals_test, finals_pred))
def RunAllTests(percentTraining, num_times=10):
    """Train/evaluate Deep Hyperedges, MLP and Deep Sets on fresh random splits."""
    for i in range(num_times):
        holdout = 1 - percentTraining
        print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep hyperedges')
        X, Y = getTrainingData()
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, train_size=percentTraining, test_size=holdout)
        hyperedgesTrain(X_train, Y_train)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
        print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: MLP')
        X_MLP, Y_MLP = getMLPTrainingData()
        mlp_X_train, mlp_X_test, mlp_Y_train, mlp_Y_test = train_test_split(
            X_MLP, Y_MLP, train_size=percentTraining, test_size=holdout)
        MLPTrain(mlp_X_train, mlp_Y_train)
        testModel(MLP_transductive_model, mlp_X_test, mlp_Y_test)
        print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep sets')
        X_deepset, Y_deepset = getDSTrainingData()
        ds_X_train, ds_X_test, ds_Y_train, ds_Y_test = train_test_split(
            X_deepset, Y_deepset, train_size=percentTraining, test_size=holdout)
        DeepSetsTrain(ds_X_train, ds_Y_train)
        testModel(deepsets_transductive_model, ds_X_test, ds_Y_test)
def smooth(scalars, weight):
    """Exponential moving average of `scalars`, seeded with the first value."""
    last = scalars[0]
    return_vals = []
    for point in scalars:
        last = last * weight + (1 - weight) * point
        return_vals.append(last)
    return return_vals
def plot(deephyperedges_directory, MLP_directory, deepsets_directory, metric, dataset):
    """Plot raw + smoothed training curves for the three models and save a PNG."""
    def read_xy(csv_path):
        # Extract (Step, Value) columns as parallel float lists.
        frame = pd.read_csv(csv_path)
        steps, values = [], []
        for _, row in frame.iterrows():
            steps.append(float(row['Step']))
            values.append(float(row['Value']))
        return steps, values

    x, y = read_xy(deephyperedges_directory)
    x_mlp, y_mlp = read_xy(MLP_directory)
    x_ds, y_ds = read_xy(deepsets_directory)
    sns.set()
    # Raw curves are drawn translucent, smoothed curves fully opaque.
    ds_normal = '(0.0, 0.0, 0.7, 0.2)'
    ds_smoothed = '(0.0, 0.0, 0.7, 1)'
    dh_normal = '(0.0, 0.7, 0.0, 0.2)'
    dh_smoothed = '(0.0, 0.7, 0.0, 1)'
    mlp_normal = '(0.7, 0.2, 0.1, 0.2)'
    mlp_smoothed = '(0.7, 0.2, 0.1, 1)'
    plt.gca().set_prop_cycle(color=[mlp_normal, ds_normal, dh_normal, mlp_smoothed, ds_smoothed, dh_smoothed])
    plt.plot(x_mlp, y_mlp)
    plt.plot(x_ds, y_ds)
    plt.plot(x, y)
    plt.plot(x_mlp, smooth(y_mlp, 0.8))
    plt.plot(x_ds, smooth(y_ds, 0.8))
    plt.plot(x, smooth(y, 0.8))
    plt.legend(['_nolegend_', '_nolegend_', '_nolegend_', 'MLP + TAS Walks', 'Deep Sets + SAT Walks', 'Deep Hyperedges'], loc='bottom right')
    plt.savefig('images/paper/' + dataset + '/' + metric + '.png', dpi=300)
    plt.show()
def plotAll(dataset):
    """Generate the four standard metric plots for `dataset`.

    CLEANUP: the original repeated the same three-path construction four times;
    the (csv file, output name) pairs are now data-driven, preserving order.
    """
    metrics = [
        ('run-.-tag-categorical_accuracy.csv', 'train_accuracy'),
        ('run-.-tag-loss.csv', 'train_loss'),
        ('run-.-tag-val_categorical_accuracy.csv', 'validation_accuracy'),
        ('run-.-tag-val_loss.csv', 'validation_loss'),
    ]
    base = 'images/paper/' + dataset
    for metric_file, metric_name in metrics:
        plot(base + '/deephyperedges/' + metric_file,
             base + '/MLP/' + metric_file,
             base + '/deepsets/' + metric_file,
             metric_name, dataset)
class Boco():
    """Base boundary-condition object, identified by a unique name."""

    def __init__(self, name):
        self.name = name

    def validate(self):
        # Subclasses are expected to provide a computeLoss attribute.
        assert self.computeLoss, 'You need to specify a function to compute the loss'
class Neumann(Boco):
    """Boundary condition that penalizes a user-supplied residual on sampled points."""

    def __init__(self, sampler, name='neumann'):
        super().__init__(name)
        self.vars = sampler.vars
        self.sampler = sampler

    def sample(self, n_samples=None):
        # Delegate entirely to the wrapped sampler.
        return self.sampler.sample(n_samples)

    def validate(self, inputs, outputs):
        super().validate()
        assert inputs == self.vars, f'Boco {self.name} with different inputs !'
        assert self.computeBocoLoss, 'You need to specify a function to compute the loss'

    def computeLoss(self, model, criterion, inputs, outputs):
        raw = self.sample()
        X = torch.stack([raw[var] for var in inputs], axis=(- 1))
        X.requires_grad_(True)
        y = model(X)
        residuals = self.computeBocoLoss(X, y)
        # Each named residual is driven towards zero.
        return {f'{self.name}_{name}': criterion(res, torch.zeros(res.shape).to(X.device))
                for name, res in residuals.items()}

    def computeGrads(self, outputs, inputs):
        grads, = torch.autograd.grad(
            outputs, inputs,
            grad_outputs=outputs.data.new(outputs.shape).fill_(1),
            create_graph=True, only_inputs=True)
        return grads
class Periodic(Boco):
    """Boundary condition forcing the model to agree on two paired boundaries."""

    def __init__(self, sampler, sampler1, sampler2, name='periodic'):
        super().__init__(name)
        self.sampler = sampler
        self.sampler1 = sampler1
        self.sampler2 = sampler2
        # Both boundary samplers must cover the same variable set (plus shared vars).
        inputs1 = tuple(self.sampler1.sample(1).keys())
        inputs2 = tuple(self.sampler2.sample(1).keys())
        vars1 = sampler.vars + inputs1
        vars2 = sampler.vars + inputs2
        assert len(vars1) == len(vars2), 'Samplers must have the same variables'
        for var in vars1:
            assert var in vars2, 'Samplers must have the same variables'
        self.vars = vars1

    def sample(self, n_samples=None):
        # Shared coordinates are identical on both sides; each side adds its own boundary vars.
        shared = self.sampler.sample(n_samples)
        side1 = self.sampler1.sample(n_samples)
        side1.update(shared)
        side2 = self.sampler2.sample(n_samples)
        side2.update(shared)
        return (side1, side2)

    def validate(self, inputs, outputs):
        super().validate()
        assert len(inputs) == len(self.vars), f'Boco {self.name} with different inputs !'
        for var in self.vars:
            assert var in inputs, f'Boco {self.name} with different inputs !'

    def computeLoss(self, model, criterion, inputs, outputs):
        _x1, _x2 = self.sample()
        x1 = torch.stack([_x1[var] for var in inputs], axis=(- 1))
        x2 = torch.stack([_x2[var] for var in inputs], axis=(- 1))
        y1 = model(x1)
        y2 = model(x2)
        return {self.name: criterion(y1, y2)}
class Dataset(torch.utils.data.Dataset):
    """Cartesian-product dataset: meshgrid of per-variable coordinate arrays.

    Each item is one grid point as a float tensor of shape (n_vars,).
    """

    def __init__(self, data, device='cpu'):
        grid = np.meshgrid(*data)
        points = np.stack(grid, (- 1)).reshape((- 1), len(data))
        self.X = torch.from_numpy(points).float().to(device)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, ix):
        return self.X[ix]
class Mesh():
    """Wraps a dict of per-variable coordinate arrays into a meshgrid Dataset."""

    def __init__(self, data, device='cpu'):
        assert isinstance(data, dict), 'you must pass a dict with your data'
        self.vars, data = tuple(data.keys()), data.values()
        self.dataset = Dataset(data, device)
        self.device = device

    def build_dataloader(self, batch_size=None, shuffle=True):
        """Return a DataLoader over the mesh; `None` batch_size means full-batch.

        IDIOM FIX: `batch_size == None` -> `batch_size is None`.
        """
        if batch_size is None:
            batch_size = len(self.dataset)
        return torch.utils.data.DataLoader(self.dataset, batch_size=batch_size, shuffle=shuffle)
class History():
    """Accumulates per-step metric values and averages them into a history.

    CONSISTENCY FIX: `add` used `not (name in ...)` while `add_step` used
    `name not in ...`; both now use the idiomatic `not in` form.
    """

    def __init__(self, precision=5):
        self.history = {}   # name -> list of flushed averages
        self.current = {}   # name -> raw values accumulated since the last step()
        self.precision = precision

    def add(self, d):
        """Append each metric value directly to the long-term history."""
        for name, metric in d.items():
            if name not in self.history:
                self.history[name] = []
            self.history[name].append(metric)

    def add_step(self, d):
        """Record raw per-step values, to be averaged later by step()."""
        for name, metric in d.items():
            if name not in self.current:
                self.current[name] = []
            self.current[name].append(metric)

    def average(self):
        """Return the rounded mean of every metric accumulated so far."""
        return {name: round(np.mean(self.current[name]), self.precision) for name in self.current}

    def step(self):
        """Flush per-step accumulators into the history as their means."""
        for name in self.current:
            self.add({name: np.mean(self.current[name])})
        self.current = {}

    def __str__(self):
        s = ''
        for name, value in self.history.items():
            s += f' | {name} {round(value[(- 1)], self.precision)}'
        return s
class Sine(torch.nn.Module):
    """Elementwise sine activation."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.sin(x)
def block(i, o):
    """Sine-activated linear block mapping `i` -> `o` features.

    BUG FIX: the original constructed `fc = torch.nn.Linear(i, o)` and then
    discarded it, returning a Sequential built around a SECOND, freshly
    initialized Linear. The single layer is now reused.
    """
    fc = torch.nn.Linear(i, o)
    return torch.nn.Sequential(Sine(), fc)
class MLP(torch.nn.Module):
    """Fully-connected net: input Linear, (layers-1) hidden blocks, one output block."""

    def __init__(self, inputs, outputs, layers, neurons):
        super().__init__()
        stages = [torch.nn.Linear(inputs, neurons)]
        stages.extend(block(neurons, neurons) for _ in range(layers - 1))
        stages.append(block(neurons, outputs))
        self.mlp = torch.nn.Sequential(*stages)

    def forward(self, x):
        return self.mlp(x)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group (None if empty)."""
    return next((group['lr'] for group in optimizer.param_groups), None)
class PDE(): def __init__(self, inputs, outputs): if isinstance(inputs, str): inputs = tuple(inputs) if isinstance(outputs, str): outputs = tuple(outputs) checkIsListOfStr(inputs) checkIsListOfStr(outputs) checkUnique(inputs) checkUnique(outputs) checkNoRepeated(inputs, outputs) self.inputs = inputs self.outputs = outputs self.mesh = None self.bocos = [] def set_sampler(self, sampler): assert (sampler.vars == self.inputs), 'your data does not match the PDE inputs' self.sampler = sampler def add_boco(self, boco): assert (boco.name not in [boco.name for boco in self.bocos]), f'Boco {boco.name} already exists, use another name' boco.validate(self.inputs, self.outputs) self.bocos.append(boco) def update_boco(self, boco): for b in self.bocos: if (b.name == boco.name): self.bocos[self.bocos.index(b)] = boco return def compile(self, model, optimizer, scheduler=None, loss_fn=None): self.model = model self.optimizer = optimizer self.loss_fn = (loss_fn if loss_fn else torch.nn.MSELoss()) self.scheduler = scheduler def computePDELoss(self, vars, grads): print('This function need to be overloaded !!!') def solve(self, N_STEPS=1000, log_each=100): history = History() pbar = tqdm(range(1, (N_STEPS + 1)), miniters=int((N_STEPS / log_each))) for step in pbar: history.add({'lr': get_lr(self.optimizer)}) self.optimizer.zero_grad() loss = 0 X = self.sampler._sample() X.requires_grad_(True) y = self.model(X) pde_losses = self.computePDELoss(X, y) if (step == 1): assert isinstance(pde_losses, dict), 'you should return a dict with the name of the equation and the corresponding loss' for (name, l) in pde_losses.items(): _loss = self.loss_fn(l, torch.zeros(l.shape).to(self.sampler.device)) loss += _loss history.add_step({name: _loss.item()}) for boco in self.bocos: boco_losses = boco.computeLoss(self.model, self.loss_fn, self.inputs, self.outputs) for (name, l) in boco_losses.items(): if (step == 1): assert isinstance(boco_losses, dict), 'you should return a dict with the name of the 
equation and the corresponding loss' loss += l history.add_step({name: l.item()}) loss.backward() self.optimizer.step() if ((step % log_each) == 0): pbar.set_description(str(history.average())) history.step() if self.scheduler: self.scheduler.step() return history.history def computeGrads(self, outputs, inputs): (grads,) = torch.autograd.grad(outputs, inputs, grad_outputs=outputs.data.new(outputs.shape).fill_(1), create_graph=True, only_inputs=True) return grads def eval(self, X): self.model.eval() with torch.no_grad(): return self.model(X)
class BaseSampler():
    """Common sampler interface: holds per-variable specs and stacks samples."""

    def __init__(self, data, n_samples=1, device='cpu'):
        assert isinstance(data, dict), 'you must pass a dict with your data'
        self.device = device
        self.data = data
        self.vars = tuple(data.keys())
        self.n_samples = n_samples

    def _sample(self, n_samples=None):
        # Stack per-variable samples into an (n_samples, n_vars) tensor,
        # preserving the declaration order of self.vars.
        count = n_samples or self.n_samples
        raw = self.sample(count)
        columns = [raw[var] for var in self.vars]
        return torch.stack(columns, axis=(- 1))

    def sample(self, n_samples=None):
        raise ValueError('you must implement this method')
class RandomSampler(BaseSampler):
    """Uniform sampler: each var maps to [lo, hi]; scalars become degenerate ranges."""

    def __init__(self, data, n_samples=1, device='cpu'):
        super().__init__(data, n_samples, device)
        for var, lims in data.items():
            if isinstance(lims, list):
                assert len(lims) == 2, 'you must pass a list with the min and max limits'
            elif isinstance(lims, (int, float)):
                # NOTE: mutates the caller's dict in place, as the original did.
                data[var] = [lims, lims]
            else:
                raise ValueError('invalid limits')

    def sample(self, n_samples=None):
        count = n_samples or self.n_samples
        result = {}
        for var, lims in self.data.items():
            span = lims[1] - lims[0]
            result[var] = torch.rand(count, device=self.device) * span + lims[0]
        return result
def checkIsListOfStr(l):
    """Raise if any element of a tuple is not a string.

    NOTE(review): despite the name, only tuples are inspected — any other type
    (including a list) passes silently; preserved as original behavior.
    """
    if not isinstance(l, tuple):
        return
    for item in l:
        if not isinstance(item, str):
            raise Exception(str(item) + ' must be a string')
def checkUnique(l):
    """Raise if `l` contains any repeated element (pairwise comparison)."""
    for i, first in enumerate(l):
        for j, second in enumerate(l):
            if i == j:
                continue
            if first == second:
                raise Exception('Repeated item ' + str(first))
def checkNoRepeated(l1, l2):
    """Raise if any element of `l1` also appears in `l2`."""
    for item in l1:
        if item in l2:
            raise Exception('Repeated item ' + str(item))
def save(file: Path, **kwargs) -> None:
    """Save a list of arrays as a npz file."""
    # Compressed npz: each kwarg becomes a named array in the archive.
    print(f"-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
def export_ddad(mode, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for DDAD.

    (Original docstring said "for SYNS" — apparently copy-pasted from a
    sibling exporter.)

    :param mode: Split mode to load.
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    :raises FileExistsError: when the target npz already exists and overwrite is False.
    """
    print(f'-> Exporting ground truth depths for DDAD "{mode}"...')
    ds = DdadDataset(mode, datum='image depth K', shape=None, as_torch=False)
    save_file = (ds.split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")
    # Pre-allocate dense arrays: every DDAD depth map shares DdadDataset.SHAPE.
    depths = np.zeros((len(ds), *DdadDataset.SHAPE), dtype=np.float32)
    Ks = np.zeros((len(ds), 4, 4), dtype=np.float32)
    for (i, batch) in enumerate(tqdm(ds)):
        y = batch[1]  # second element of each item holds the 'depth'/'K' targets
        depths[i] = y['depth'].squeeze()
        Ks[i] = y['K']
        # Drop references eagerly; the depth maps are large.
        del batch, y
    save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None:
    """Save a list of arrays as a npz file."""
    # Compressed npz: each kwarg becomes a named array in the archive.
    print(f"-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
def export_diode(mode: str, scene: str, save_stem: ty.N[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth depth images for DIODE.

    :param mode: (str) Split mode to use. {'val'}
    :param scene: (str) Scene type to use. {'outdoor', 'indoor'}
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    :raises FileExistsError: when the target npz already exists and overwrite is False.
    """
    print(f"-> Exporting ground truth depths for DIODE '{mode}'...")
    dataset = DiodeDataset(mode, scene, shape=None, as_torch=False)
    save_file = dataset.split_file.parent / f'{save_stem}.npz'
    if (not overwrite) and save_file.is_file():
        raise FileExistsError(f"Target file '{save_file}' already exists. Set flag `--overwrite 1` to overwrite")
    # Mask out invalid depth pixels before stacking.
    depths = np.array([(y['depth'].squeeze() * y['mask']) for (_, y, _) in tqdm(dataset)])
    save(save_file, depth=depths)
def save(file: Path, **kwargs) -> None:
    """Save a list of arrays as a npz file."""
    # Compressed npz: each kwarg becomes a named array in the archive.
    print(f'''
-> Saving to "{file}"...''')
    np.savez_compressed(file, **kwargs)
def export_kitti(depth_split: str, mode: str, use_velo_depth: bool=False, save_stem: Optional[str]=None, overwrite: bool=False) -> None:
    """Export the ground truth LiDAR depth images for a given Kitti test split.

    :param depth_split: (str) Kitti depth split to load.
    :param mode: (str) Split mode to use. {'train', 'val', 'test'}
    :param use_velo_depth: (bool) If `True`, load the raw velodyne depth. Only used for legacy reasons!
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    :raises FileExistsError: when the target npz already exists and overwrite is False.
    """
    print(f'''
-> Exporting ground truth depths for KITTI "{depth_split}/{mode}"...''')
    # NOTE(review): `mode` only appears in the banner above; the split file is
    # always loaded with mode='test' — confirm this is intentional.
    split_file = kr.get_split_file(depth_split, mode='test')
    lines = [line.split() for line in kr.load_split(split_file)]
    # Each line: <sequence> <frame stem> <l|r>; 'l' -> camera 2, 'r' -> camera 3.
    items = [{'seq': l[0], 'cam': (2 if (l[2] == 'l') else 3), 'stem': int(l[1])} for l in lines]
    save_file = (split_file.parent / f'{save_stem}.npz')
    if ((not overwrite) and save_file.is_file()):
        raise FileExistsError(f'Target file "{save_file}" already exists. Set flag `--overwrite 1` to overwrite')
    (depths, Ks) = ([], [])
    for d in tqdm(items):
        (cam2cam, _, velo2cam) = kr.load_calib(d['seq'].split('/')[0])
        if use_velo_depth:
            # Legacy path: project raw velodyne points into the camera frame.
            file = kr.get_velodyne_file(d['seq'], d['stem'])
            depth = kr.load_depth_velodyne(file, velo2cam, cam2cam, cam=d['cam'], use_velo_depth=use_velo_depth)
        else:
            file = kr.get_depth_file(d['seq'], f"image_0{d['cam']}", d['stem'])
            depth = kr.load_depth(file)
        depths.append(depth)
        Ks.append(cam2cam[f"K_0{d['cam']}"])
    # Depth maps can vary in resolution across frames, hence dtype=object.
    depths = np.array(depths, dtype=object)
    save(save_file, depth=depths, K=Ks)
def save(file: Path, **kwargs) -> None:
    """Write the given keyword arrays to a compressed `.npz` file."""
    print(f'-> Saving to "{file}"...')
    # Forward everything to numpy; each kwarg becomes a named array in the archive.
    np.savez_compressed(file, **kwargs)
def export_mannequin(mode: str, save_stem: ty.N[str] = None, overwrite: bool = False) -> None:
    """Export the ground truth depth images for MannequinChallenge.

    :param mode: (str) Split mode to use.
    :param save_stem: (Optional[str]) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for Mannequin '{mode}'...")
    ds = MannequinDataset(mode, datum='image depth K', shape=None, as_torch=False)
    out_file = ds.split_file.parent / f'{save_stem}.npz'
    if out_file.is_file() and not overwrite:
        raise FileExistsError(f"Target file '{out_file}' already exists. Set flag `--overwrite 1` to overwrite")
    depths, Ks = [], []
    for _, y, m in tqdm(ds):
        depths.append(y['depth'].squeeze())
        # Rescale intrinsics to each depth map's own resolution.
        Ks.append(geo.resize_K(y['K'], y['depth'].shape[-2:], shape=MannequinDataset.SHAPE))
    save(out_file, depth=np.array(depths, dtype=object), K=np.array(Ks))
def save(file: Path, **kwargs) -> None:
    """Save the provided named arrays as one compressed npz archive."""
    print(f'\n-> Saving to "{file}"...')
    np.savez_compressed(file, **kwargs)
def export_nyud(mode: str, save_stem: str, overwrite: bool = False) -> None:
    """Export the ground truth depth images for NYUD.

    :param mode: (str) Split mode to use. {'test'}
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    ds = NyudDataset(mode=mode, as_torch=False)
    out_file = ds.split_file.parent / f'{save_stem}.npz'
    if out_file.is_file() and not overwrite:
        raise FileExistsError(f'Target file "{out_file}" already exists. Set flag `--overwrite 1` to overwrite')
    # Each dataset item is (x, y, meta); the depth map lives in y.
    gt = [item[1]['depth'].squeeze() for item in tqdm(ds)]
    save(out_file, depth=np.array(gt))
def save(file: Path, **kwargs) -> None:
    """Persist the given arrays to `file` using numpy's compressed npz format."""
    out = file
    print(f'\n-> Saving to "{out}"...')
    np.savez_compressed(out, **kwargs)
def export_sintel(mode, save_stem: str = None, overwrite: bool = False) -> None:
    """Export the ground-truth synthetic depth images for Sintel.

    :param mode: (str) Split mode to use.
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for Sintel '{mode}'...")
    ds = SintelDataset(mode=mode, as_torch=False)
    out_file = ds.split_file.parent / f'{save_stem}.npz'
    if out_file.is_file() and not overwrite:
        raise FileExistsError(f"Target file '{out_file}' already exists. Set flag `--overwrite 1` to overwrite")
    depths, Ks = [], []
    # Collect synthetic depth + intrinsics for every item; images/meta are unused.
    for _, y, _ in tqdm(ds):
        depths.append(y['depth'].squeeze())
        Ks.append(y['K'])
    save(out_file, depth=np.array(depths), K=np.array(Ks))
def save(file: Path, **kwargs) -> None:
    """Bundle the given named arrays into a compressed `.npz` archive."""
    print(f"\n-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
def export_tum(mode: str, save_stem: str, overwrite: bool = False) -> None:
    """Export the ground-truth depth maps for TUM.

    :param mode: (str) Split mode to use.
    :param save_stem: (str) Exported depth file stem (i.e. no suffix).
    :param overwrite: (bool) If `True`, overwrite existing exported files.
    """
    print(f"-> Exporting ground truth depths for TUM '{mode}'...")
    ds = TumDataset(mode=mode, as_torch=False)
    out_file = ds.split_file.parent / f'{save_stem}.npz'
    if out_file.is_file() and not overwrite:
        raise FileExistsError(f"Target file '{out_file}' already exists. Set flag `--overwrite 1` to overwrite")
    # Each item is (x, y, meta); the depth map lives in y.
    gt = [item[1]['depth'].squeeze() for item in tqdm(ds)]
    save(out_file, depth=np.array(gt))
def process_dataset(src_dir: Path, dst_dir: Path, use_hints: bool = True, use_benchmark: bool = True, overwrite: bool = False) -> None:
    """Process the entire Kitti Raw Sync dataset."""
    HINTS_DIR, BENCHMARK_DIR = 'depth_hints', 'depth_benchmark'
    splits_dst = dst_dir / 'splits'
    if not splits_dst.is_dir():
        shutil.copytree(src_dir / 'splits', splits_dst)
    # Core raw sequences: export calibration first, then every drive's data.
    for seq in kr.SEQS:
        export_calibration(src_dir / seq, dst_dir / seq, overwrite)
        process_sequence(src_dir / seq, dst_dir / seq, overwrite)
    # Optional auxiliary datasets share the same per-scene layout.
    for enabled, name in ((use_hints, HINTS_DIR), (use_benchmark, BENCHMARK_DIR)):
        if not enabled:
            continue
        src_root, dst_root = src_dir / name, dst_dir / name
        for scene in sorted(src_root.iterdir()):
            process_sequence(scene, dst_root / scene.name, overwrite)
def process_sequence(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26."""
    print(f"-> Processing sequence '{src_dir}'")
    for entry in sorted(src_dir.iterdir()):
        # Loose files (e.g. calibration txts) are handled elsewhere; only drives here.
        if entry.is_file():
            continue
        process_drive(entry, dst_dir / entry.name, overwrite)
def process_drive(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process a full Kitti Raw drive: e.g. kitti_raw_sync/2011_09_26/2011_09_26_drive_0005."""
    print(f" -> Processing drive '{src_dir}'")
    children = sorted(src_dir.iterdir())
    for child in children:
        process_dir(child, dst_dir / child.name, overwrite)
def process_dir(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
    """Processes a data directory within a given drive.

    Cases:
        - Base dataset: images_00, images_01, velodyne_points, oxts (/data & /timestamps for each)
        - Depth hints: images_02, images_03
        - Depth benchmark: groundtruth/image_02, groundtruth/image_03

    :param src_dir: (Path) Source data directory.
    :param dst_dir: (Path) Destination directory for the exported databases.
    :param overwrite: (bool) If `True`, re-export even when the destination already exists.
    """
    print(f" -> Processing dir '{src_dir}'")
    # Depth hints: the directory itself holds npy files; export it in one go.
    if ('depth_hints' in str(src_dir)):
        if ((not overwrite) and dst_dir.is_dir()):
            print(f" -> Skipping dir '{dst_dir}'")
            return
        export_hints(src_dir, dst_dir)
    # Benchmark ground truth lives under an extra `groundtruth` level (one dir per camera).
    elif ('depth_benchmark' in str(src_dir)):
        for src_path in sorted((src_dir / 'groundtruth').iterdir()):
            dst_path = ((dst_dir / 'groundtruth') / src_path.name)
            if ((not overwrite) and dst_path.is_dir()):
                print(f" -> Skipping dir '{dst_path}'")
                continue
            export_images(src_path, dst_path)
    # Base dataset: copy loose metadata files and export each `data` dir by payload type.
    else:
        for src_path in sorted(src_dir.iterdir()):
            dst_path = (dst_dir / src_path.name)
            if src_path.is_file():
                # Loose files (e.g. timestamps.txt) are copied verbatim, once.
                if (not dst_path.is_file()):
                    shutil.copy(src_path, dst_path)
            else:
                assert (src_path.stem == 'data')
                # Peek at the first file to determine the payload type for this dir.
                file = next(src_path.iterdir(), None)
                if (file is None):
                    # Keep an empty placeholder so reruns treat the dir as processed.
                    dst_path.mkdir(exist_ok=True, parents=True)
                    print(f" -> Skipping empty dir '{dst_path}'")
                    continue
                ext = file.suffix
                if ((not overwrite) and dst_path.is_dir()):
                    print(f" -> Skipping dir '{dst_path}'")
                    continue
                if (ext == '.png'):
                    export_images(src_path, dst_path)  # camera images
                elif (ext == '.bin'):
                    export_velodyne(src_path, dst_path)  # LiDAR point clouds
                elif (ext == '.txt'):
                    export_oxts(src_path, dst_path)  # GPS/IMU readings
def export_calibration(src_seq: Path, dst_seq: Path, overwrite: bool = False) -> None:
    """Exports sequence calibration information as a LabelDatabase of arrays."""
    dst_dir = dst_seq / 'calibration'
    if dst_dir.is_dir() and not overwrite:
        print(f" -> Skipping calib '{dst_dir}'")
        return
    print(f" -> Processing calib '{dst_dir}'")
    cam2cam, imu2velo, velo2cam = kr.load_calib(src_seq.stem)
    nested = {'cam2cam': cam2cam, 'imu2velo': imu2velo, 'velo2cam': velo2cam}
    # Flatten to 'group/key' entries so everything fits in a single flat database.
    flat = {f'{grp}/{key}': val for grp, sub in nested.items() for key, val in sub.items()}
    write_label_database(flat, dst_dir)
def export_images(src_dir: Path, dst_dir: Path) -> None:
    """Export images as an ImageDatabase."""
    paths = {}
    # Key each image by its filename stem, in sorted order.
    for file in sorted(src_dir.iterdir()):
        paths[file.stem] = file
    write_image_database(paths, dst_dir)
def export_oxts(src_dir: Path, dst_dir: Path) -> None:
    """Export OXTS dicts as a LabelDatabase."""
    files = sorted(src_dir.iterdir())
    write_label_database({f.stem: kr.load_oxts(f) for f in files}, dst_dir)
def export_velodyne(src_dir: Path, dst_dir: Path) -> None:
    """Export Velodyne points as a LabelDatabase of arrays."""
    data = {}
    for file in sorted(src_dir.iterdir()):
        data[file.stem] = kr.load_velo(file)
    write_label_database(data, dst_dir)
def export_hints(src_dir: Path, dst_dir: Path) -> None:
    """Export depth hints as a LabelDatabase of arrays."""
    files = sorted(src_dir.iterdir())
    write_array_database({f.stem: np.load(f) for f in files}, dst_dir)
def process_dataset(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process the entire MannequinChallenge dataset."""
    splits = dst_dir / 'splits'
    print(f"-> Copying splits directory '{splits}'...")
    shutil.copytree(src_dir / 'splits', splits, dirs_exist_ok=True)
    # Convert each split mode in turn.
    for m in ('train', 'val', 'test'):
        process_mode(src_dir / m, dst_dir / m, overwrite)
def process_mode(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Process a full MannequinChallenge mode, e.g. train or val."""
    # Per-sequence calibration info, keyed by sequence stem.
    calibs = {}
    for d in tqdm(src_dir.iterdir()):
        calibs[d.stem] = mc.load_info(dst_dir.stem, d.stem)
    export_intrinsics(src_dir, dst_dir / 'intrinsics', calibs, overwrite)
    export_shapes(src_dir, dst_dir / 'shapes', calibs, overwrite)
    export_poses(src_dir, dst_dir / 'poses', calibs, overwrite)
    export_images(src_dir, dst_dir / 'images', overwrite)
def export_intrinsics(src_dir: Path, dst_dir: Path, calibs: dict[str, dict], overwrite: bool = False) -> None:
    """Create camera intrinsics LMDB.

    :param src_dir: (Path) Source mode directory (used for logging only).
    :param dst_dir: (Path) Output database directory.
    :param calibs: (dict[str, dict]) Per-sequence calibration info, each mapping frame -> {'K': ...}.
    :param overwrite: (bool) If `True`, re-export even if `dst_dir` exists.
    """
    if not overwrite and dst_dir.is_dir():
        print(f"-> Intrinsics already exist for dir '{src_dir.stem}'")
        return
    all_Ks = {}
    for k, v in tqdm(calibs.items()):
        # FIX: `np.stack` requires a sequence; passing a generator raises TypeError on modern NumPy.
        Ks = np.stack([vv['K'] for vv in v.values()])
        are_equal = (Ks[0] == Ks).all(axis=(-2, -1))
        if not are_equal.all():
            # Intrinsics should be constant per sequence; warn but keep the first one.
            LOGGER.warning(f'Miss-matched Ks! {Ks[0]} {Ks[np.where(~are_equal)]}')
        all_Ks[k] = Ks[0]
    print(f"-> Exporting intrinsics for dir '{src_dir.stem}'")
    # FIX: write to `dst_dir` itself; the old `dst_dir / 'intrinsics'` nested the directory
    # twice (caller already passes `.../intrinsics`) and meant the skip check above never fired.
    write_label_database(all_Ks, dst_dir)
def export_shapes(src_dir: Path, dst_dir: Path, calibs: dict[str, dict], overwrite: bool = False) -> None:
    """Create image shapes LMDB.

    :param src_dir: (Path) Source mode directory (used for logging only).
    :param dst_dir: (Path) Output database directory.
    :param calibs: (dict[str, dict]) Per-sequence calibration info, each mapping frame -> {'shape': ...}.
    :param overwrite: (bool) If `True`, re-export even if `dst_dir` exists.
    :raises ValueError: If the image shapes within a sequence are not all identical.
    """
    if not overwrite and dst_dir.is_dir():
        print(f"-> Shapes already exist for dir '{src_dir.stem}'")
        return
    all_shapes = {}
    for k, v in tqdm(calibs.items()):
        # FIX: `np.stack` requires a sequence; passing a generator raises TypeError on modern NumPy.
        shapes = np.stack([vv['shape'] for vv in v.values()])
        if not (shapes[0] == shapes).all():
            raise ValueError(f'Miss-matched shapes!')
        all_shapes[k] = shapes[0]
    print(f"-> Exporting shapes for dir '{src_dir.stem}'")
    # FIX: write to `dst_dir` itself; the old `dst_dir / 'shapes'` nested the directory twice
    # (caller already passes `.../shapes`) and meant the skip check above never fired.
    write_label_database(all_shapes, dst_dir)
def export_poses(src_dir: Path, dst_dir: Path, calibs: dict[str, dict], overwrite: bool = False) -> None:
    """Create camera poses LMDB.

    :param src_dir: (Path) Source mode directory (used for logging only).
    :param dst_dir: (Path) Output database directory.
    :param calibs: (dict[str, dict]) Per-sequence calibration info, each mapping frame -> {'T': ...}.
    :param overwrite: (bool) If `True`, re-export even if `dst_dir` exists.
    """
    if not overwrite and dst_dir.is_dir():
        print(f"-> Poses already exist for dir '{src_dir.stem}'")
        return
    print(f'-> Exporting poses for dir {src_dir.stem}')
    # Flatten to 'sequence/frame' keys so every pose fits in a single database.
    all_poses = {f'{k}/{kk}': vv['T'] for k, v in tqdm(calibs.items()) for kk, vv in v.items()}
    # FIX: write to `dst_dir` itself; the old `dst_dir / 'poses'` nested the directory twice
    # (caller already passes `.../poses`) and meant the skip check above never fired.
    write_label_database(all_poses, dst_dir)
def export_images(src_dir: Path, dst_dir: Path, overwrite: bool = False) -> None:
    """Create images LMDB."""
    if dst_dir.is_dir() and not overwrite:
        print(f"-> Images already exist for dir '{src_dir.stem}'")
        return
    print(f"-> Exporting images for dir '{src_dir.stem}'")
    # Key each jpg by 'sequence/frame'.
    files = {}
    for d in tqdm(io.get_dirs(src_dir)):
        for p in io.get_files(d, key=lambda f: f.suffix == '.jpg'):
            files[f'{d.stem}/{p.stem}'] = p
    write_image_database(files, dst_dir)
def process_dataset(overwrite=False):
    """Convert the whole SlowTV dataset (splits, intrinsics, frames) into LMDBs."""
    src, dst = PATHS['slow_tv'], PATHS['slow_tv_lmdb']
    print(f"-> Copying splits directory '{dst / 'splits'}'...")
    shutil.copytree(src / 'splits', dst / 'splits', dirs_exist_ok=True)
    export_intrinsics(dst, overwrite)
    tasks = [(src / seq, dst, overwrite) for seq in stv.get_seqs()]
    # Fan the per-sequence exports out across all available cores.
    with Pool() as pool:
        list(pool.starmap(export_seq, tqdm(tasks)))
def export_seq(path: Path, save_root: Path, overwrite: bool = False) -> None:
    """Convert SlowTV video into an LMDB."""
    seq = path.stem
    out_dir = save_root / seq
    if out_dir.is_dir() and not overwrite:
        print(f'-> Skipping directory "{out_dir}"...')
        return
    print(f'-> Export LMDB for dir "{seq}"')
    # Collect extracted png frames, keyed by frame stem.
    frames = {p.stem: p for p in io.get_files(path, key=lambda f: f.suffix == '.png')}
    write_image_database(frames, out_dir)
def export_intrinsics(save_root: Path, overwrite: bool = False) -> None:
    """Export SlowTV intrinsics as an LMDB."""
    out_dir = save_root / 'calibs'
    if out_dir.is_dir() and not overwrite:
        print('-> Skipping LMDB calibrations...')
        return
    print(f"""-> Exporting intrinsics "{save_root / 'calibs'}"...""")
    # One intrinsics entry per sequence.
    data = {}
    for seq in stv.get_seqs():
        data[seq] = stv.load_intrinsics(seq)
    write_label_database(data, save_root / 'calibs')
def read_array(path):
    """Read a COLMAP dense map (.bin) into a numpy array.

    The file starts with an ASCII header 'width&height&channels&' followed by
    float32 data in column-major (Fortran) order.

    :param path: Path to the COLMAP binary map file.
    :return: (ndarray) (h, w[, c]) float32 array (trailing singleton dims squeezed).
    """
    with open(path, 'rb') as fid:
        # Parse the '&'-delimited header dimensions from the first line.
        width, height, channels = np.genfromtxt(fid, delimiter='&', max_rows=1, usecols=(0, 1, 2), dtype=int)
        fid.seek(0)
        # Consume bytes up to and including the third '&' to skip the header.
        seen = 0
        while seen < 3:
            if fid.read(1) == b'&':
                seen += 1
        raw = np.fromfile(fid, np.float32)
    raw = raw.reshape((width, height, channels), order='F')
    return np.transpose(raw, (1, 0, 2)).squeeze().astype(np.float32)
def export_split(split, src, dst, overwrite=False):
    """Run the per-sequence COLMAP export over every sequence in a split, in parallel."""
    print(f'-> Exporting "{split}" split...')
    out_root = dst / split
    io.mkdirs(out_root)
    seqs = io.get_dirs(src / split)
    # Each job is (source seq dir, destination dir, overwrite flag).
    jobs = [(s, out_root / s.stem, overwrite) for s in seqs]
    with Pool(8) as pool:
        for _ in tqdm(pool.imap_unordered(export_seq, jobs), total=len(seqs)):
            pass
    return {}
def export_seq(args):
    """Run the full COLMAP SfM+MVS pipeline for a single sequence and export its depth maps.

    :param args: (tuple) (src, dst, overwrite) = (source image dir, COLMAP workspace dir, overwrite flag).
    """
    try:
        (src, dst, overwrite) = args
        # An existing `depths` dir marks this sequence as already processed.
        depth_dir = (dst / 'depths')
        if ((not overwrite) and depth_dir.is_dir()):
            print(f'-> Skipping "{src.parent.stem}" sequence "{src.stem}"...')
            return
        print(f'-> Exporting "{src.parent.stem}" sequence "{src.stem}"...')
        # Start from a clean workspace to avoid stale COLMAP state.
        shutil.rmtree(dst, ignore_errors=True)
        io.mkdirs(dst)
        db_path = (dst / 'database.db')
        img_dir = (dst / 'images')
        sparse_dir = (dst / 'sparse')
        refined_dir = (dst / 'refined')
        dense_dir = (dst / 'dense')
        io.mkdirs(img_dir, sparse_dir, refined_dir, dense_dir)
        [shutil.copy(f, img_dir) for f in io.get_files(src, key=(lambda f: (f.suffix == '.jpg')))]
        # SfM: features -> matches -> sparse reconstruction -> bundle adjustment.
        subprocess.call(['colmap', 'feature_extractor', '--ImageReader.single_camera', '1', '--ImageReader.default_focal_length_factor', '0.85', '--SiftExtraction.peak_threshold', '0.02', '--database_path', db_path, '--image_path', img_dir])
        subprocess.call(['colmap', 'exhaustive_matcher', '--SiftMatching.max_error', '3', '--SiftMatching.min_inlier_ratio', '0.3', '--SiftMatching.min_num_inliers', '30', '--SiftMatching.guided_matching', '1', '--database_path', db_path])
        subprocess.call(['colmap', 'mapper', '--Mapper.tri_merge_max_reproj_error', '3', '--Mapper.ignore_watermarks', '1', '--Mapper.filter_max_reproj_error', '2', '--database_path', db_path, '--image_path', img_dir, '--output_path', sparse_dir])
        subprocess.call(['colmap', 'bundle_adjuster', '--input_path', (sparse_dir / '0'), '--output_path', refined_dir])
        # MVS: undistort -> patch-match stereo (geometric consistency enabled).
        subprocess.call(['colmap', 'image_undistorter', '--input_path', refined_dir, '--image_path', img_dir, '--output_path', dense_dir, '--output_type', 'COLMAP', '--max_image_size', '1600'])
        subprocess.call(['colmap', 'patch_match_stereo', '--PatchMatchStereo.window_radius', '5', '--PatchMatchStereo.num_samples', '15', '--PatchMatchStereo.geom_consistency_regularizer', '1', '--PatchMatchStereo.geom_consistency_max_cost', '1.5', '--PatchMatchStereo.filter_min_ncc', '0.2', '--PatchMatchStereo.filter_min_num_consistent', '3', '--PatchMatchStereo.geom_consistency', 'true', '--workspace_path', dense_dir, '--workspace_format', 'COLMAP'])
        files = io.get_files(((dense_dir / 'stereo') / 'depth_maps'), key=(lambda f: ('geometric' in str(f))))
        # NOTE(review): depth maps are saved into the *source* dir, while the empty
        # `depth_dir` only acts as a completion marker — confirm this is intended.
        [np.save((src / f"{f.name.split('.')[0]}.npy"), read_array(f)) for f in files]
        io.mkdirs(depth_dir)
    except Exception as e:
        # FIX: the original bare `except: pass` silently swallowed every error (including
        # KeyboardInterrupt/SystemExit). Keep the worker pool alive on failure, but report it.
        print(f'-> Failed exporting sequence: {e}')
def main(root):
    """Run the COLMAP export over the selected splits and report any failures."""
    dst = root / 'colmap'
    io.mkdirs(dst)
    fails = {}
    for split in tqdm(['test']):
        fails[split] = export_split(split, root, dst, overwrite=False)
    print(fails)
def main(src, dst):
    """Reorganize the Kitti depth benchmark into the raw-sync sequence layout."""
    TARGET_DIR = 'depth_benchmark'
    K_DEPTH, K_RAW = src, dst
    print(f'-> Exporting Kitti Benchmark from "{K_DEPTH}" to "{K_RAW}"...')
    root = K_RAW / TARGET_DIR
    root.mkdir(exist_ok=True)
    for seq in kr.SEQS:
        (root / seq).mkdir(exist_ok=True)
    for mode in ('train', 'val'):
        for drive in tqdm(sorted((K_DEPTH / mode).iterdir())):
            # Each benchmark drive stem starts with its parent sequence date.
            seq = next(s for s in kr.SEQS if drive.stem.startswith(s))
            shutil.copytree(drive, root / seq / drive.stem, dirs_exist_ok=True)
def loadmat(file):
    """Load a (v7.3/HDF5) MATLAB file as a dict of numpy arrays.

    NOTE: `scipy.io.loadmat` conflicts with specific matfile versions, hence h5py.

    :param file: (Path) Path to the `.mat` file.
    :return: (dict) Mapping from variable name to numpy array.
    """
    # FIX: use a context manager so the HDF5 handle is always closed (the original
    # leaked it). `np.array` copies the data, so the result outlives the file.
    with h5py.File(file) as f:
        return {k: np.array(v) for k, v in f.items()}
def export_split(mode, idxs, data, dst):
    """Export one NYUD split (images + depths) and write its split file.

    :param mode: (str) Split name, e.g. 'train' or 'test'.
    :param idxs: (ndarray) 1-based (MATLAB) item indices for this split.
    :param data: (dict) Loaded NYUD mat contents with 'images' and 'depths'.
    :param dst: (Path) Dataset root to export into.
    """
    img_dir = dst / mode / 'rgb'
    depth_dir = dst / mode / 'depth'
    split_file = dst / 'splits' / f'{mode}_files.txt'
    io.mkdirs(img_dir, depth_dir, split_file.parent)
    with open(split_file, 'w') as f:
        for i in tqdm(idxs):
            i -= 1  # Convert 1-based MATLAB indices to 0-based.
            stem = f'{i:05}'
            # FIX: index with `i` directly. The old `[i - 1]` double-decremented
            # (the index was already shifted above), so every item was mislabelled
            # and the first one wrapped around to the last.
            img = data['images'][i].transpose((2, 1, 0)).astype(np.float32) / 255.0
            depth = data['depths'][i].T[..., None]
            io.np2pil(img).save(img_dir / f'{stem}.png')
            np.save(depth_dir / f'{stem}.npy', depth)
            f.write(stem + '\n')
def main(dst):
    """Convert the labelled NYUD mat files into per-split exports, then delete the sources."""
    data_file = dst / 'nyu_depth_v2_labeled.mat'
    split_file = dst / 'splits.mat'
    data = loadmat(data_file)
    splits = sio.loadmat(split_file)
    for mode, key in (('train', 'trainNdxs'), ('test', 'testNdxs')):
        export_split(mode, splits[key].squeeze(), data, dst)
    # Clean up the (large) source mat files once the export succeeds.
    data_file.unlink()
    split_file.unlink()
def save_settings(**kwargs):
    """Persist the SlowTV extraction settings alongside the splits."""
    out_file = PATHS['slow_tv'] / 'splits' / 'config.yaml'
    io.write_yaml(out_file, kwargs)
def export_scene(args):
    """Extract frames for one SlowTV video and register them in the split.

    COLMAP intrinsics estimation is retried with several seeds, since a single
    run can fail to converge.
    """
    vid_file, cat = args
    seq = vid_file.stem
    seq_dir = PATHS['slow_tv'] / seq
    stv.extract_frames(vid_file, save_dir=seq_dir, fps=fps, trim_start=trim, n_keep=n_keep, per_interval=per_interval, overwrite=overwrite)
    seeds = [42, 195, 335, 558, 724]
    for seed in seeds:
        try:
            stv.estimate_intrinsics(seq_dir, save_root=colmap_dir, n_imgs=n_colmap_imgs, interval=colmap_interval, seed=seed, overwrite=overwrite)
            break
        except RuntimeError:
            print(f'-> Failed COLMAP intrinsics with seed "{seed}"...')
    else:
        # Every seed failed: give up on this scene.
        raise RuntimeError(f'-> Tried {seeds} and they all failed!!')
    stv.add_frames_to_split(seq_dir, cat, seq, 'all', p_train=p_train, skip=val_skip)
def main(args):
    """Extract frames for every SlowTV video, optionally in parallel."""
    if write_settings:
        save_settings(fps=fps, trim=trim, data_scale=data_scale, n_keep=n_keep, per_interval=per_interval, p_train=p_train, val_skip=val_skip, n_colmap_imgs=n_colmap_imgs, colmap_interval=colmap_interval)
    cats = stv.load_categories(subcats=False)
    video_files = io.get_files(vid_dir)
    assert len(cats) == len(video_files), 'Non-matching SlowTV videos and labelled categories.'
    # Single-scene debugging mode.
    if args.idx is not None:
        export_scene((video_files[args.idx], cats[args.idx]))
        return
    jobs = list(zip(video_files, cats))
    if args.n_proc == 0:
        for job in jobs:
            export_scene(job)
    else:
        with Pool(args.n_proc) as p:
            list(tqdm(p.imap_unordered(export_scene, jobs), total=len(cats)))
def main(dst):
    """Copy the repo's split files to `dst`, excluding FILE's own copy."""
    print(f'-> Copying splits to "{dst}"...')
    shutil.copytree(REPO_ROOT / 'api/data/splits', dst, dirs_exist_ok=True)
    # Remove the copied counterpart of FILE (presumably this script itself — confirm).
    (dst / FILE.name).unlink()
def save_metrics(file: Path, metrics: ty.U[(Metrics, ty.S[Metrics])]):
    """Helper to save metrics."""
    out_dir = file.parent
    LOGGER.info(f'Saving results to "{file}"...')
    out_dir.mkdir(parents=True, exist_ok=True)
    write_yaml(file, metrics, mkdir=True)
def compute_eval_metrics(preds: ty.A, cfg_file: Path, align_mode: ty.U[(str, float)], nproc: ty.N[int]=None, max_items: ty.N[int]=None) -> tuple[(Metrics, ty.S[Metrics])]:
    """Compute evaluation metrics from scaleless network disparities (see `compute_eval_preds`).

    :param preds: (NDArray) (b, h, w) Precomputed unscaled network predictions.
    :param cfg_file: (Path) Path to YAML config file.
    :param align_mode: (str|float) Strategy used to align the predictions to the ground-truth. {median, lsqr, 1, 5.4...}
    :param nproc: (None|int) Number of processes to use. `None` to let OS determine it.
    :param max_items: (None|int) Maximum number of items to process. Used for testing/debugging a subset.
    :return: (
        mean_metrics: (Metrics) Average metrics across the whole dataset.
        metrics: (list[Metrics]) Metrics for each item in the dataset.
    )
    """
    cfg = load_yaml(cfg_file)
    (cfg_ds, cfg_args) = (cfg['dataset'], cfg['args'])
    # `align_mode` may be a fixed numeric scale factor; otherwise keep it as a named strategy.
    try:
        cfg_args['align_mode'] = float(align_mode)
    except (ValueError, TypeError):
        cfg_args['align_mode'] = align_mode
    # Ground-truth targets default to e.g. 'targets_test' unless the cfg overrides the stem.
    target_stem = cfg_ds.pop('target_stem', f"targets_{cfg.get('mode', 'test')}")
    ds = parsers.get_ds({cfg_ds.pop('type'): cfg_ds})
    ds = next(iter(ds.values()))  # Single-dataset cfg: unwrap the only entry.
    target_file = (ds.split_file.parent / f'{target_stem}.npz')
    LOGGER.info(f'Loading targets from "{target_file}"...')
    # allow_pickle: the target npz may store object arrays (variable-shape depth maps).
    data = np.load(target_file, allow_pickle=True)
    (mean_metrics, metrics) = MonoDepthEvaluator(**cfg_args).run(preds, data, nproc=nproc, max_items=max_items)
    return (mean_metrics, metrics)
def save_preds(file: Path, preds: ty.A) -> None:
    """Helper to save network predictions to a NPZ file. Required for submitted to the challenge."""
    parent = file.parent
    io.mkdirs(parent)
    logging.info(f"Saving network predictions to '{file}'...")
    # Store under the 'pred' key, as expected by the challenge format.
    np.savez_compressed(file, pred=preds)
def compute_preds(cfg: dict, ckpt: str, cfg_model: ty.N[list[Path]], device: ty.N[str], overwrite: bool) -> ty.A:
    """Compute predictions for a given dataset and network cfg.

    `ckpt` can be provided as:
        - Path: Path to a pretrained checkpoint trained using the benchmark repository.
        - Name: Name indicating the external model type and variant to load, e.g. midas.MiDaS, newcrfs.indoor.

    Currently supported external models are: {
        midas.{MiDaS, DPT_Large, DPT_BEiT_L_512},
        newcrfs.{indoor,outdoor},
    }

    :param cfg: (dict) Dataset cfg, following `MonoDepthModule` conventions.
    :param ckpt: (str) Model checkpoint to load. Either our checkpoint file or external model name. See docs.
    :param cfg_model: (None|list[Path]) Optional model cfgs when loading our legacy models.
    :param device: (str) Device on which to compute predictions.
    :param overwrite: (bool) If `True`, compute predictions even if model has not finished training.
    :return: (NDArray) Network predictions for every item in the dataset.
    """
    trigger_preds()
    # External checkpoints are named '<type>.<variant>'; anything else is one of ours.
    (model_type, name) = ckpt.split('.', maxsplit=2)
    model_type = (model_type if (model_type in PRED_REG) else 'ours')
    predictor = PRED_REG[model_type]()
    if (model_type == 'ours'):
        ckpt = find_model_file(ckpt)
        # A 'finished' marker next to the checkpoint signals training completed.
        if ((not (ckpt.parent / 'finished').is_file()) and (not overwrite)):
            logging.error(f"Training for '{ckpt}' has not finished...")
            logging.error('Set `--overwrite 1` to run this evaluation anyway...')
            exit()
        logging.info(f"Loading pretrained model from '{ckpt}'")
        net = predictor.load_model(ckpt, cfg_model)
    else:
        net = predictor.load_model(name)
    # Force deterministic evaluation settings and the predictor's expected input shape.
    cfg.update({'shape': predictor.get_img_shape(cfg['type']), 'as_torch': True, 'use_aug': False, 'log_time': False})
    ds = parsers.get_ds({cfg.pop('type'): cfg})
    ds = list(ds.values())[0]  # Single-dataset cfg: unwrap the only entry.
    dl = DataLoader(ds, batch_size=12, num_workers=8, collate_fn=ds.collate_fn, pin_memory=True)
    logging.info('Computing predictions...')
    preds = predictor(net, dl, use_stereo_blend=False, device=device)
    return preds
def get_models(root: Path, exp: str, dataset: str, ckpt: str = 'last', mode: str = '*', res: str = 'results', models: ty.N[list[str]] = None, tag: str = '') -> tuple[(dict[(str, list[Path])], list[str])]:
    """Find all models and files associated with a particular experiment.
    NOTE: Parameters can use regex expressions, but overlapping names will be combined together. Use at your own risk.

    Found model names can be adjusted using the MODEL_TAGS dictionary.

    :param root: (Path) Root directory to search for models.
    :param exp: (str) Experiment name.
    :param dataset: (str) Evaluation dataset name.
    :param ckpt: (str) Checkpoint mode to retrieve. {last, best, *}
    :param mode: (str) Depth alignment mode to retrieve. {metric, median, lsqr, *}
    :param res: (str) Results directory name.
    :param models: (None|list[str]) List of models to retrieve. (Default: All models will be retrieved)
    :param tag: (str) Tag to append to model names. Include '_' to make more legible.
    :return: (
        eval_files: (dict[str, list[Path]]) Mapping from model names to all found files.
        models: (list[str]) List of model names found.
    )
    """
    if isinstance(models, str):
        models = models.split()
    fname = f'{dataset}_{ckpt}_{mode}.yaml'
    if not models:
        # Discover model names from any matching results file (the model dir sits 3 levels up).
        found = sorted(root.glob(f'{exp}/**/{res}/{fname}'))
        models = sorted({f.parents[2].stem for f in found})
        print('Evaluation Models:', models)
    eval_files = {}
    for m in models:
        fs = sorted(root.glob(f'{exp}/{m}/**/{res}/{fname}'))
        if fs:  # Drop models with no matching result files.
            eval_files[m + tag] = fs
    models = list(eval_files)
    return (eval_files, models)
def load_dfs(files: dict[(str, list[Path])]) -> pd.DataFrame:
    """Load dict of YAML files into a single dataframe.

    :param files: (dict[str, list[Path]]) List of files for each model.
    :return: (DataFrame) Loaded dataframe, index based on the model key and a potential item number.
    """
    # Flatten to (model name, file) pairs so names and frames stay aligned.
    pairs = [(name, f) for name, fs in files.items() for f in fs]
    dfs = [pd.json_normalize(load_yaml(f)) for _, f in pairs]
    df = pd.concat(dfs)
    names = [name for name, _ in pairs]
    df.index = pd.MultiIndex.from_product([names, dfs[0].index], names=['Model', 'Item'])
    return df
def filter_df(df: pd.DataFrame) -> tuple[(pd.DataFrame, ty.S[int])]:
    """Preprocess dataframe to include only AbsRel and (F-Score or delta) metrics.

    :param df: (DataFrame) Raw metrics dataframe, possibly mixing legacy and current column names.
    :return: (
        df: (DataFrame) Filtered dataframe with columns renamed to {'Rel', 'F'/delta}.
        metric_type: (list[int]) Per-column direction: -1 = lower is better, +1 = higher is better.
    )
    """
    (metrics, metric_type) = (['AbsRel'], [(- 1)])
    # Legacy column aliases from older result files.
    (delta, delta_legacy) = ('$\\delta_{.25}$', '$\\delta < 1.25$')
    (f, f_legacy) = ('F-Score (10)', 'F-Score')
    # Normalize legacy-only columns to the current names.
    if ((f_legacy in df) and (f not in df)):
        df = df.rename(columns={f_legacy: f})
    if ((delta_legacy in df) and (delta not in df)):
        df[delta] = (100 * df[delta_legacy])  # Legacy delta stored as a fraction; convert to percent.
        df = df.drop(columns=[delta_legacy])
    # Prefer F-Score as the secondary metric; fall back to delta.
    if (f in df):
        metrics.append(f)
        metric_type.append((+ 1))
        if (f_legacy in df):
            # Merge legacy values into the current column (each row has one or the other).
            df[f] = (df[f].fillna(0) + df[f_legacy].fillna(0))
    elif (delta in df):
        metrics.append(delta)
        metric_type.append((+ 1))
        if (delta_legacy in df):
            # Merge legacy values (converted to percent) into the current column.
            df[delta] = (df[delta].fillna(0) + (100 * df[delta_legacy].fillna(0)))
    df = df[metrics]
    df = df.rename(columns={'AbsRel': 'Rel', f: 'F'})
    return (df, metric_type)