code
stringlengths
17
6.64M
def get_wilds_ood_test_loader(dataset, data_dir, data_fraction=1.0, model_seed=0):
    """Build an evaluation loader over the out-of-distribution test split of a WILDS dataset."""
    config = get_default_config(dataset, data_fraction=data_fraction)
    # Poverty selects a seed-dependent cross-validation fold; other datasets need no extras.
    extra_kwargs = {'fold': POVERTY_FOLDS[model_seed]} if dataset == 'poverty' else {}
    full_dataset = get_dataset(dataset=dataset, root_dir=data_dir, **extra_kwargs)
    train_grouper = CombinatorialGrouper(dataset=full_dataset,
                                         groupby_fields=config.groupby_fields)
    if dataset == 'fmow':
        # fMoW gets half the default batch size.
        config.batch_size = config.batch_size // 2
    test_transform = initialize_transform(transform_name=config.eval_transform,
                                          config=config,
                                          dataset=full_dataset)
    test_data = full_dataset.get_subset('test', frac=config.frac, transform=test_transform)
    loader = get_eval_loader(loader=config.eval_loader,
                             dataset=test_data,
                             batch_size=config.batch_size,
                             grouper=train_grouper,
                             **config.loader_kwargs)
    return ProperDataLoader(loader)
def get_default_config(dataset, algorithm='ERM', data_fraction=1.0):
    """Assemble a baseline run configuration and fill in dataset-specific defaults."""
    base = Namespace(
        dataset=dataset,
        algorithm=algorithm,
        model_kwargs={},
        optimizer_kwargs={},
        loader_kwargs={},
        dataset_kwargs={},
        scheduler_kwargs={},
        train_transform=None,
        eval_transform=None,
        no_group_logging=True,
        distinct_groups=True,
        frac=data_fraction,
        scheduler=None,
    )
    return populate_defaults(base)
def optimize_noise_standard_deviation(model, val_loader, device, lr=0.1, n_epochs=10):
    """Fit the noise standard deviation of a Gaussian regression likelihood on validation data.

    Only a single scalar (log of the noise std) is optimized; the model itself is frozen.

    Args:
        model: regression model; may return a tensor or a tuple whose first element
            is the prediction tensor.
        val_loader: iterable of (X, y) batches.
        device: torch device for the computation.
        lr: Adam learning rate for the noise parameter.
        n_epochs: number of passes over the validation data.

    Returns:
        The optimized noise standard deviation as a Python float.
    """
    log_sigma_noise = nn.Parameter(torch.zeros(1, device=device))
    optimizer = torch.optim.Adam([log_sigma_noise], lr=lr)
    gaussian_nll_loss = nn.GaussianNLLLoss(full=True)
    # Defined up-front so the post-epoch print (and the return) are valid even for
    # an empty loader; initial sigma is exp(0) = 1.
    sigma_noise = log_sigma_noise.exp().item()
    for e in range(n_epochs):
        print(f'Running epoch {e + 1}/{n_epochs}...')
        for i, (X, y) in enumerate(val_loader):
            optimizer.zero_grad()
            # Predictions are constants w.r.t. the noise parameter: do not build
            # (or backprop through) the model's autograd graph — the original
            # wastefully populated gradients on the model parameters.
            with torch.no_grad():
                y_pred = model(X.to(device))
            if isinstance(y_pred, tuple):
                y_pred = y_pred[0]
            # GaussianNLLLoss expects the *variance*, i.e. sigma^2.
            var = torch.ones_like(y_pred) * log_sigma_noise.exp() ** 2
            nll = gaussian_nll_loss(y_pred, y.to(device), var)
            nll.backward()
            optimizer.step()
            sigma_noise = log_sigma_noise.exp().item()
            print(f'    Iter {i + 1}/{len(val_loader)}: sigma_noise = {sigma_noise} (NLL: {nll.item()}).')
        print(f'After epoch {e + 1}/{n_epochs}: sigma_noise = {sigma_noise}.\n')
    return sigma_noise
class model():
    """Two-stream (bi-temporal) VGG16 classifier with a correlation-fusion layer (TF1 graph mode).

    Builds twin VGG16 feature extractors for two image epochs (t1/t2), fuses their
    embeddings with CorrFusion, and attaches softmax losses, accuracy metrics and
    TensorBoard summaries to the graph.

    NOTE(review): block scoping was reconstructed from a flattened one-line source;
    confirm `with`-block membership against the original layout.
    """

    def __init__(self, inputs_shape=None):
        # Start from a clean default graph so repeated construction does not collide.
        tf.reset_default_graph()
        # Both streams use the VGG16 architecture (instantiated separately in forward()).
        self.base_net_t1 = tf.keras.applications.VGG16
        self.base_net_t2 = tf.keras.applications.VGG16
        # Image placeholders for the two acquisition dates.
        self.inputs_t1 = tf.placeholder(dtype=tf.float32, shape=inputs_shape, name='inputs_t1')
        self.inputs_t2 = tf.placeholder(dtype=tf.float32, shape=inputs_shape, name='inputs_t2')
        # Per-sample integer class labels for each date.
        self.labels_t1 = tf.placeholder(dtype=tf.uint8, shape=[None], name='labels_t1')
        self.labels_t2 = tf.placeholder(dtype=tf.uint8, shape=[None], name='labels_t2')
        self.activation = tf.nn.relu
        self.hidden_num = 1024  # width of the fully-connected layers
        self.l2_reg = tf.contrib.layers.l2_regularizer(0.0001)
        self.init = tf.glorot_normal_initializer()

    def forward(self, num_classes=None):
        """Assemble the full graph; returns True once all ops are created."""
        label_t1_onehot = tf.one_hot(indices=self.labels_t1, depth=num_classes, name='label_t1_onehot')
        label_t2_onehot = tf.one_hot(indices=self.labels_t2, depth=num_classes, name='label_t2_onehot')
        # True where the two dates carry the same class label ("unchanged" pairs).
        label_bi = tf.equal(x=self.labels_t1, y=self.labels_t2, name='label_bi')
        with tf.variable_scope('conv_layers') as scope:
            # Headless VGG16 feature maps per stream, then global average pooling.
            conv_t1 = self.base_net_t1(weights=None, include_top=False, input_tensor=self.inputs_t1).output
            conv_t2 = self.base_net_t2(weights=None, include_top=False, input_tensor=self.inputs_t2).output
            flat_feature_t1 = tf.reduce_mean(conv_t1, axis=[1, 2], name='avg_pool_t1')
            flat_feature_t2 = tf.reduce_mean(conv_t2, axis=[1, 2], name='avg_pool_t2')
            # Two fully-connected layers per stream (weights NOT shared between streams).
            dense1_t1 = tf.layers.dense(inputs=flat_feature_t1, units=self.hidden_num, activation=self.activation, kernel_regularizer=self.l2_reg, kernel_initializer=self.init, name='dense1_t1')
            dense1_t2 = tf.layers.dense(inputs=flat_feature_t2, units=self.hidden_num, activation=self.activation, kernel_regularizer=self.l2_reg, kernel_initializer=self.init, name='dense1_t2')
            dense2_t1 = tf.layers.dense(inputs=dense1_t1, units=self.hidden_num, activation=self.activation, kernel_regularizer=self.l2_reg, kernel_initializer=self.init, name='dense2_t1')
            dense2_t2 = tf.layers.dense(inputs=dense1_t2, units=self.hidden_num, activation=self.activation, kernel_regularizer=self.l2_reg, kernel_initializer=self.init, name='dense2_t2')
        inputs_dim = dense2_t1.get_shape().as_list()[(- 1)]
        with tf.name_scope('Corr_Fusion_Layer'):
            # CorrFusion returns fused outputs plus correlation / decorrelation terms.
            (outputs_t1, outputs_t2, corr_loss, self.decov_loss, self.corr) = CorrFusion(input_shape=inputs_dim).forward(inputs_t1=dense2_t1, inputs_t2=dense2_t2)
        # Only "unchanged" pairs (label_bi) contribute to the correlation loss.
        self.corr_loss = tf.reduce_mean(tf.multiply(corr_loss, tf.cast(label_bi, tf.float32)), name='corr_loss')
        with tf.name_scope('losses') as scope:
            # Classifier weight matrices for each stream.
            weights_t1 = tf.get_variable(name='weights_t1', shape=[dense2_t1.get_shape().as_list()[(- 1)], num_classes], initializer=self.init)
            weights_t2 = tf.get_variable(name='weights_t2', shape=[dense2_t2.get_shape().as_list()[(- 1)], num_classes], initializer=self.init)
            (self.pred_prob_t1, self.softmax_loss_t1) = Original_Softmax_loss(embeddings=dense2_t1, weights=weights_t1, labels=label_t1_onehot)
            (self.pred_prob_t2, self.softmax_loss_t2) = Original_Softmax_loss(embeddings=dense2_t2, weights=weights_t2, labels=label_t2_onehot)
            # Batch-normalized embeddings concatenated as input to the deep-CCA loss.
            bn3_t1 = tf.layers.batch_normalization(inputs=dense2_t1, axis=(- 1), name='bn3_t1')
            bn3_t2 = tf.layers.batch_normalization(inputs=dense2_t2, axis=(- 1), name='bn3_t2')
            cca_input = tf.concat((bn3_t1, bn3_t2), axis=(- 1), name='cca_input')
            self.dcca_loss = DCCA_loss(cca_input)
            # Only the two softmax terms form `losses`; corr/decov/dcca are exposed
            # separately for the caller to combine — TODO confirm intended weighting.
            self.losses = (self.softmax_loss_t1 + self.softmax_loss_t2)
            self.prediction_t1 = tf.argmax(input=self.pred_prob_t1, axis=1, name='prediction_t1')
            self.prediction_t2 = tf.argmax(input=self.pred_prob_t2, axis=1, name='prediction_t2')
        with tf.name_scope('metrics') as scope:
            # Streaming accuracy; the *_op tensors update the running totals.
            (self.metrics_t1, self.metrics_t1_op) = tf.metrics.accuracy(self.labels_t1, predictions=self.prediction_t1, name='metrics_t1')
            (self.metrics_t2, self.metrics_t2_op) = tf.metrics.accuracy(self.labels_t2, predictions=self.prediction_t2, name='metrics_t2')
            # tf.metrics uses local variables; expose their initializer for the session.
            self.local_init = tf.local_variables_initializer()
        tf.summary.scalar(name='losses/t1', tensor=self.softmax_loss_t1)
        tf.summary.scalar(name='losses/t2', tensor=self.softmax_loss_t2)
        tf.summary.scalar(name='acc/t1', tensor=self.metrics_t1)
        tf.summary.scalar(name='acc/t2', tensor=self.metrics_t2)
        return True
def LoadNpy(filename=None):
    """Load a paired-image .npz archive; images rescaled to [0, 1], labels shifted to 0-based."""
    archive = np.load(file=filename)
    raw_t1 = archive['image_t1']
    image_t1 = raw_t1.astype(np.float32) / np.max(raw_t1)
    raw_t2 = archive['image_t2']
    image_t2 = raw_t2.astype(np.float32) / np.max(raw_t2)
    # Labels in the archive are 1-based; shift them to start at 0.
    label_t1 = archive['label_t1'] - 1
    label_t2 = archive['label_t2'] - 1
    return (image_t1, image_t2, label_t1, label_t2)
def extract_label(file_list):
    """Collect the t1/t2 label arrays from every .npz file in `file_list`.

    Args:
        file_list: iterable of .npz paths accepted by LoadNpy.

    Returns:
        (label_t1, label_t2): concatenated label arrays, or (None, None) for an
        empty file list (matching the original behavior).
    """
    chunks_t1, chunks_t2 = [], []
    for file in file_list:
        _, _, temp_label_t1, temp_label_t2 = LoadNpy(file)
        chunks_t1.append(temp_label_t1)
        chunks_t2.append(temp_label_t2)
    if not chunks_t1:
        return (None, None)
    # Concatenate once at the end instead of re-concatenating inside the loop
    # (O(n) total copying instead of O(n^2)).
    return (np.concatenate(chunks_t1, axis=0), np.concatenate(chunks_t2, axis=0))
class config():
    """Static settings for the bi-temporal land-use visualisation (paths, palette, class names)."""

    def __init__(self):
        # 14 RGB rows; first scaled by /255, then re-normalised so the max entry is 1.
        palette = np.array([[0, 0, 205], [65, 105, 225], [135, 206, 235], [0, 139, 69],
                            [0, 216, 0], [238, 154, 73], [163, 124, 2], [255, 38, 38],
                            [205, 38, 38], [139, 26, 26], [255, 231, 186], [48, 48, 48],
                            [179, 151, 143], [186, 85, 211]], dtype=np.float32) / 255
        self.colormap = np.array(palette, dtype=np.float32) / np.max(palette)
        self.class_name = ['0-undefined', '1-administration', '2-commercial', '3-water',
                           '4-farmland', '5-greenspace', '6-transportation', '7-industrial',
                           '8-residential-1', '9-residential-2', '10-residential-3',
                           '11-road', '12-parking', '13-bareland', '14-playground']
        self.shape = [190, 237]          # output map height/width
        self.dir_t1 = './2014/sure/'     # epoch-1 data directory
        self.dir_t2 = './2016/sure/'     # epoch-2 data directory
        self.step = 1
def _parse_bool(value):
    """argparse-friendly boolean parser.

    The original used `type=bool`, but bool('False') is True — any non-empty
    string parses as True. Parse the text explicitly instead.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got {!r}'.format(value))


def argparser():
    """Build and parse the command-line arguments for training/evaluation.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', help='gpu device id', default='1')
    parser.add_argument('-b', '--batch_size', help='batch size', type=int, default=32)
    parser.add_argument('-e', '--epoches', help='max epoches', type=int, default=100)
    parser.add_argument('-n', '--num_classes', help='num classes', type=int, default=14)
    # Boolean flags keep their value-taking form for backward compatibility,
    # but now parse 'False'/'0'/... correctly.
    parser.add_argument('-tb', '--use_tfboard', help='use tensorboard', type=_parse_bool, default=True)
    parser.add_argument('-sm', '--save_model', help='save best model', type=_parse_bool, default=True)
    parser.add_argument('-log', '--save_log', help='save training log', type=_parse_bool, default=True)
    parser.add_argument('-trn', '--trn_dir', help='training file dir', default='../wuhan/data_small/trn/')
    parser.add_argument('-tst', '--tst_dir', help='testing file dir', default='../wuhan/data_small/tst/')
    parser.add_argument('-val', '--val_dir', help='validation file dir', default='../wuhan/data_small/val/')
    parser.add_argument('-lpath', '--log_path', help='log file path', default='./VGG16-v1-dcca/log/')
    parser.add_argument('-mpath', '--model_path', help='model file path', default='./VGG16-v1-dcca/model/')
    parser.add_argument('-tbpath', '--tb_path', help='tfboard file path', default='./VGG16-v1-dcca/tfboard/')
    parser.add_argument('-rpath', '--result_path', help='validation file path', default='./result/')
    args = parser.parse_args()
    return args
def DecodeH5(h5file=None):
    """Read an HDF5 sample file and return (data, label).

    data: float32 image scaled to [-0.5, 0.5] (pixel / 255 - 0.5).
    label: int8 labels shifted from 1-based to 0-based.
    """
    # Context manager closes the file even on error — the original leaked the handle.
    with h5py.File(name=h5file, mode='r') as f:
        # `Dataset.value` was removed in h5py 3.x; `[()]` is the supported spelling.
        data = (f['image'][()].astype(np.float32) / 255) - 0.5
        label = f['label'][()].astype(np.int8) - 1
    return (data, label)
def LoadNpy(filename=None):
    """Load a paired-image .npz archive; images centred to [-0.5, 0.5], labels shifted to 0-based."""
    archive = np.load(file=filename)
    raw_t1 = archive['image_t1']
    image_t1 = (raw_t1.astype(np.float32) / np.max(raw_t1)) - 0.5
    raw_t2 = archive['image_t2']
    image_t2 = (raw_t2.astype(np.float32) / np.max(raw_t2)) - 0.5
    # Archive labels are 1-based; shift to start at 0.
    label_t1 = archive['label_t1'] - 1
    label_t2 = archive['label_t2'] - 1
    return (image_t1, image_t2, label_t1, label_t2)
def Accuracy(pred_t1, pred_t2, label_t1, label_t2):
    """Compute per-epoch accuracies plus agreement-based scores for a bi-temporal classifier.

    Returns:
        oa_t1: overall accuracy at time 1.
        oa_t2: overall accuracy at time 2.
        oa_bi: accuracy of the binary "same class at both times" map (prediction
            agreement vs. label agreement).
        oa_tr: fraction of samples predicted correctly at BOTH times.
    """
    oa_t1 = metrics.accuracy_score(y_true=label_t1, y_pred=pred_t1)
    oa_t2 = metrics.accuracy_score(y_true=label_t2, y_pred=pred_t2)
    pred_bi = np.equal(pred_t1, pred_t2).astype(np.int16)
    label_bi = np.equal(label_t1, label_t2).astype(np.int16)
    oa_bi = metrics.accuracy_score(y_true=label_bi, y_pred=pred_bi)
    # Vectorized; the old commented-out per-sample loop (left behind as a stray
    # string literal) has been removed.
    oa_tr = np.sum((pred_t1 == label_t1) & (pred_t2 == label_t2)) / float(len(pred_t1))
    return (oa_t1, oa_t2, oa_bi, oa_tr)
class Point():
    """A 2-D point; coordinates are stored exactly as supplied (often strings)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Serialised as "x,y" — the same format the landmark files use.
        return '{},{}'.format(self.x, self.y)
class Vector():
    """Displacement from point pa to point pb; coordinates coerced to int."""

    def __init__(self, pa, pb):
        self.x = int(pb.x) - int(pa.x)
        self.y = int(pb.y) - int(pa.y)

    def __str__(self):
        return '{},{}'.format(self.x, self.y)
class Angle():
    """Unsigned angle (in degrees) between two 2-D vectors."""

    def __init__(self, va, vb):
        self.va = va
        self.vb = vb

    def theta(self):
        # cos(theta) = (va . vb) / (|va| * |vb|); acos gives [0, 180] degrees.
        dot = self.va.x * self.vb.x + self.va.y * self.vb.y
        norms = math.hypot(self.va.x, self.va.y) * math.hypot(self.vb.x, self.vb.y)
        return math.degrees(math.acos(dot / norms))
class Distance():
    """Euclidean distance between two points (coordinates coerced to int)."""

    def __init__(self, pa, pb):
        dx = int(pb.x) - int(pa.x)
        dy = int(pb.y) - int(pa.y)
        # Squared components are kept, matching the original attribute layout.
        self.x = dx * dx
        self.y = dy * dy

    def dist(self):
        return (self.x + self.y) ** 0.5
def checkArg():
    """Exit with a short usage message unless exactly one CLI argument was given."""
    if len(sys.argv) == 2:
        return
    print('please give me file')
    sys.exit(0)
def readFile(filename):
    """Parse a landmark file — one "x,y" pair per line — into a list of Point objects.

    Extra comma-separated fields after the first two are ignored, matching the
    original behavior.
    """
    points = []
    # with-statement closes the file even if parsing raises (the original closed manually).
    with open(filename, 'r') as f:
        for line in f:
            # Split once instead of twice per line.
            parts = line.strip(' \t\n\r').split(',')
            points.append(Point(parts[0], parts[1]))
    return points
def getCross(va, vb):
    """2-D cross product (z component) of va x vb; the sign gives the turn orientation."""
    return va.x * vb.y - va.y * vb.x
def getODI(pa, pb, pc, pd, pe, pf, pg, ph):
    """ODI measurement: first angle plus a second angle signed by its cross product."""
    first = Angle(Vector(pa, pb), Vector(pc, pd)).theta()
    v1 = Vector(pe, pf)
    v2 = Vector(pg, ph)
    second = Angle(v1, v2).theta()
    # A negative cross product flips the sign of the second angle.
    if getCross(v1, v2) < 0:
        second = -second
    return first + second
def getAPDI(pa, pb, pc, pd, pe, pf, pg, ph, pi, pj):
    """APDI measurement: sum of three angles, the last two carrying sign conventions."""
    v_ab = Vector(pa, pb)
    v_cd = Vector(pc, pd)
    v_ef = Vector(pe, pf)
    v_gh = Vector(pg, ph)
    v_ij = Vector(pi, pj)
    first = Angle(v_ab, v_cd).theta()
    second = Angle(v_cd, v_ef).theta()
    third = Angle(v_gh, v_ij).theta()
    # Sign conventions: the second angle flips on a positive cross product,
    # the third on a negative one.
    if getCross(v_cd, v_ef) > 0:
        second = -second
    if getCross(v_gh, v_ij) < 0:
        third = -third
    return first + second + third
def writeFile(filename, points, ANBtype, SNBtype, SNAtype, ODItype, APDItype, FHItype, FMAtype, mwtype):
    """Write the landmark points (one "x,y" per line) followed by the eight classification codes.

    Output order matches the original: points first, then
    ANB, SNB, SNA, ODI, APDI, FHI, FMA, MW — one code per line.
    """
    # with-statement guarantees the file is closed even if a write fails.
    with open(filename, 'w') as f:
        for point in points:
            f.write(str(point) + '\n')
        for code in (ANBtype, SNBtype, SNAtype, ODItype, APDItype, FHItype, FMAtype, mwtype):
            f.write(code + '\n')
def classification(points):
    # Classify a cephalometric landmark set into eight categorical measurement types.
    # `points` is an indexable sequence of landmark Points (at least 18 entries);
    # NOTE(review): the anatomical meaning of each index is inferred from usage
    # only — confirm against the dataset's landmark numbering.
    # Returns an np.array of eight string codes, in the order:
    # [ANB, SNB, SNA, ODI, APDI, FHI, FMA, MW].

    # --- ANB: difference between the N-A and N-B angles ---
    va = Vector(points[1], points[0])
    vb = Vector(points[1], points[5])
    vc = Vector(points[1], points[0])
    vd = Vector(points[1], points[4])
    ANBtype = ''
    ANB = (Angle(vc, vd).theta() - Angle(va, vb).theta())
    if (ANB < 3.2):
        ANBtype = '3'
    elif (ANB > 5.7):
        ANBtype = '2'
    else:
        ANBtype = '1'
    # --- SNB angle ---
    va = Vector(points[1], points[0])
    vb = Vector(points[1], points[5])
    SNBtype = ''
    SNB = Angle(va, vb).theta()
    if (SNB < 74.6):
        SNBtype = '2'
    elif (SNB > 78.7):
        SNBtype = '3'
    else:
        SNBtype = '1'
    # --- SNA angle ---
    va = Vector(points[1], points[0])
    vb = Vector(points[1], points[4])
    SNAtype = ''
    SNA = Angle(va, vb).theta()
    if (SNA < 79.4):
        SNAtype = '3'
    elif (SNA > 83.2):
        SNAtype = '2'
    else:
        SNAtype = '1'
    # --- ODI (overbite depth indicator) ---
    ODItype = ''
    ODI = getODI(points[7], points[9], points[5], points[4], points[3], points[2], points[16], points[17])
    if (ODI < 68.4):
        ODItype = '3'
    elif (ODI > 80.5):
        ODItype = '2'
    else:
        ODItype = '1'
    # --- APDI (anteroposterior dysplasia indicator) ---
    APDItype = ''
    APDI = getAPDI(points[2], points[3], points[1], points[6], points[4], points[5], points[3], points[2], points[16], points[17])
    if (APDI < 77.6):
        APDItype = '2'
    elif (APDI > 85.2):
        APDItype = '3'
    else:
        APDItype = '1'
    # --- FHI: facial height index = posterior / anterior facial height ---
    pfh = Distance(points[0], points[9]).dist()
    afh = Distance(points[1], points[7]).dist()
    FHItype = ''
    if ((pfh / afh) < 0.65):
        FHItype = '3'
    elif ((pfh / afh) > 0.75):
        FHItype = '2'
    else:
        FHItype = '1'
    # --- FMA: mandibular plane angle ---
    va = Vector(points[0], points[1])
    vb = Vector(points[9], points[8])
    FMAtype = ''
    if (Angle(va, vb).theta() < 26.8):
        FMAtype = '3'
    elif (Angle(va, vb).theta() > 31.4):
        FMAtype = '2'
    else:
        FMAtype = '1'
    # --- MW: distance in mm (pixels / 10), signed negative when the points'
    # x-order is reversed ---
    mw = (Distance(points[10], points[11]).dist() / 10)
    mwtype = ''
    if (points[11].x < points[10].x):
        mw = (- mw)
    if (mw >= 2):
        if (mw <= 4.5):
            mwtype = '1'
        else:
            mwtype = '4'
    elif (mw == 0):
        mwtype = '2'
    else:
        mwtype = '3'
    return np.array([ANBtype, SNBtype, SNAtype, ODItype, APDItype, FHItype, FMAtype, mwtype])
def main():
    """Entry point: build the fusion VGG19 model, cache all batches in memory, and train."""
    config = parser.parse_args()
    model_ft = models.fusionVGG19(torchvision.models.vgg19_bn(pretrained=True), config).cuda(config.use_gpu)
    print('image scale ', config.image_scale)
    print('GPU: ', config.use_gpu)
    transform_origin = torchvision.transforms.Compose([Rescale(config.image_scale), ToTensor()])
    train_dataset_origin = LandmarksDataset(
        csv_file=config.dataRoot + config.trainingSetCsv,
        root_dir=config.dataRoot + config.supervised_dataset_train,
        transform=transform_origin,
        landmarksNum=config.landmarkNum)
    val_dataset = LandmarksDataset(
        csv_file=config.dataRoot + config.testSetCsv,
        root_dir=config.dataRoot + config.supervised_dataset_test,
        transform=transform_origin,
        landmarksNum=config.landmarkNum)
    train_loader = DataLoader(train_dataset_origin, batch_size=config.batchSize,
                              shuffle=False, num_workers=40)
    val_loader = DataLoader(val_dataset, batch_size=config.batchSize,
                            shuffle=False, num_workers=40)
    # Materialize every batch up front so each epoch iterates over cached tensors.
    train_dataloader = [batch for batch in train_loader]
    val_dataloader = [batch for batch in val_loader]
    print(len(train_dataloader), len(val_dataloader))
    dataloaders = {'train': train_dataloader, 'val': val_dataloader}
    para_list = list(model_ft.children())
    print('len', len(para_list))
    for idx, child in enumerate(para_list):
        print(idx, '-------------------->>>>', child)
    model_ft = model_ft.cuda(config.use_gpu)
    criterion = lossFunction.fusionLossFunc_improved(config)
    optimizer_ft = optim.Adadelta(filter(lambda p: p.requires_grad, model_ft.parameters()), lr=1.0)
    train.train_model(model_ft, dataloaders, criterion, optimizer_ft, config)
def train_model(model, dataloaders, criterion, optimizer, config):
    # Training loop over `config.epochs` epochs; every `test_epoch` epochs it also
    # accumulates landmark deviations on the training set and runs validation.
    # NOTE(review): indentation reconstructed from a flattened one-line source —
    # confirm which statements sit inside the phase loop vs. the epoch loop.
    since = time.time()
    test_epoch = 5  # evaluate / report deviations every 5 epochs
    for epoch in range(config.epochs):
        train_dev = []
        for phase in ['train']:
            model.train(True)
            running_loss = 0.0
            lent = len(dataloaders[phase])
            pbar = tqdm(total=(lent * config.batchSize))
            for ide in range(lent):
                # Batches were pre-cached as a list of dicts (see main()).
                data = dataloaders[phase][ide]
                (inputs, labels) = (data['image'], data['landmarks'])
                inputs = inputs.to(config.use_gpu)
                optimizer.zero_grad()
                heatmaps = model(inputs)
                # NOTE(review): labels are kept on CPU for the loss while inputs go
                # to the GPU — presumably the criterion moves them internally; confirm.
                loss = criterion(heatmaps[0], labels.detach().cpu())
                loss.backward()
                optimizer.step()
                if ((epoch % test_epoch) == 0):
                    # Decode landmark coordinates from the heatmaps and record the
                    # per-landmark deviation from ground truth.
                    predicted_landmarks = utils.regression_voting(heatmaps, config.R2).cuda(config.use_gpu)
                    dev = utils.calculate_deviation(predicted_landmarks.detach(), labels.cuda(config.use_gpu).detach())
                    train_dev.append(dev)
                running_loss += loss.item()
                pbar.update(config.batchSize)
            pbar.close()
            epoch_loss = (running_loss / lent)
            print('{} epoch: {} Loss: {}'.format(phase, epoch, epoch_loss))
        if ((epoch % test_epoch) == 0):
            # Convert pixel deviations to millimetres via the image spacing.
            train_dev = (torch.stack(train_dev).squeeze() * config.spacing)
            (train_SDR, train_SD, train_MRE) = utils.get_statistical_results(train_dev, config)
            print(('train_MRE(SD): %f(%f), SDR([1mm, 2mm, 2.5mm, 3mm, 4mm]): ' % (torch.mean(train_MRE).detach().cpu().numpy(), torch.mean(train_SD).detach().cpu().numpy())), torch.mean(train_SDR, 0).detach().cpu().numpy())
            val(model, dataloaders, criterion, optimizer, config)
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
def val(model, dataloaders, criterion, optimizer, config):
    # Evaluate on the 'val' split and track the best MRE/SD/SDR seen so far in
    # module-level globals. `criterion` and `optimizer` are accepted only for
    # signature parity with train_model; they are not used here.
    since = time.time()
    test_dev = []
    for phase in ['val']:
        model.train(False)  # switch to eval mode
        running_loss = 0.0  # NOTE(review): never updated in this function
        lent = len(dataloaders[phase])
        pbar = tqdm(total=(lent * config.batchSize))
        for ide in range(lent):
            data = dataloaders[phase][ide]
            (inputs, labels) = (data['image'], data['landmarks'])
            inputs = inputs.to(config.use_gpu)
            heatmaps = model(inputs)
            # Decode landmark coordinates from the predicted heatmaps.
            predicted_landmarks = utils.regression_voting(heatmaps, config.R2).to(config.use_gpu)
            dev = utils.calculate_deviation(predicted_landmarks.detach(), labels.to(config.use_gpu).detach())
            test_dev.append(dev)
            pbar.update(config.batchSize)
        pbar.close()
    # Convert pixel deviations to millimetres via the image spacing.
    test_dev = (torch.stack(test_dev).squeeze() * config.spacing)
    (test_SDR, test_SD, test_MRE) = utils.get_statistical_results(test_dev, config)
    print(('test_MRE(SD): %f(%f), SDR([1mm, 2mm, 2.5mm, 3mm, 4mm]):' % (torch.mean(test_MRE).detach().cpu().numpy(), torch.mean(test_SD).detach().cpu().numpy())), torch.mean(test_SDR, 0).detach().cpu().numpy())
    global best_MRE
    global best_SD
    global best_SDR
    # Keep the best (lowest) mean radial error across calls.
    if (best_MRE > torch.mean(test_MRE).detach().cpu().numpy()):
        best_MRE = torch.mean(test_MRE).detach().cpu().numpy()
        best_SD = torch.mean(test_SD).detach().cpu().numpy()
        best_SDR = torch.mean(test_SDR, 0).detach().cpu().numpy()
    time_elapsed = (time.time() - since)
    print('testing complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    print(('Best val MRE(SD): %f(%f), SDR([1mm, 2mm, 2.5mm, 3mm, 4mm]):' % (best_MRE, best_SD)), best_SDR)
def draw_matches(data):
    """Render the inlier matches between the two single-channel images in `data`.

    Expects keys 'keypoints1'/'keypoints2' ((row, col) coordinates), 'inliers',
    'matches', 'image1', 'image2' — TODO confirm exact schema against the caller.
    """
    keypoints1 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints1']]
    keypoints2 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints2']]
    inliers = data['inliers'].astype(bool)
    matches = np.array(data['matches'])[inliers].tolist()
    # BUG FIX: the original read `output['image1']` / `output['image2']` — an
    # undefined global; the images come from the same `data` dict as everything else.
    img1 = (np.concatenate([data['image1'], data['image1'], data['image1']], axis=2) * 255)
    img2 = (np.concatenate([data['image2'], data['image2'], data['image2']], axis=2) * 255)
    return cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches, None,
                           matchColor=(0, 255, 0), singlePointColor=(0, 0, 255))
def draw_keypoints(img, corners, color):
    """Overlay corners (given as (rows, cols) index arrays, e.g. from np.where) on img."""
    pts = [cv2.KeyPoint(col, row, 1) for row, col in np.stack(corners).T]
    return cv2.drawKeypoints(img.astype(np.uint8), pts, None, color=color)
def draw_keypoints(img, corners, color, radius=3, s=3):
    """Upscale img by factor s, expand to 3 channels, and draw a filled circle at each corner."""
    scaled = cv2.resize(img, None, fx=s, fy=s)
    img = np.repeat(scaled[..., np.newaxis], 3, -1)
    for c in np.stack(corners).T:
        # Corners are (row, col); cv2 wants (x, y), hence the flip.
        center = tuple(s * np.flip(c, 0))
        cv2.circle(img, center, radius, color, thickness=-1)
    return img
def select_top_k(prob, thresh=0, num=300):
    """Return (rows, cols) of up to `num` highest-probability cells strictly above `thresh`."""
    rows, cols = np.where(prob > thresh)
    # Sort the surviving cells by probability, descending, then keep the top `num`.
    order = np.argsort(prob[(rows, cols)])[::-1][:num]
    return (rows[order], cols[order])
def draw_keypoints(img, corners, color):
    """Mark corner locations ((rows, cols) arrays) on a uint8 copy of img via cv2.drawKeypoints."""
    stacked = np.stack(corners).T
    keypoints = [cv2.KeyPoint(pt[1], pt[0], 1) for pt in stacked]
    return cv2.drawKeypoints(img.astype(np.uint8), keypoints, None, color=color)
def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):
    """Scale img up by s, expand to RGB, and stamp a filled circle on each (row, col) corner."""
    img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)
    for pt in np.stack(corners).T:
        # (row, col) -> (x, y) for OpenCV, scaled to the enlarged image.
        cv2.circle(img, tuple(np.flip(pt, 0) * s), radius, color, thickness=-1)
    return img
def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):
    """Render corners as filled circles on an s-times-upscaled 3-channel copy of img."""
    canvas = cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis].repeat(3, -1)
    for rc in np.stack(corners).T:
        cv2.circle(canvas, tuple(s * np.flip(rc, 0)), radius, color, thickness=-1)
    return canvas
def draw_overlay(img, mask, color=[0, 0, 255], op=0.5, s=3):
    """Alpha-blend `color` into img (in place) wherever the s-upscaled mask is nonzero."""
    big_mask = cv2.resize(mask.astype(np.uint8), None, fx=s, fy=s,
                          interpolation=cv2.INTER_NEAREST)
    sel = np.where(big_mask)
    img[sel] = img[sel] * (1 - op) + np.array(color) * op
def display(d):
    """Grayscale image scaled to [0, 255], with optional keypoints and an invalid-region tint."""
    base = d['image'][..., 0] * 255
    # `add_keypoints` is a module-level flag defined elsewhere.
    if add_keypoints:
        img = draw_keypoints(base, np.where(d['keypoint_map']), (0, 255, 0))
    else:
        img = base
    draw_overlay(img, np.logical_not(d['valid_mask']))
    return img
def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):
    """Draw each corner as a filled circle on an s-times-enlarged 3-channel image."""
    enlarged = cv2.resize(img, None, fx=s, fy=s)
    img = np.repeat(enlarged[..., np.newaxis], 3, axis=-1)
    for c in np.stack(corners).T:
        xy = tuple(s * np.flip(c, 0))
        cv2.circle(img, xy, radius, color, thickness=-1)
    return img
def draw_overlay(img, mask, color=[0, 0, 255], op=0.5, s=3):
    """In place: tint img with `color` at opacity `op` where the upscaled mask is set."""
    mask = cv2.resize(mask.astype(np.uint8), None, fx=s, fy=s,
                      interpolation=cv2.INTER_NEAREST)
    idx = np.where(mask)
    img[idx] = (1 - op) * img[idx] + op * np.array(color)
def display(d):
    """Compose the display image: grayscale base, optional keypoints, invalid-region overlay."""
    gray = d['image'][..., 0] * 255
    # Keypoint drawing is gated by the module-level `add_keypoints` flag.
    img = draw_keypoints(gray, np.where(d['keypoint_map']), (0, 255, 0)) if add_keypoints else gray
    draw_overlay(img, np.logical_not(d['valid_mask']))
    return img
def draw_keypoints(img, corners, color, radius=4, s=3):
    """Paint filled circles of `radius` at each corner on an s-times-enlarged RGB copy of img."""
    rgb = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)
    for rc in np.stack(corners).T:
        cv2.circle(rgb, tuple(np.flip(rc, 0) * s), radius, color, thickness=-1)
    return rgb
def draw_keypoints(img, corners, color):
    """Visualise (rows, cols) corner arrays via cv2.drawKeypoints on a uint8 image."""
    kp = [cv2.KeyPoint(xy[1], xy[0], 1) for xy in np.stack(corners).T]
    return cv2.drawKeypoints(img.astype(np.uint8), kp, None, color=color)
def display(d):
    """Return the keypoint map drawn over the grayscale image (scaled to [0, 255])."""
    gray = d['image'][..., 0] * 255
    return draw_keypoints(gray, np.where(d['keypoint_map']), (0, 255, 0))
def draw_keypoints(img, corners, color):
    """Draw the supplied (rows, cols) corners onto img with OpenCV keypoint markers."""
    points = np.stack(corners).T
    return cv2.drawKeypoints(img.astype(np.uint8),
                             [cv2.KeyPoint(p[1], p[0], 1) for p in points],
                             None, color=color)
def draw_overlay(img, mask, color=[0, 0, 255], op=0.5):
    """In-place alpha blend: where mask is truthy, mix img with `color` at opacity `op`."""
    where = np.where(mask)
    img[where] = (1 - op) * img[where] + op * np.array(color)
def display(d):
    """Keypoints over the grayscale image, with invalid regions tinted; returns the image."""
    canvas = draw_keypoints(d['image'][..., 0] * 255, np.where(d['keypoint_map']), (0, 255, 0))
    draw_overlay(canvas, np.logical_not(d['valid_mask']))
    return canvas
def get_dataset(name):
    """Return the dataset class for `name` from the superpoint.datasets package.

    Example: name='synthetic_shapes' resolves to
    superpoint.datasets.synthetic_shapes.SyntheticShapes.
    """
    import importlib
    # importlib.import_module is the documented replacement for the
    # __import__('pkg.mod', fromlist=['']) idiom.
    mod = importlib.import_module('superpoint.datasets.{}'.format(name))
    return getattr(mod, _module_to_class(name))
def _module_to_class(name): return ''.join((n.capitalize() for n in name.split('_')))
class BaseDataset(metaclass=ABCMeta):
    """Base class for datasets exposed through the TF1 `tf.data` API.

    Subclasses implement `_init_dataset` (prepare file lists, download, etc.) and
    `_get_data` (build a `tf.data.Dataset` for one split), and may declare a
    `default_config` dict of defaults that is merged with the constructor kwargs.
    """

    # Split identifiers every dataset must provide.
    split_names = ['training', 'validation', 'test']

    @abstractmethod
    def _init_dataset(self, **config):
        """Prepare the dataset for reading (e.g. list the data files).

        Should NOT load the whole dataset into memory; it only sets up faster
        subsequent fetching. Returns an object later handed to `_get_data`.
        """
        raise NotImplementedError

    @abstractmethod
    def _get_data(self, dataset, split_name, **config):
        """Build a `tf.data.Dataset` of dict-structured elements for `split_name`
        ('training', 'validation' or 'test'). Must NOT batch — batching is left
        to the model.
        """
        raise NotImplementedError

    def get_tf_datasets(self):
        """Dict mapping split names to their `tf.data.Dataset` objects."""
        return self.tf_splits

    def get_training_set(self):
        """Generator over training elements as dicts of numpy arrays."""
        return self._get_set_generator('training')

    def get_validation_set(self):
        """Generator over validation elements as dicts of numpy arrays."""
        return self._get_set_generator('validation')

    def get_test_set(self):
        """Generator over test elements as dicts of numpy arrays."""
        return self._get_set_generator('test')

    def __init__(self, **config):
        # Subclass defaults are overridden by constructor kwargs.
        self.config = dict_update(getattr(self, 'default_config', {}), config)
        self.dataset = self._init_dataset(**self.config)
        self.tf_splits = {}
        self.tf_next = {}
        # Input pipelines are pinned to the CPU; one one-shot iterator per split.
        with tf.device('/cpu:0'):
            for n in self.split_names:
                self.tf_splits[n] = self._get_data(self.dataset, n, **self.config)
                self.tf_next[n] = self.tf_splits[n].make_one_shot_iterator().get_next()
        # Exception type raised when an iterator is exhausted.
        self.end_set = tf.errors.OutOfRangeError
        self.sess = tf.Session()

    def _get_set_generator(self, set_name):
        # Runs the split's iterator until TF raises OutOfRangeError, which
        # propagates to the caller (see `end_set`).
        while True:
            (yield self.sess.run(self.tf_next[set_name]))
class Coco(BaseDataset):
    """COCO train2014 images as a `tf.data` pipeline, with optional exported keypoint
    labels, photometric/homographic augmentation, and a warped image pair.

    NOTE(review): indentation reconstructed from a flattened one-line source —
    confirm `with`/`if` block membership against the original layout.
    """

    default_config = {'labels': None, 'cache_in_memory': False, 'validation_size': 100, 'truncate': None, 'preprocessing': {'resize': [240, 320]}, 'num_parallel_calls': 10, 'augmentation': {'photometric': {'enable': False, 'primitives': 'all', 'params': {}, 'random_order': True}, 'homographic': {'enable': False, 'params': {}, 'valid_border_margin': 0}}, 'warped_pair': {'enable': False, 'params': {}, 'valid_border_margin': 0}}

    def _init_dataset(self, **config):
        # List the image files (optionally truncated) and, if requested, the
        # pre-exported .npz keypoint label for every image.
        base_path = Path(DATA_PATH, 'COCO/train2014/')
        image_paths = list(base_path.iterdir())
        if config['truncate']:
            image_paths = image_paths[:config['truncate']]
        names = [p.stem for p in image_paths]
        image_paths = [str(p) for p in image_paths]
        files = {'image_paths': image_paths, 'names': names}
        if config['labels']:
            label_paths = []
            for n in names:
                p = Path(EXPER_PATH, config['labels'], '{}.npz'.format(n))
                assert p.exists(), 'Image {} has no corresponding label {}'.format(n, p)
                label_paths.append(str(p))
            files['label_paths'] = label_paths
        # Monkey-patch a parallel-map helper onto tf.data.Dataset so _get_data can
        # write `ds.map_parallel(fn)` with the configured parallelism.
        tf.data.Dataset.map_parallel = (lambda self, fn: self.map(fn, num_parallel_calls=config['num_parallel_calls']))
        return files

    def _get_data(self, files, split_name, **config):
        has_keypoints = ('label_paths' in files)
        is_training = (split_name == 'training')

        def _read_image(path):
            # Decode a PNG/JPEG file into a float32 HxWx3 tensor.
            image = tf.read_file(path)
            image = tf.image.decode_png(image, channels=3)
            return tf.cast(image, tf.float32)

        def _preprocess(image):
            # Grayscale, then optional aspect-preserving resize.
            image = tf.image.rgb_to_grayscale(image)
            if config['preprocessing']['resize']:
                image = pipeline.ratio_preserving_resize(image, **config['preprocessing'])
            return image

        def _read_points(filename):
            # Runs via tf.py_func: load exported keypoints from the .npz label file.
            return np.load(filename.decode('utf-8'))['points'].astype(np.float32)

        names = tf.data.Dataset.from_tensor_slices(files['names'])
        images = tf.data.Dataset.from_tensor_slices(files['image_paths'])
        images = images.map(_read_image)
        images = images.map(_preprocess)
        data = tf.data.Dataset.zip({'image': images, 'name': names})
        if has_keypoints:
            # Attach the (N, 2) keypoint array and a dummy all-valid mask.
            kp = tf.data.Dataset.from_tensor_slices(files['label_paths'])
            kp = kp.map((lambda path: tf.py_func(_read_points, [path], tf.float32)))
            kp = kp.map((lambda points: tf.reshape(points, [(- 1), 2])))
            data = tf.data.Dataset.zip((data, kp)).map((lambda d, k: {**d, 'keypoints': k}))
            data = data.map(pipeline.add_dummy_valid_mask)
        if (split_name == 'validation'):
            data = data.take(config['validation_size'])
        if config['cache_in_memory']:
            tf.logging.info('Caching data, fist access will take some time.')
            data = data.cache()
        if config['warped_pair']['enable']:
            # Build a homographically-warped twin of each element (training pairs).
            assert has_keypoints
            warped = data.map_parallel((lambda d: pipeline.homographic_augmentation(d, add_homography=True, **config['warped_pair'])))
            if (is_training and config['augmentation']['photometric']['enable']):
                warped = warped.map_parallel((lambda d: pipeline.photometric_augmentation(d, **config['augmentation']['photometric'])))
            warped = warped.map_parallel(pipeline.add_keypoint_map)
            data = tf.data.Dataset.zip((data, warped))
            data = data.map((lambda d, w: {**d, 'warped': w}))
        if (has_keypoints and is_training):
            # Augment the base element; homographic augmentation is mutually
            # exclusive with the warped-pair mode.
            if config['augmentation']['photometric']['enable']:
                data = data.map_parallel((lambda d: pipeline.photometric_augmentation(d, **config['augmentation']['photometric'])))
            if config['augmentation']['homographic']['enable']:
                assert (not config['warped_pair']['enable'])
                data = data.map_parallel((lambda d: pipeline.homographic_augmentation(d, **config['augmentation']['homographic'])))
        if has_keypoints:
            data = data.map_parallel(pipeline.add_keypoint_map)
        # Scale images from [0, 255] to [0, 1] (also for the warped twin, if any).
        data = data.map_parallel((lambda d: {**d, 'image': (tf.to_float(d['image']) / 255.0)}))
        if config['warped_pair']['enable']:
            data = data.map_parallel((lambda d: {**d, 'warped': {**d['warped'], 'image': (tf.to_float(d['warped']['image']) / 255.0)}}))
        return data
class Mnist(BaseDataset):
    """MNIST exposed through the BaseDataset `tf.data` interface."""

    default_config = {'validation_size': 500}

    def _init_dataset(self, **config):
        # Downloads/reads MNIST under DATA_PATH/MNIST; keeps images unflattened.
        return input_data.read_data_sets(os.path.join(DATA_PATH, 'MNIST'),
                                         reshape=False,
                                         validation_size=config['validation_size'])

    def _get_data(self, dataset, split_name, **config):
        if split_name == 'training':
            source = dataset.train
        elif split_name == 'validation':
            source = dataset.validation
        elif split_name == 'test':
            source = dataset.test
        ds = tf.data.Dataset.from_tensor_slices({'image': source.images,
                                                 'label': source.labels.astype(int)})
        return ds.shuffle(buffer_size=10000)
class PatchesDataset(BaseDataset):
    """Image-pair dataset for homography evaluation (HPatches or COCO patches).

    Each element pairs a reference image with a warped view and the 3x3
    homography relating them (read from the `H_1_<i>` files on disk).
    """

    default_config = {'dataset': 'hpatches', 'alteration': 'all', 'cache_in_memory': False, 'truncate': None, 'preprocessing': {'resize': False}}

    def _init_dataset(self, **config):
        """Collect image paths, warped-image paths and homographies from disk.

        HPatches folders are named `i_*` (illumination change) or `v_*`
        (viewpoint change); `config['alteration']` filters on that prefix.
        """
        dataset_folder = ('COCO/patches' if (config['dataset'] == 'coco') else 'HPatches')
        base_path = Path(DATA_PATH, dataset_folder)
        folder_paths = [x for x in base_path.iterdir() if x.is_dir()]
        image_paths = []
        warped_image_paths = []
        homographies = []
        for path in folder_paths:
            if ((config['alteration'] == 'i') and (path.stem[0] != 'i')):
                continue
            if ((config['alteration'] == 'v') and (path.stem[0] != 'v')):
                continue
            # HPatches sequences contain images 1..6: image 1 is the
            # reference, images 2..6 are the warped views.
            num_images = (1 if (config['dataset'] == 'coco') else 5)
            file_ext = ('.ppm' if (config['dataset'] == 'hpatches') else '.jpg')
            for i in range(2, (2 + num_images)):
                image_paths.append(str(Path(path, ('1' + file_ext))))
                warped_image_paths.append(str(Path(path, (str(i) + file_ext))))
                homographies.append(np.loadtxt(str(Path(path, ('H_1_' + str(i))))))
        if config['truncate']:
            # Keep only the first `truncate` pairs (quick debugging runs).
            image_paths = image_paths[:config['truncate']]
            warped_image_paths = warped_image_paths[:config['truncate']]
            homographies = homographies[:config['truncate']]
        files = {'image_paths': image_paths, 'warped_image_paths': warped_image_paths, 'homography': homographies}
        return files

    def _get_data(self, files, split_name, **config):
        """Build the tf.data pipeline yielding image / warped_image / homography."""

        def _read_image(path):
            # Runs inside tf.py_func, so `path` arrives as bytes.
            return cv2.imread(path.decode('utf-8'))

        def _preprocess(image):
            tf.Tensor.set_shape(image, [None, None, 3])
            image = tf.image.rgb_to_grayscale(image)
            if config['preprocessing']['resize']:
                image = pipeline.ratio_preserving_resize(image, **config['preprocessing'])
            return tf.to_float(image)

        def _adapt_homography_to_preprocessing(zip_data):
            # Re-express the ground-truth homography in the coordinate frame
            # of the resized (ratio-preserving + center-cropped/padded)
            # images: H' = T_warped * S_down * H * S_up * T.
            H = tf.cast(zip_data['homography'], tf.float32)
            source_size = tf.cast(zip_data['shape'], tf.float32)
            source_warped_size = tf.cast(zip_data['warped_shape'], tf.float32)
            target_size = tf.cast(tf.convert_to_tensor(config['preprocessing']['resize']), tf.float32)
            # Ratio-preserving scale from target frame back to source frame.
            s = tf.reduce_max(tf.divide(target_size, source_size))
            up_scale = tf.diag(tf.stack([(1.0 / s), (1.0 / s), tf.constant(1.0)]))
            warped_s = tf.reduce_max(tf.divide(target_size, source_warped_size))
            down_scale = tf.diag(tf.stack([warped_s, warped_s, tf.constant(1.0)]))
            # Account for the centered crop/pad of the reference image...
            pad_y = tf.to_int32((((source_size[0] * s) - target_size[0]) / tf.constant(2.0)))
            pad_x = tf.to_int32((((source_size[1] * s) - target_size[1]) / tf.constant(2.0)))
            translation = tf.stack([tf.constant(1), tf.constant(0), pad_x, tf.constant(0), tf.constant(1), pad_y, tf.constant(0), tf.constant(0), tf.constant(1)])
            translation = tf.to_float(tf.reshape(translation, [3, 3]))
            # ...and of the warped image (opposite sign: target -> warped frame).
            pad_y = tf.to_int32((((source_warped_size[0] * warped_s) - target_size[0]) / tf.constant(2.0)))
            pad_x = tf.to_int32((((source_warped_size[1] * warped_s) - target_size[1]) / tf.constant(2.0)))
            warped_translation = tf.stack([tf.constant(1), tf.constant(0), (- pad_x), tf.constant(0), tf.constant(1), (- pad_y), tf.constant(0), tf.constant(0), tf.constant(1)])
            warped_translation = tf.to_float(tf.reshape(warped_translation, [3, 3]))
            H = ((((warped_translation @ down_scale) @ H) @ up_scale) @ translation)
            return H

        def _get_shape(image):
            # Height and width only (drops the channel dimension).
            return tf.shape(image)[:2]

        images = tf.data.Dataset.from_tensor_slices(files['image_paths'])
        images = images.map((lambda path: tf.py_func(_read_image, [path], tf.uint8)))
        homographies = tf.data.Dataset.from_tensor_slices(np.array(files['homography']))
        warped_images = tf.data.Dataset.from_tensor_slices(files['warped_image_paths'])
        warped_images = warped_images.map((lambda path: tf.py_func(_read_image, [path], tf.uint8)))
        if config['preprocessing']['resize']:
            # The homographies must be rewritten to match the resized frames.
            shapes = images.map(_get_shape)
            warped_shapes = warped_images.map(_get_shape)
            homographies = tf.data.Dataset.zip({'homography': homographies, 'shape': shapes, 'warped_shape': warped_shapes})
            homographies = homographies.map(_adapt_homography_to_preprocessing)
        images = images.map(_preprocess)
        warped_images = warped_images.map(_preprocess)
        # Scale pixel intensities to [0, 1].
        images = images.map((lambda img: (tf.to_float(img) / 255.0)))
        warped_images = warped_images.map((lambda img: (tf.to_float(img) / 255.0)))
        data = tf.data.Dataset.zip({'image': images, 'warped_image': warped_images, 'homography': homographies})
        return data
class SyntheticShapes(BaseDataset):
    """Synthetic geometric-shapes dataset with ground-truth corner keypoints.

    Shapes are drawn by `synthetic_dataset` primitives (lines, polygons,
    checkerboards, ...). Data is either generated on the fly or rendered
    once per primitive, archived as a tarball under DATA_PATH, and then
    streamed from disk.
    """

    default_config = {
        'primitives': 'all',
        'truncate': {},
        'validation_size': -1,   # -1: take the whole split
        'test_size': -1,
        'on-the-fly': False,
        'cache_in_memory': False,
        'suffix': None,
        'add_augmentation_to_test_set': False,
        'num_parallel_calls': 10,
        'generation': {
            'split_sizes': {'training': 10000, 'validation': 200, 'test': 500},
            'image_size': [960, 1280],
            'random_seed': 0,
            'params': {
                'generate_background': {'min_kernel_size': 150, 'max_kernel_size': 500,
                                        'min_rad_ratio': 0.02, 'max_rad_ratio': 0.031},
                'draw_stripes': {'transform_params': (0.1, 0.1)},
                'draw_multiple_polygons': {'kernel_boundaries': (50, 100)},
            },
        },
        'preprocessing': {'resize': [240, 320], 'blur_size': 11},
        'augmentation': {
            'photometric': {'enable': False, 'primitives': 'all', 'params': {},
                            'random_order': True},
            'homographic': {'enable': False, 'params': {}, 'valid_border_margin': 0},
        },
    }
    drawing_primitives = ['draw_lines', 'draw_polygon', 'draw_multiple_polygons',
                          'draw_ellipses', 'draw_star', 'draw_checkerboard',
                          'draw_stripes', 'draw_cube', 'gaussian_noise']

    def dump_primitive_data(self, primitive, tar_path, config):
        """Render all splits for one primitive and archive them as a tarball."""
        temp_dir = Path(os.environ['TMPDIR'], primitive)
        tf.logging.info('Generating tarfile for primitive {}.'.format(primitive))
        synthetic_dataset.set_random_state(
                np.random.RandomState(config['generation']['random_seed']))
        for split, size in self.config['generation']['split_sizes'].items():
            im_dir, pts_dir = [Path(temp_dir, i, split) for i in ['images', 'points']]
            im_dir.mkdir(parents=True, exist_ok=True)
            pts_dir.mkdir(parents=True, exist_ok=True)
            for i in tqdm(range(size), desc=split, leave=False):
                image = synthetic_dataset.generate_background(
                        config['generation']['image_size'],
                        **config['generation']['params']['generate_background'])
                points = np.array(getattr(synthetic_dataset, primitive)(
                        image, **config['generation']['params'].get(primitive, {})))
                # Convert (x, y) to (row, col) ordering.
                points = np.flip(points, 1)
                b = config['preprocessing']['blur_size']
                image = cv2.GaussianBlur(image, (b, b), 0)
                # Rescale keypoints to the resized image frame.
                # FIX: `np.float` was deprecated in NumPy 1.20 and removed in
                # 1.24; use the builtin `float` (same dtype, float64).
                points = ((points * np.array(config['preprocessing']['resize'], float))
                          / np.array(config['generation']['image_size'], float))
                image = cv2.resize(image, tuple(config['preprocessing']['resize'][::-1]),
                                   interpolation=cv2.INTER_LINEAR)
                cv2.imwrite(str(Path(im_dir, '{}.png'.format(i))), image)
                np.save(Path(pts_dir, '{}.npy'.format(i)), points)
        tar = tarfile.open(tar_path, mode='w:gz')
        tar.add(temp_dir, arcname=primitive)
        tar.close()
        shutil.rmtree(temp_dir)
        tf.logging.info('Tarfile dumped to {}.'.format(tar_path))

    def _init_dataset(self, **config):
        """Ensure the per-primitive archives exist and list the files per split."""
        primitives = parse_primitives(config['primitives'], self.drawing_primitives)
        # Monkey-patch a parallel map helper used throughout `_get_data`.
        tf.data.Dataset.map_parallel = (
                lambda self, fn: self.map(fn, num_parallel_calls=config['num_parallel_calls']))
        if config['on-the-fly']:
            return None
        basepath = Path(DATA_PATH,
                        ('synthetic_shapes'
                         + ('_{}'.format(config['suffix']) if config['suffix'] is not None else '')))
        basepath.mkdir(parents=True, exist_ok=True)
        splits = {s: {'images': [], 'points': []}
                  for s in ['training', 'validation', 'test']}
        for primitive in primitives:
            tar_path = Path(basepath, '{}.tar.gz'.format(primitive))
            if not tar_path.exists():
                self.dump_primitive_data(primitive, tar_path, config)
            tf.logging.info('Extracting archive for primitive {}.'.format(primitive))
            tar = tarfile.open(tar_path)
            temp_dir = Path(os.environ['TMPDIR'])
            tar.extractall(path=temp_dir)
            tar.close()
            # `truncate` is a per-primitive fraction of the split to keep.
            truncate = config['truncate'].get(primitive, 1)
            path = Path(temp_dir, primitive)
            for s in splits:
                e = [str(p) for p in Path(path, 'images', s).iterdir()]
                f = [p.replace('images', 'points') for p in e]
                f = [p.replace('.png', '.npy') for p in f]
                splits[s]['images'].extend(e[:int(truncate * len(e))])
                splits[s]['points'].extend(f[:int(truncate * len(f))])
        # Deterministically shuffle images and keypoint files in lockstep.
        for s in splits:
            perm = np.random.RandomState(0).permutation(len(splits[s]['images']))
            for obj in ['images', 'points']:
                splits[s][obj] = np.array(splits[s][obj])[perm].tolist()
        return splits

    def _get_data(self, filenames, split_name, **config):
        """Build the tf.data pipeline for one split (on-the-fly or from disk)."""

        def _gen_shape():
            # Infinite generator: one random primitive per sample.
            primitives = parse_primitives(config['primitives'], self.drawing_primitives)
            while True:
                primitive = np.random.choice(primitives)
                image = synthetic_dataset.generate_background(
                        config['generation']['image_size'],
                        **config['generation']['params']['generate_background'])
                points = np.array(getattr(synthetic_dataset, primitive)(
                        image, **config['generation']['params'].get(primitive, {})))
                yield (np.expand_dims(image, axis=-1).astype(np.float32),
                       np.flip(points.astype(np.float32), 1))

        def _read_image(filename):
            image = tf.read_file(filename)
            image = tf.image.decode_png(image, channels=1)
            return tf.cast(image, tf.float32)

        def _read_points(filename):
            # Runs inside tf.py_func, so `filename` arrives as bytes.
            return np.load(filename.decode('utf-8')).astype(np.float32)

        if config['on-the-fly']:
            data = tf.data.Dataset.from_generator(
                    _gen_shape, (tf.float32, tf.float32),
                    (tf.TensorShape(config['generation']['image_size'] + [1]),
                     tf.TensorShape([None, 2])))
            data = data.map(lambda i, c: pipeline.downsample(i, c, **config['preprocessing']))
        else:
            data = tf.data.Dataset.from_tensor_slices(
                    (filenames[split_name]['images'], filenames[split_name]['points']))
            data = data.map(lambda image, points: (
                    _read_image(image), tf.py_func(_read_points, [points], tf.float32)))
            data = data.map(lambda image, points: (image, tf.reshape(points, [-1, 2])))
        if split_name == 'validation':
            data = data.take(config['validation_size'])
        elif split_name == 'test':
            data = data.take(config['test_size'])
        data = data.map(lambda image, kp: {'image': image, 'keypoints': kp})
        data = data.map(pipeline.add_dummy_valid_mask)
        if config['cache_in_memory'] and not config['on-the-fly']:
            tf.logging.info('Caching data, first access will take some time.')
            data = data.cache()
        # Augmentations apply to training (and optionally to the test set).
        if (split_name == 'training') or config['add_augmentation_to_test_set']:
            if config['augmentation']['photometric']['enable']:
                data = data.map_parallel(lambda d: pipeline.photometric_augmentation(
                        d, **config['augmentation']['photometric']))
            if config['augmentation']['homographic']['enable']:
                data = data.map_parallel(lambda d: pipeline.homographic_augmentation(
                        d, **config['augmentation']['homographic']))
        data = data.map_parallel(pipeline.add_keypoint_map)
        data = data.map_parallel(lambda d: {**d, 'image': tf.to_float(d['image']) / 255.0})
        return data
def get_evaluation(name):
    """Dynamically import the evaluation class for the module called `name`.

    The class name is derived from the snake_case module name
    (e.g. 'detector_evaluation' -> 'DetectorEvaluation').
    """
    module = __import__('evaluations.{}'.format(name), fromlist=[''])
    class_name = _module_to_class(name)
    return getattr(module, class_name)
def _module_to_class(name): return ''.join((n.capitalize() for n in name.split('_')))
def train(config, n_iter, output_dir, pretrained_dir=None, checkpoint_name='model.ckpt'):
    """Train a model for `n_iter` iterations.

    Arguments:
        config: Configuration dict with `model` and `data` sections (consumed
            by `_init_graph`), plus optional `validation_interval`,
            `save_interval` and `keep_checkpoints` entries.
        n_iter: Number of training iterations.
        output_dir: Directory receiving checkpoints, summaries and config.
        pretrained_dir: Optional directory of a pretrained model to load
            before training starts.
        checkpoint_name: Base filename of the saved checkpoints.
    """
    checkpoint_path = os.path.join(output_dir, checkpoint_name)
    with _init_graph(config) as net:
        if pretrained_dir is not None:
            net.load(pretrained_dir)
        try:
            net.train(n_iter, output_dir=output_dir,
                      validation_interval=config.get('validation_interval', 100),
                      save_interval=config.get('save_interval', None),
                      checkpoint_path=checkpoint_path,
                      keep_checkpoints=config.get('keep_checkpoints', 1))
        except KeyboardInterrupt:
            logging.info('Got Keyboard Interrupt, saving model and closing.')
            # Reuse the path computed above instead of re-joining it; the
            # original duplicated the join, risking divergence if one copy
            # were ever edited.
            net.save(checkpoint_path)
def evaluate(config, output_dir, n_iter=None):
    """Restore the model from `output_dir` and run evaluation.

    Evaluates on `config['eval_set']` ('test' by default), optionally capped
    at `n_iter` iterations, and returns the metrics dictionary.
    """
    with _init_graph(config) as net:
        net.load(output_dir)
        eval_set = config.get('eval_set', 'test')
        return net.evaluate(eval_set, max_iterations=n_iter)
def predict(config, output_dir, n_iter):
    """Run the model on `n_iter` samples drawn from the test set.

    Returns a pair `(predictions, samples)` where `predictions[i]` holds all
    network outputs (`keys='*'`) for input `samples[i]`.
    """
    predictions = []
    samples = []
    with _init_graph(config, with_dataset=True) as (net, dataset):
        # Untrainable models have no checkpoint to restore.
        if net.trainable:
            net.load(output_dir)
        test_set = dataset.get_test_set()
        for _ in range(n_iter):
            sample = next(test_set)
            samples.append(sample)
            predictions.append(net.predict(sample, keys='*'))
    return (predictions, samples)
def set_seed(seed):
    """Seed both the NumPy and TensorFlow RNGs for reproducible runs."""
    # The two generators are independent; order does not matter.
    np.random.seed(seed)
    tf.set_random_seed(seed)
def get_num_gpus():
    """Return the number of GPUs exposed through CUDA_VISIBLE_DEVICES.

    Robustness fix: the original raised KeyError when the variable was unset
    and counted an empty string as one device (''.split(',') == ['']).
    Both cases now correctly report 0 GPUs, which the model code supports
    (it falls back to CPU towers).
    """
    devices = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    return len([d for d in devices.split(',') if d.strip()])
@contextmanager
def _init_graph(config, with_dataset=False):
    """Context manager that builds the dataset and model for `config`.

    Seeds the RNGs (from `config['seed']`, or 4 random bytes when absent),
    instantiates the dataset and model, and yields the model — or
    `(model, dataset)` when `with_dataset` is true, in which case the model
    receives no tf datasets. On exit the model is closed and the default
    graph is reset so several runs can share one process.
    """
    set_seed(config.get('seed', int.from_bytes(os.urandom(4), byteorder='big')))
    n_gpus = get_num_gpus()
    logging.info('Number of GPUs detected: {}'.format(n_gpus))
    dataset = get_dataset(config['data']['name'])(**config['data'])
    model = get_model(config['model']['name'])(data=({} if with_dataset else dataset.get_tf_datasets()), n_gpus=n_gpus, **config['model'])
    model.__enter__()
    if with_dataset:
        (yield (model, dataset))
    else:
        (yield model)
    # NOTE(review): if the caller's `with` body raises, execution never
    # returns past the yield, so __exit__/reset are skipped — resources may
    # leak on error paths. Confirm whether that is intentional.
    model.__exit__()
    tf.reset_default_graph()
def _cli_train(config, output_dir, args):
    """Command-line entry point for training; optionally evaluates afterwards."""
    assert 'train_iter' in config

    # Persist the resolved configuration next to the checkpoints.
    with open(os.path.join(output_dir, 'config.yml'), 'w') as config_file:
        yaml.dump(config, config_file, default_flow_style=False)

    pretrained_dir = None
    if args.pretrained_model is not None:
        pretrained_dir = os.path.join(EXPER_PATH, args.pretrained_model)
        if not os.path.exists(pretrained_dir):
            raise ValueError('Missing pretrained model: ' + pretrained_dir)

    train(config, config['train_iter'], output_dir, pretrained_dir)

    if args.eval:
        _cli_eval(config, output_dir, args)
def _cli_eval(config, output_dir, args):
    """Command-line entry point for evaluation.

    Merges the model section of the stored training config with any
    overrides from `config` (overrides win), runs the evaluation and appends
    the results to `eval.txt` in `output_dir`.
    """
    with open(os.path.join(output_dir, 'config.yml'), 'r') as f:
        # safe_load: the config is plain YAML dumped by _cli_train, and
        # yaml.load without an explicit Loader is deprecated and unsafe.
        model_config = yaml.safe_load(f)['model']
    model_config.update(config.get('model', {}))
    config['model'] = model_config

    results = evaluate(config, output_dir, n_iter=config.get('eval_iter'))

    # BUG fix: the original called `pprint(results, indent=2, default=str)`,
    # but pprint.pprint has no `default` parameter (TypeError) and returns
    # None, so the log line would have shown 'None'. Format explicitly.
    formatted = '\n'.join('{}: {}'.format(k, v) for k, v in results.items())
    logging.info('Evaluation results: \n{}'.format(formatted))

    with open(os.path.join(output_dir, 'eval.txt'), 'a') as f:
        f.write('Evaluation for {} dataset:\n'.format(config['data']['name']))
        for r, v in results.items():
            f.write('\t{}:\n\t\t{}\n'.format(r, v))
        f.write('\n')
def _cli_pred(config, args):
    """Command-line entry point for prediction (not implemented yet)."""
    raise NotImplementedError
def get_model(name):
    """Dynamically import the model class defined in `superpoint.models.<name>`.

    The class name is derived from the snake_case module name
    (e.g. 'magic_point' -> 'MagicPoint').
    """
    module_path = 'superpoint.models.{}'.format(name)
    module = __import__(module_path, fromlist=[''])
    return getattr(module, _module_to_class(name))
def _module_to_class(name): return ''.join((n.capitalize() for n in name.split('_')))
def vgg_block(inputs, filters, kernel_size, name, data_format, training=False, batch_normalization=True, kernel_reg=0.0, **params):
    """Single VGG-style unit: conv2d with L2 weight decay, optionally followed
    by fused batch normalization.

    Arguments:
        inputs: Input feature tensor (layout given by `data_format`).
        filters: Number of output channels of the convolution.
        kernel_size: Convolution kernel size.
        name: Variable scope name (shared across calls via AUTO_REUSE).
        data_format: 'channels_first' or 'channels_last'.
        training: Whether batch-norm statistics are being updated.
        batch_normalization: If False, the batch-norm layer is skipped.
        kernel_reg: L2 regularization factor for the conv kernel.
        params: Extra keyword arguments forwarded to `tfl.conv2d`
            (e.g. padding, activation).
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        x = tfl.conv2d(inputs, filters, kernel_size, name='conv', kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_reg), data_format=data_format, **params)
        if batch_normalization:
            # Normalize over the channel axis, whose position depends on the
            # data format (1 for NCHW, last for NHWC).
            x = tfl.batch_normalization(x, training=training, name='bn', fused=True, axis=(1 if (data_format == 'channels_first') else (- 1)))
    return x
def vgg_backbone(inputs, **config):
    """VGG-style convolutional backbone shared by the detector/descriptor heads.

    Stacks eight 3x3 conv blocks (64,64,64,64,128,128,128,128 channels) with
    three 2x2 max-poolings, so the output feature map has 1/8 of the input
    spatial resolution. `config` must provide `data_format` and `training`;
    `kernel_reg` defaults to 0.
    """
    params_conv = {'padding': 'SAME', 'data_format': config['data_format'], 'activation': tf.nn.relu, 'batch_normalization': True, 'training': config['training'], 'kernel_reg': config.get('kernel_reg', 0.0)}
    params_pool = {'padding': 'SAME', 'data_format': config['data_format']}
    with tf.variable_scope('vgg', reuse=tf.AUTO_REUSE):
        x = vgg_block(inputs, 64, 3, 'conv1_1', **params_conv)
        x = vgg_block(x, 64, 3, 'conv1_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params_pool)
        x = vgg_block(x, 64, 3, 'conv2_1', **params_conv)
        x = vgg_block(x, 64, 3, 'conv2_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params_pool)
        x = vgg_block(x, 128, 3, 'conv3_1', **params_conv)
        x = vgg_block(x, 128, 3, 'conv3_2', **params_conv)
        x = tfl.max_pooling2d(x, 2, 2, name='pool3', **params_pool)
        x = vgg_block(x, 128, 3, 'conv4_1', **params_conv)
        x = vgg_block(x, 128, 3, 'conv4_2', **params_conv)
    return x
class Mode:
    """String constants naming the three graph-construction modes.

    Passed as the `mode` argument of `BaseModel._model` to select between
    the training, evaluation and prediction variants of the graph.
    """
    TRAIN = 'train'
    EVAL = 'eval'
    PRED = 'pred'
class BaseModel(metaclass=ABCMeta):
    """Base model class.

    Arguments:
        data: A dictionary of `tf.data.Dataset` objects, can include the keys
            `"training"`, `"validation"`, and `"test"`.
        n_gpus: An integer, the number of GPUs available.
        data_shape: A dictionary, where the keys are the input features of the
            prediction network and the values are the associated shapes. Only
            required if `data` is empty or `None`.
        config: A dictionary containing the configuration parameters.
            Entries `"batch_size"` and `"learning_rate"` are required.

    Models should inherit from this class and implement the following methods:
    `_model`, `_loss`, and `_metrics`.
    Additionally, the following static attributes should be defined:
        input_spec: A dictionary, where the keys are the input features
            (e.g. `"image"`) and the associated values are dictionaries
            containing `"shape"` (list of dimensions, e.g. `[N, H, W, C]`
            where `None` indicates an unconstrained dimension) and `"type"`
            (e.g. `tf.float32`).
        required_config_keys: A list containing the required configuration
            entries.
        default_config: A dictionary of potential default configuration values.
    """

    # Dataset splits a model knows how to consume.
    dataset_names = set(['training', 'validation', 'test'])
    # Config entries every subclass must provide.
    required_baseconfig = ['batch_size', 'learning_rate']
    _default_config = {'eval_batch_size': 1, 'pred_batch_size': 1}

    @abstractmethod
    def _model(self, inputs, mode, **config):
        """Implements the graph of the model.

        This method is called three times: for training, evaluation and
        prediction (see the `mode` argument) and can return different tensors
        depending on the mode. It is a good practice to support both NCHW
        (channels first) and NHWC (channels last) data formats using a
        dedicated configuration entry.

        Arguments:
            inputs: A dictionary of input features, where the keys are their
                names (e.g. `"image"`) and the values of type `tf.Tensor`.
                Same keys as in the datasets given during the object
                instantiation.
            mode: An attribute of the `Mode` class, either `Mode.TRAIN`,
                `Mode.EVAL` or `Mode.PRED`.
            config: A configuration dictionary, given during the object
                instantiantion.

        Returns:
            A dictionary of outputs, where the keys are their names
            (e.g. `"logits"`) and the values are the corresponding `tf.Tensor`.
        """
        raise NotImplementedError

    @abstractmethod
    def _loss(self, outputs, inputs, **config):
        """Implements the sub-graph computing the training loss.

        This method is called on the outputs of the `_model` method in
        training mode.

        Arguments:
            outputs: A dictionary, as retuned by `_model` called with
                `mode=Mode.TRAIN`.
            inputs: A dictionary of input features (see same as for `_model`).
            config: A configuration dictionary.

        Returns:
            A tensor corresponding to the loss to be minimized during training.
        """
        raise NotImplementedError

    @abstractmethod
    def _metrics(self, outputs, inputs, **config):
        """Implements the sub-graph computing the evaluation metrics.

        This method is called on the outputs of the `_model` method in
        evaluation mode.

        Arguments:
            outputs: A dictionary, as retuned by `_model` called with
                `mode=Mode.EVAL`.
            inputs: A dictionary of input features (see same as for `_model`).
            config: A configuration dictionary.

        Returns:
            A dictionary of metrics, where the keys are their names
            (e.g. "`accuracy`") and the values are the corresponding
            `tf.Tensor`.
        """
        raise NotImplementedError

    def __init__(self, data={}, n_gpus=1, data_shape=None, **config):
        """Validate the configuration and build the whole graph + session."""
        self.datasets = data
        self.data_shape = data_shape
        self.n_gpus = n_gpus
        self.graph = tf.get_default_graph()
        self.name = self.__class__.__name__.lower()
        # Subclasses may set `trainable = False` as a class attribute.
        self.trainable = getattr(self, 'trainable', True)
        # Config precedence: base defaults < subclass defaults < caller kwargs.
        self.config = dict_update(self._default_config, getattr(self, 'default_config', {}))
        self.config = dict_update(self.config, config)
        required = (self.required_baseconfig + getattr(self, 'required_config_keys', []))
        for r in required:
            assert (r in self.config), "Required configuration entry: '{}'".format(r)
        assert (set(self.datasets) <= self.dataset_names), 'Unknown dataset name: {}'.format((set(self.datasets) - self.dataset_names))
        assert (n_gpus >= 0)
        # All variables of the model live under a scope named after the class.
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            self._build_graph()

    def _unstack_nested_dict(self, d, num):
        # Split every tensor of a (possibly nested) dict along the batch axis
        # into a list of `num` single-example tensors.
        return {k: (self._unstack_nested_dict(v, num) if isinstance(v, dict) else tf.unstack(v, num=num, axis=0)) for (k, v) in d.items()}

    def _shard_nested_dict(self, d, num):
        # Regroup the unstacked examples into `num` per-device batches
        # (round-robin assignment via the `v[i::num]` stride).
        shards = [{} for _ in range(num)]
        for (k, v) in d.items():
            if isinstance(v, dict):
                stack = self._shard_nested_dict(v, num)
            else:
                stack = [tf.stack(v[i::num]) for i in range(num)]
            shards = [{**s, k: stack[i]} for (i, s) in enumerate(shards)]
        return shards

    def _gpu_tower(self, data, mode, batch_size):
        """Replicate the model on every device and collect per-tower results.

        Returns (losses, gradvars, update_ops) in TRAIN mode, the list of
        per-tower metric dicts in EVAL mode, and the re-interleaved network
        outputs in PRED mode.
        """
        # With 0 GPUs a single CPU tower is built.
        n_shards = max(1, self.n_gpus)
        device = ('cpu' if (self.n_gpus == 0) else 'gpu')
        with tf.device('/cpu:0'):
            with tf.name_scope('{}_data_sharding'.format(mode)):
                shards = self._unstack_nested_dict(data, (batch_size * n_shards))
                shards = self._shard_nested_dict(shards, n_shards)
        tower_losses = []
        tower_gradvars = []
        tower_preds = []
        tower_metrics = []
        for i in range(n_shards):
            worker = '/{}:{}'.format(device, i)
            # Variables stay on the CPU; ops run on the tower's device.
            device_setter = tf.train.replica_device_setter(worker_device=worker, ps_device='/cpu:0', ps_tasks=1)
            with tf.name_scope('{}_tower{}'.format(mode, i)) as scope:
                with tf.device(device_setter):
                    net_outputs = self._model(shards[i], mode, **self.config)
                    if (mode == Mode.TRAIN):
                        loss = self._loss(net_outputs, shards[i], **self.config)
                        loss += tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope))
                        model_params = tf.trainable_variables()
                        grad = tf.gradients(loss, model_params)
                        tower_losses.append(loss)
                        tower_gradvars.append(zip(grad, model_params))
                        if (i == 0):
                            # Batch-norm (and similar) updates are only taken
                            # from the first tower.
                            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
                    elif (mode == Mode.EVAL):
                        tower_metrics.append(self._metrics(net_outputs, shards[i], **self.config))
                    else:
                        tower_preds.append(net_outputs)
        if (mode == Mode.TRAIN):
            return (tower_losses, tower_gradvars, update_ops)
        elif (mode == Mode.EVAL):
            return tower_metrics
        else:
            # Interleave per-tower predictions back into the original
            # round-robin batch order used by _shard_nested_dict.
            return {k: tf.stack([v for z in zip(*[tf.unstack(p[k], num=batch_size) for p in tower_preds]) for v in z]) for k in tower_preds[0]}

    def _train_graph(self, data):
        """Build the training graph: tower losses, averaged gradients, Adam."""
        (tower_losses, tower_gradvars, update_ops) = self._gpu_tower(data, Mode.TRAIN, self.config['batch_size'])
        gradvars = []
        with tf.device('/cpu:0'):
            # Average losses and gradients over the towers on the CPU.
            with tf.name_scope('tower_averaging'):
                all_grads = {}
                for (grad, var) in itertools.chain(*tower_gradvars):
                    if (grad is not None):
                        all_grads.setdefault(var, []).append(grad)
                for (var, grads) in all_grads.items():
                    if (len(grads) == 1):
                        avg_grad = grads[0]
                    else:
                        avg_grad = tf.multiply(tf.add_n(grads), (1.0 / len(grads)))
                    gradvars.append((avg_grad, var))
                self.loss = tf.reduce_mean(tower_losses)
                tf.summary.scalar('loss', self.loss)
            self.global_step = tf.Variable(0, trainable=False, name='global_step')
            opt = tf.train.AdamOptimizer(self.config['learning_rate'])
            # Run UPDATE_OPS (e.g. batch-norm moving averages) with each step.
            with tf.control_dependencies(update_ops):
                self.trainer = opt.apply_gradients(gradvars, global_step=self.global_step)

    def _eval_graph(self, data):
        """Build the evaluation graph: metrics averaged over the towers."""
        tower_metrics = self._gpu_tower(data, Mode.EVAL, self.config['eval_batch_size'])
        with tf.device('/cpu:0'):
            self.metrics = {m: tf.reduce_mean(tf.stack([t[m] for t in tower_metrics])) for m in tower_metrics[0]}

    def _pred_graph(self, data):
        """Build the prediction graph; outputs are named for later retrieval."""
        pred_out = self._gpu_tower(data, Mode.PRED, self.config['pred_batch_size'])
        self.pred_out = {n: tf.identity(p, name=n) for (n, p) in pred_out.items()}

    def _build_graph(self):
        """Assemble iterators, train/eval/pred graphs and create the session."""
        # Training and evaluation networks are only built when tf datasets
        # were provided at construction time.
        if self.datasets:
            self.dataset_iterators = {}
            with tf.device('/cpu:0'):
                for (n, d) in self.datasets.items():
                    output_shapes = d.output_shapes
                    if (n == 'training'):
                        train_batch = (self.config['batch_size'] * self.n_gpus)
                        d = d.repeat().padded_batch(train_batch, output_shapes).prefetch(train_batch)
                        self.dataset_iterators[n] = d.make_one_shot_iterator()
                    else:
                        d = d.padded_batch((self.config['eval_batch_size'] * self.n_gpus), output_shapes)
                        # Initializable so evaluation can be re-run from the start.
                        self.dataset_iterators[n] = d.make_initializable_iterator()
                    output_types = d.output_types
                    output_shapes = d.output_shapes
                    self.datasets[n] = d
                    # Check the dataset features against the declared spec.
                    for (i, spec) in self.input_spec.items():
                        assert (i in output_shapes)
                        tf.TensorShape(output_shapes[i]).assert_is_compatible_with(tf.TensorShape(spec['shape']))
                if (self.data_shape is None):
                    self.data_shape = output_shapes
                # A string handle lets one graph switch between datasets at
                # run time via the feed dict.
                self.handle = tf.placeholder(tf.string, shape=[])
                iterator = tf.data.Iterator.from_string_handle(self.handle, output_types, output_shapes)
                data = iterator.get_next()
            if self.trainable:
                self._train_graph(data)
                self._eval_graph(data)
                self.summaries = tf.summary.merge_all()
        if (self.data_shape is None):
            self.data_shape = {i: spec['shape'] for (i, spec) in self.input_spec.items()}
        # Prediction network fed through placeholders (always built).
        self.pred_in = {i: tf.placeholder(spec['type'], shape=self.data_shape[i], name=i) for (i, spec) in self.input_spec.items()}
        self._pred_graph(self.pred_in)
        sess_config = tf.ConfigProto(device_count={'GPU': self.n_gpus}, allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        if self.datasets:
            self.dataset_handles = {}
            for (n, i) in self.dataset_iterators.items():
                self.dataset_handles[n] = self.sess.run(i.string_handle())
        self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

    def train(self, iterations, validation_interval=100, output_dir=None, profile=False, save_interval=None, checkpoint_path=None, keep_checkpoints=1):
        """Run the training loop.

        Arguments:
            iterations: Number of optimization steps.
            validation_interval: Steps between validation runs (when a
                validation dataset exists).
            output_dir: Directory for summaries and profiling traces.
            profile: If True, record full Chrome traces of each step.
            save_interval: Steps between checkpoint saves (requires
                `checkpoint_path`).
            checkpoint_path: Where to save intermediate checkpoints.
            keep_checkpoints: Number of checkpoints kept by the saver.
        """
        assert self.trainable, 'Model is not trainable.'
        assert ('training' in self.datasets), 'Training dataset is required.'
        if (output_dir is not None):
            train_writer = tf.summary.FileWriter(output_dir)
        if (not hasattr(self, 'saver')):
            with tf.device('/cpu:0'):
                self.saver = tf.train.Saver(save_relative_paths=True, max_to_keep=keep_checkpoints)
        # Freeze the graph: any accidental op creation below would now fail.
        if (not self.graph.finalized):
            self.graph.finalize()
        if profile:
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            (options, run_metadata) = (None, None)
        tf.logging.info('Start training')
        for i in range(iterations):
            (loss, summaries, _) = self.sess.run([self.loss, self.summaries, self.trainer], feed_dict={self.handle: self.dataset_handles['training']}, options=options, run_metadata=run_metadata)
            if (save_interval and checkpoint_path and (((i + 1) % save_interval) == 0)):
                self.save(checkpoint_path)
            if (('validation' in self.datasets) and ((i % validation_interval) == 0)):
                metrics = self.evaluate('validation', mute=True)
                tf.logging.info(('Iter {:4d}: loss {:.4f}'.format(i, loss) + ''.join([', {} {:.4f}'.format(m, metrics[m]) for m in metrics])))
                if (output_dir is not None):
                    train_writer.add_summary(summaries, i)
                    metrics_summaries = tf.Summary(value=[tf.Summary.Value(tag=m, simple_value=v) for (m, v) in metrics.items()])
                    train_writer.add_summary(metrics_summaries, i)
            if (profile and (i != 0)):
                # Skip step 0: it includes one-time graph warm-up costs.
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                with open(osp.join(output_dir, 'profile_{}.json'.format(i)), 'w') as f:
                    f.write(chrome_trace)
        tf.logging.info('Training finished')

    def predict(self, data, keys='pred', batch=False):
        """Run the prediction network on `data`.

        Arguments:
            data: Dict of input features matching `input_spec` (a single
                example when `batch` is False, batched tensors otherwise).
            keys: Output name, iterable of names, or '*' for all outputs.
            batch: Whether `data` is already batched.

        Returns:
            The requested output(s), unbatched again when `batch` is False.
        """
        assert (set(data.keys()) >= set(self.input_spec.keys()))
        if isinstance(keys, str):
            if (keys == '*'):
                op = self.pred_out
            else:
                op = self.pred_out[keys]
        else:
            op = {k: self.pred_out[k] for k in keys}
        if (not batch):
            # Add a singleton batch dimension around every feature.
            data = {d: [v] for (d, v) in data.items()}
        feed = {self.pred_in[i]: data[i] for i in self.input_spec}
        pred = self.sess.run(op, feed_dict=feed)
        if (not batch):
            # Strip the singleton batch dimension again.
            if isinstance(pred, dict):
                pred = {p: v[0] for (p, v) in pred.items()}
            else:
                pred = pred[0]
        return pred

    def evaluate(self, dataset, max_iterations=None, mute=False):
        """Compute the metrics over a whole dataset (or `max_iterations` batches).

        Returns a dict mapping metric names to their nan-mean over batches.
        """
        assert (dataset in self.datasets)
        # Restart the split from its beginning.
        self.sess.run(self.dataset_iterators[dataset].initializer)
        if (not mute):
            tf.logging.info("Starting evaluation of dataset '{}'".format(dataset))
            if max_iterations:
                pbar = tqdm(total=max_iterations, ascii=True)
        i = 0
        metrics = []
        while True:
            try:
                metrics.append(self.sess.run(self.metrics, feed_dict={self.handle: self.dataset_handles[dataset]}))
            except tf.errors.OutOfRangeError:
                break
            if max_iterations:
                i += 1
                if (not mute):
                    pbar.update(1)
                if (i == max_iterations):
                    break
        if (not mute):
            tf.logging.info('Finished evaluation')
            if max_iterations:
                pbar.close()
        # Transpose list-of-dicts into dict-of-lists, then average
        # (nanmean tolerates batches where a metric was undefined).
        metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics])))
        metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics}
        return metrics

    def load(self, checkpoint_path):
        """Restore variables from a checkpoint file or directory."""
        with tf.device('/cpu:0'):
            saver = tf.train.Saver(save_relative_paths=True)
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
            if (checkpoint_path is None):
                raise ValueError('Checkpoint directory is empty.')
        saver.restore(self.sess, checkpoint_path)

    def save(self, checkpoint_path):
        """Save a checkpoint tagged with the current global step."""
        step = self.sess.run(self.global_step)
        tf.logging.info('Saving checkpoint for iteration #{}'.format(step))
        self.saver.save(self.sess, checkpoint_path, write_meta_graph=False, global_step=step)

    def close(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
class MagicPoint(BaseModel):
    """Keypoint detector: VGG backbone + detector head, optionally wrapped in
    homography adaptation at prediction time.
    """

    input_spec = {'image': {'shape': [None, None, None, 1], 'type': tf.float32}}
    required_config_keys = []
    default_config = {'data_format': 'channels_first', 'kernel_reg': 0.0, 'grid_size': 8, 'detection_threshold': 0.4, 'homography_adaptation': {'num': 0}, 'nms': 0, 'top_k': 0}

    def _model(self, inputs, mode, **config):
        config['training'] = (mode == Mode.TRAIN)
        image = inputs['image']

        def net(image):
            # Backbone + detector head; input arrives as NHWC and is
            # transposed when the model runs channels-first.
            if (config['data_format'] == 'channels_first'):
                image = tf.transpose(image, [0, 3, 1, 2])
            features = vgg_backbone(image, **config)
            outputs = detector_head(features, **config)
            return outputs

        # Homography adaptation (averaging over warped copies) only applies
        # at prediction time and when explicitly enabled.
        if ((mode == Mode.PRED) and config['homography_adaptation']['num']):
            outputs = homography_adaptation(image, net, config['homography_adaptation'])
        else:
            outputs = net(image)
        prob = outputs['prob']
        if config['nms']:
            prob = tf.map_fn((lambda p: box_nms(p, config['nms'], min_prob=config['detection_threshold'], keep_top_k=config['top_k'])), prob)
            outputs['prob_nms'] = prob
        # Binary detection map after thresholding (post-NMS when enabled).
        pred = tf.to_int32(tf.greater_equal(prob, config['detection_threshold']))
        outputs['pred'] = pred
        return outputs

    def _loss(self, outputs, inputs, **config):
        # detector_loss expects NHWC logits.
        if (config['data_format'] == 'channels_first'):
            outputs['logits'] = tf.transpose(outputs['logits'], [0, 2, 3, 1])
        return detector_loss(inputs['keypoint_map'], outputs['logits'], valid_mask=inputs['valid_mask'], **config)

    def _metrics(self, outputs, inputs, **config):
        # Pixel-wise precision/recall of detections inside the valid mask.
        pred = (inputs['valid_mask'] * outputs['pred'])
        labels = inputs['keypoint_map']
        precision = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(pred))
        recall = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(labels))
        return {'precision': precision, 'recall': recall}
class SimpleClassifier(BaseModel):
    """Small LeNet-style CNN classifier (2 conv + 2 fc, 10 output classes);
    mainly a sanity-check model for the training framework.
    """

    input_spec = {'image': {'shape': [None, None, None, 1], 'type': tf.float32}}
    required_config_keys = []
    default_config = {'data_format': 'channels_first'}

    def _model(self, inputs, mode, **config):
        x = inputs['image']
        # Input arrives as NHWC; transpose when running channels-first.
        if (config['data_format'] == 'channels_first'):
            x = tf.transpose(x, [0, 3, 1, 2])
        params = {'padding': 'SAME', 'data_format': config['data_format']}
        x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)
        x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)
        x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)
        x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)
        x = tfl.flatten(x)
        x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')
        x = tfl.dense(x, 10, name='fc2')
        # Softmax/argmax are only needed outside training.
        if (mode == Mode.TRAIN):
            return {'logits': x}
        else:
            return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=(- 1))}

    def _loss(self, outputs, inputs, **config):
        # Mean cross-entropy against the integer class labels.
        with tf.name_scope('loss'):
            loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=inputs['label'], logits=outputs['logits']))
        return loss

    def _metrics(self, outputs, inputs, **config):
        # Top-1 accuracy.
        metrics = {}
        with tf.name_scope('metrics'):
            correct_count = tf.equal(outputs['pred'], inputs['label'])
            correct_count = tf.cast(correct_count, tf.float32)
            metrics['accuracy'] = tf.reduce_mean(correct_count)
        return metrics
class SuperPoint(BaseModel):
    # Joint interest-point detector + descriptor network (SuperPoint).
    # During training, a second homography-warped view is processed with the
    # same weights so a descriptor loss can relate the two views.
    input_spec = {'image': {'shape': [None, None, None, 1], 'type': tf.float32}}
    required_config_keys = []
    # Fix: 'descriptor_size' appeared twice in this dict literal; duplicate
    # keys silently keep only the last value, so a single entry suffices.
    default_config = {'data_format': 'channels_first', 'grid_size': 8, 'detection_threshold': 0.4, 'descriptor_size': 256, 'batch_size': 32, 'learning_rate': 0.001, 'lambda_d': 250, 'positive_margin': 1, 'negative_margin': 0.2, 'lambda_loss': 0.0001, 'nms': 0, 'top_k': 0}

    def _model(self, inputs, mode, **config):
        """Build the forward graph; returns detections + descriptors (and warped results in training)."""
        config['training'] = (mode == Mode.TRAIN)

        def net(image):
            # The backbone expects NCHW when data_format is channels_first.
            if (config['data_format'] == 'channels_first'):
                image = tf.transpose(image, [0, 3, 1, 2])
            features = vgg_backbone(image, **config)
            detections = utils.detector_head(features, **config)
            descriptors = utils.descriptor_head(features, **config)
            return {**detections, **descriptors}

        results = net(inputs['image'])
        if config['training']:
            # Second pass over the warped view, sharing weights with `net`.
            warped_results = net(inputs['warped']['image'])
            results = {**results, 'warped_results': warped_results, 'homography': inputs['warped']['homography']}
        prob = results['prob']
        if config['nms']:
            # NOTE(review): unlike MagicPoint, min_prob is not forwarded to
            # box_nms here — confirm whether that is intentional.
            prob = tf.map_fn((lambda p: utils.box_nms(p, config['nms'], keep_top_k=config['top_k'])), prob)
            results['prob_nms'] = prob
        results['pred'] = tf.to_int32(tf.greater_equal(prob, config['detection_threshold']))
        return results

    def _loss(self, outputs, inputs, **config):
        logits = outputs['logits']
        warped_logits = outputs['warped_results']['logits']
        descriptors = outputs['descriptors_raw']
        warped_descriptors = outputs['warped_results']['descriptors_raw']
        # Losses are computed in NHWC layout.
        if (config['data_format'] == 'channels_first'):
            logits = tf.transpose(logits, [0, 2, 3, 1])
            warped_logits = tf.transpose(warped_logits, [0, 2, 3, 1])
            descriptors = tf.transpose(descriptors, [0, 2, 3, 1])
            warped_descriptors = tf.transpose(warped_descriptors, [0, 2, 3, 1])
        # Detector loss on both views + weighted descriptor loss between them.
        detector_loss = utils.detector_loss(inputs['keypoint_map'], logits, valid_mask=inputs['valid_mask'], **config)
        warped_detector_loss = utils.detector_loss(inputs['warped']['keypoint_map'], warped_logits, valid_mask=inputs['warped']['valid_mask'], **config)
        descriptor_loss = utils.descriptor_loss(descriptors, warped_descriptors, outputs['homography'], valid_mask=inputs['warped']['valid_mask'], **config)
        tf.summary.scalar('detector_loss1', detector_loss)
        tf.summary.scalar('detector_loss2', warped_detector_loss)
        tf.summary.scalar('detector_loss_full', (detector_loss + warped_detector_loss))
        tf.summary.scalar('descriptor_loss', (config['lambda_loss'] * descriptor_loss))
        loss = ((detector_loss + warped_detector_loss) + (config['lambda_loss'] * descriptor_loss))
        return loss

    def _metrics(self, outputs, inputs, **config):
        # Pixel-level precision/recall restricted to the valid region.
        pred = (inputs['valid_mask'] * outputs['pred'])
        labels = inputs['keypoint_map']
        precision = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(pred))
        recall = (tf.reduce_sum((pred * labels)) / tf.reduce_sum(labels))
        return {'precision': precision, 'recall': recall}
class Bitset(Sequence):
    """
    A very simple bitset implementation for Python.

    Author: Geremy Condra
    Licensed under GPLv3
    Released 3 May 2009

    Usage:
        >>> b = Bitset(5)
        >>> b
        Bitset(101)
        >>> b[:]
        [True, False, True]
        >>> b[0] = False
        >>> b
        Bitset(001)
        >>> b << 1
        Bitset(010)
        >>> b >> 1
        Bitset(000)
        >>> b & 1
        Bitset(001)
        >>> b | 2
        Bitset(011)
        >>> b ^ 6
        Bitset(111)
        >>> ~b
        Bitset(110)
    """

    # Class-level defaults; instances overwrite both in __init__.
    value = 0
    length = 0

    @classmethod
    def from_sequence(cls, seq):
        """
        Iterates over the sequence to produce a new Bitset.
        As in integers, the 0 position represents the LSB.
        """
        n = 0
        for index, bit in enumerate(reversed(seq)):
            n += (2 ** index) * bool(int(bit))
        return Bitset(n)

    def __init__(self, value=0, length=0):
        """Creates a Bitset with the given integer value."""
        self.value = value
        try:
            # Without an explicit length, derive it from the value:
            # floor(log2(value)) + 1 is the bit length for value >= 1.
            self.length = length or (math.floor(math.log(value, 2)) + 1)
        except (ValueError, TypeError):
            # Fix: narrowed from a bare `except Exception`. math.log raises
            # ValueError for value <= 0 (TypeError for non-numeric values);
            # such bitsets start out with length 0.
            self.length = 0

    def _with_length(self, result_value):
        # Wrap an operator result, keeping at least self's length so the
        # printed bit string does not shrink.
        b = Bitset(result_value)
        b.length = max((self.length, b.length))
        return b

    def __and__(self, other):
        return self._with_length(self.value & int(other))

    def __or__(self, other):
        return self._with_length(self.value | int(other))

    def __invert__(self):
        return self._with_length(~self.value)

    def __xor__(self, value):
        return self._with_length(self.value ^ int(value))

    def __lshift__(self, value):
        return self._with_length(self.value << int(value))

    def __rshift__(self, value):
        return self._with_length(self.value >> int(value))

    def __eq__(self, other):
        # Compare against another Bitset's value when available, otherwise
        # against the raw object (e.g. a plain int).
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable, as in the original — confirm that is acceptable.
        return self.value == getattr(other, 'value', other)

    def __int__(self):
        return self.value

    def __str__(self):
        # MSB-first bit string, e.g. Bitset(5) -> '101'.
        return ''.join(('1' if bit else '0') for bit in self[:])

    def __repr__(self):
        return ('Bitset(%s)' % str(self))

    def __getitem__(self, s):
        """
        Gets the specified position.
        Like normal integers, 0 represents the MSB.
        """
        # Fix: dispatch on the index type instead of the original
        # `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        if isinstance(s, slice):
            (start, stop, step) = s.indices(len(self))
            results = []
            for position in range(start, stop, step):
                pos = ((len(self) - position) - 1)
                results.append(bool((self.value & (1 << pos))))
            return results
        pos = ((len(self) - s) - 1)
        return bool((self.value & (1 << pos)))

    def __setitem__(self, s, value):
        """
        Sets the specified position/s to value.
        Like normal integers, 0 represents the MSB.
        """
        # Fix: isinstance dispatch replaces the `except BaseException` dance.
        if isinstance(s, slice):
            (start, stop, step) = s.indices(len(self))
            for position in range(start, stop, step):
                pos = ((len(self) - position) - 1)
                if value:
                    self.value |= (1 << pos)
                else:
                    self.value &= (~ (1 << pos))
            maximum_position = max(((start + 1), stop, len(self)))
            self.length = maximum_position
        else:
            pos = ((len(self) - s) - 1)
            if value:
                self.value |= (1 << pos)
            else:
                self.value &= (~ (1 << pos))
            # NOTE(review): this looks off by one (pos is a bit index, so a
            # length of pos + 1 would be expected); kept as-is to preserve
            # the original behavior.
            if (len(self) < pos):
                self.length = pos
        return self

    def __iter__(self):
        """Iterates over the values in the bitset."""
        for i in self[:]:
            yield i

    def __len__(self):
        """Returns the length of the bitset."""
        return self.length
def flush():
    """Best-effort flush of the Python-level stdio buffers (and C's, if any).

    Errors from closed or redirected streams are deliberately ignored.
    """
    try:
        for stream in (sys.stdout, sys.stderr):
            stream.flush()
    except (AttributeError, ValueError, IOError):
        pass
@contextmanager
def capture_outputs(filename):
    'Duplicate stdout and stderr to a file on the file descriptor level.'
    with open(filename, 'a+') as target:
        original_stdout_fd = 1
        original_stderr_fd = 2
        target_fd = target.fileno()
        # Keep copies of the real console fds so they can be restored later.
        saved_stdout_fd = os.dup(original_stdout_fd)
        saved_stderr_fd = os.dup(original_stderr_fd)
        # Each tee child has its own stderr (fd 2) bound to the log file, so
        # its '/dev/stderr' argument appends to the log, while its stdout
        # still points at the original console stream (fd 1 resp. fd 2).
        tee_stdout = subprocess.Popen(['tee', '-a', '/dev/stderr'], start_new_session=True, stdin=subprocess.PIPE, stderr=target_fd, stdout=1)
        tee_stderr = subprocess.Popen(['tee', '-a', '/dev/stderr'], start_new_session=True, stdin=subprocess.PIPE, stderr=target_fd, stdout=2)
        flush()
        # Redirect the process-wide fds 1/2 into the tee pipes. Working at
        # the fd level also captures writes from C extensions, not just
        # Python's sys.stdout/sys.stderr objects.
        os.dup2(tee_stdout.stdin.fileno(), original_stdout_fd)
        os.dup2(tee_stderr.stdin.fileno(), original_stderr_fd)
        try:
            (yield)
        finally:
            flush()
            # Closing the pipe ends let the tee processes see EOF and exit.
            tee_stdout.stdin.close()
            tee_stderr.stdin.close()
            # Restore the console fds before waiting on the children.
            os.dup2(saved_stdout_fd, original_stdout_fd)
            os.dup2(saved_stderr_fd, original_stderr_fd)

            def kill_tees():
                tee_stdout.kill()
                tee_stderr.kill()

            # Give the tee processes one second to drain, then kill them so
            # a stuck child cannot hang the program on exit.
            tee_timer = Timer(1, kill_tees)
            try:
                tee_timer.start()
                tee_stdout.wait()
                tee_stderr.wait()
            finally:
                tee_timer.cancel()
            os.close(saved_stdout_fd)
            os.close(saved_stderr_fd)
def dict_update(d, u):
    """Improved update for nested dictionaries.

    Unlike ``dict.update``, nested mappings are merged key by key instead
    of being replaced wholesale.

    Arguments:
        d: The dictionary to be updated (mutated in place).
        u: The update dictionary.

    Returns:
        The updated dictionary ``d``.
    """
    for key in u:
        incoming = u[key]
        if isinstance(incoming, collections.abc.Mapping):
            # Recurse into mappings, creating the sub-dict when absent.
            d[key] = dict_update(d.get(key, {}), incoming)
        else:
            d[key] = incoming
    return d
def main():
    # End-to-end driver: builds the coarse network and the fine LSTM,
    # loads whole pickled modules in test mode, materializes all batches
    # into lists, and hands everything to TrainNet.train_model.
    config = parser.parse_args()
    fine_LSTM = MyModel.fine_LSTM(config).cuda(config.use_gpu)
    coarseNet = MyModel.coarseNet(config).cuda(config.use_gpu)
    if (config.stage == 'test'):
        # Entire modules were serialized with torch.save; map their storages
        # onto the requested GPU. (The '730' prefix is presumably a
        # checkpoint naming tag — TODO confirm.)
        fine_LSTM = torch.load(((('output/' + '730') + config.testName) + 'fine_LSTM.pkl'), map_location=(lambda storage, loc: storage.cuda(config.use_gpu)))
        coarseNet = torch.load(((('output/' + '730') + config.testName) + 'coarse.pkl'), map_location=(lambda storage, loc: storage.cuda(config.use_gpu)))
    dataRoot = 'processed_data/'
    transform_origin = transforms.Compose([Rescale(config.origin_image_size), ToTensor()])
    train_dataset_origin = LandmarksDataset(csv_file=(dataRoot + config.traincsv), root_dir=(dataRoot + 'images'), transform=transform_origin, landmarksNum=config.landmarkNum)
    val_dataset = LandmarksDataset(csv_file=(dataRoot + config.testcsv), root_dir=(dataRoot + 'images'), transform=transform_origin, landmarksNum=config.landmarkNum)
    # Batches are pre-fetched into plain lists so every epoch reuses the
    # same (unshuffled) batch order; training batches are only loaded in
    # the 'train' stage.
    train_dataloader = []
    val_dataloader = []
    train_dataloader_t = DataLoader(train_dataset_origin, batch_size=config.batchSize, shuffle=False, num_workers=0)
    if (config.stage == 'train'):
        for data in train_dataloader_t:
            train_dataloader.append(data)
    val_dataloader_t = DataLoader(val_dataset, batch_size=config.batchSize, shuffle=False, num_workers=0)
    for data in val_dataloader_t:
        val_dataloader.append(data)
    print(len(train_dataloader), len(val_dataloader))
    dataloaders = {'train': train_dataloader, 'val': val_dataloader}
    criterion_coarse = LossFunction.coarse_heatmap(config)
    criterion_fine = LossFunction.fine_heatmap(config)
    # Both networks are optimized jointly with a single Adam optimizer.
    params = (list(coarseNet.parameters()) + list(fine_LSTM.parameters()))
    optimizer_ft = optim.Adam(params)
    TrainNet.train_model(coarseNet, fine_LSTM, dataloaders, criterion_coarse, criterion_fine, optimizer_ft, config)
@add_arg_scope
def gate_conv(x_in, cnum, ksize, stride=1, rate=1, name='conv', padding='SAME', activation='leaky_relu', use_lrn=True, training=True):
    """Gated convolution: a feature conv modulated by a sigmoid gate conv.

    Returns:
        (gated_features, gate) tensors of `cnum` channels each.
    """
    assert (padding in ['SYMMETRIC', 'SAME', 'REFELECT'])
    x = x_in
    if ((padding == 'SYMMETRIC') or (padding == 'REFELECT')):
        p = int(((rate * (ksize - 1)) / 2))
        # 'REFELECT' is the (misspelled) public option name kept for
        # backward compatibility; tf.pad only understands 'REFLECT'.
        mode = ('REFLECT' if (padding == 'REFELECT') else padding)
        x = tf.pad(x_in, [[0, 0], [p, p], [p, p], [0, 0]], mode=mode)
        padding = 'VALID'
    # Fix: both convolutions now consume the (possibly padded) tensor `x`;
    # the original passed the unpadded x_in, silently discarding the
    # padding computed above (and mis-sizing the VALID convs).
    feat = tf.layers.conv2d(x, cnum, ksize, stride, dilation_rate=rate, activation=None, padding=padding, name=name)
    if use_lrn:
        feat = tf.nn.lrn(feat, bias=5e-05)
    if (activation == 'leaky_relu'):
        feat = tf.nn.leaky_relu(feat)
    # Gate branch: sigmoid-activated conv over the same input.
    g = tf.layers.conv2d(x, cnum, ksize, stride, dilation_rate=rate, activation=tf.nn.sigmoid, padding=padding, name=(name + '_g'))
    out = tf.multiply(feat, g)
    return (out, g)
@add_arg_scope
def gate_deconv(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, name='deconv', training=True):
    """Gated transposed convolution.

    Returns:
        (gated_output, gate) tensors shaped `output_shape`.
    """
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]], initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        biases = tf.get_variable('biases1', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
        deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
        deconv = tf.nn.leaky_relu(deconv)
        # Gate branch.
        # NOTE(review): both branches share the same kernel `w`; a separate
        # gate kernel may have been intended — confirm against the paper.
        g = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        b = tf.get_variable('biases2', [output_shape[(- 1)]], initializer=tf.constant_initializer(0.0))
        g = tf.reshape(tf.nn.bias_add(g, b), deconv.get_shape())
        # Fix: the gate is sigmoid of the gate branch `g`; the original
        # applied sigmoid to `deconv`, discarding the g branch entirely
        # (its bias 'biases2' was created but never used in the output).
        g = tf.nn.sigmoid(g)
        deconv = tf.multiply(g, deconv)
        return (deconv, g)
class GraphicsScene(QGraphicsScene):
    # Interactive drawing scene. `mode_list` is a shared 3-flag list:
    # modes[0] -> mask drawing, modes[1] -> sketch, modes[2] -> colored
    # stroke. `history` records one code per drawn segment (0=mask,
    # 1=sketch, 2=stroke) so undo() can pop the matching point list.

    def __init__(self, mode_list, parent=None):
        QGraphicsScene.__init__(self, parent)
        self.modes = mode_list
        self.mouse_clicked = False
        self.prev_pt = None  # last mouse position while dragging
        self.mask_points = []
        self.sketch_points = []
        self.stroke_points = []
        self.history = []
        self.stk_color = None  # current stroke color (set via get_stk_color)

    def reset(self):
        # Clear all recorded segments; the mode flags are left untouched.
        self.mask_points = []
        self.sketch_points = []
        self.stroke_points = []
        self.history = []
        self.stk_color = None
        self.prev_pt = None

    def mousePressEvent(self, event):
        self.mouse_clicked = True

    def mouseReleaseEvent(self, event):
        # End of a drag: forget the anchor so the next drag starts fresh.
        self.prev_pt = None
        self.mouse_clicked = False

    def mouseMoveEvent(self, event):
        # While dragging, draw a line segment from the previous position to
        # the current one and record its endpoints (as int pixel coords)
        # in the list matching the active mode.
        if self.mouse_clicked:
            if (self.modes[0] == 1):
                if self.prev_pt:
                    self.drawMask(self.prev_pt, event.scenePos())
                    pts = {}
                    pts['prev'] = (int(self.prev_pt.x()), int(self.prev_pt.y()))
                    pts['curr'] = (int(event.scenePos().x()), int(event.scenePos().y()))
                    self.mask_points.append(pts)
                    self.history.append(0)
                    self.prev_pt = event.scenePos()
                else:
                    # First move of the drag: just anchor the start point.
                    self.prev_pt = event.scenePos()
            elif (self.modes[1] == 1):
                if self.prev_pt:
                    self.drawSketch(self.prev_pt, event.scenePos())
                    pts = {}
                    pts['prev'] = (int(self.prev_pt.x()), int(self.prev_pt.y()))
                    pts['curr'] = (int(event.scenePos().x()), int(event.scenePos().y()))
                    self.sketch_points.append(pts)
                    self.history.append(1)
                    self.prev_pt = event.scenePos()
                else:
                    self.prev_pt = event.scenePos()
            elif (self.modes[2] == 1):
                if self.prev_pt:
                    self.drawStroke(self.prev_pt, event.scenePos())
                    pts = {}
                    pts['prev'] = (int(self.prev_pt.x()), int(self.prev_pt.y()))
                    pts['curr'] = (int(event.scenePos().x()), int(event.scenePos().y()))
                    # Strokes additionally record their color.
                    pts['color'] = self.stk_color
                    self.stroke_points.append(pts)
                    self.history.append(2)
                    self.prev_pt = event.scenePos()
                else:
                    self.prev_pt = event.scenePos()

    def drawMask(self, prev_pt, curr_pt):
        # Masks are thick white lines.
        lineItem = QGraphicsLineItem(QLineF(prev_pt, curr_pt))
        lineItem.setPen(QPen(Qt.white, 12, Qt.SolidLine))
        self.addItem(lineItem)

    def drawSketch(self, prev_pt, curr_pt):
        # Sketches are thin black lines.
        lineItem = QGraphicsLineItem(QLineF(prev_pt, curr_pt))
        lineItem.setPen(QPen(Qt.black, 1, Qt.SolidLine))
        self.addItem(lineItem)

    def drawStroke(self, prev_pt, curr_pt):
        # Strokes are medium lines in the currently selected color.
        lineItem = QGraphicsLineItem(QLineF(prev_pt, curr_pt))
        lineItem.setPen(QPen(QColor(self.stk_color), 4, Qt.SolidLine))
        self.addItem(lineItem)

    def get_stk_color(self, color):
        self.stk_color = color

    def erase_prev_pt(self):
        self.prev_pt = None

    def reset_items(self):
        # Remove every item currently in the scene.
        for i in range(len(self.items())):
            item = self.items()[0]
            self.removeItem(item)

    def undo(self):
        # Remove the most recent drawn segment, both visually (scene items)
        # and from the recorded point lists, using the history codes.
        # NOTE(review): a segment apparently corresponds to 8 scene items
        # (hence the 9/8 constants) — confirm against the rendering code.
        if (len(self.items()) > 1):
            if (len(self.items()) >= 9):
                for i in range(8):
                    item = self.items()[0]
                    self.removeItem(item)
                if (self.history[(- 1)] == 0):
                    self.mask_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 1):
                    self.sketch_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 2):
                    self.stroke_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 3):
                    self.history.pop()
            else:
                # Fewer than 9 items left: clear all but the last one.
                for i in range((len(self.items()) - 1)):
                    item = self.items()[0]
                    self.removeItem(item)
                if (self.history[(- 1)] == 0):
                    self.mask_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 1):
                    self.sketch_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 2):
                    self.stroke_points.pop()
                    self.history.pop()
                elif (self.history[(- 1)] == 3):
                    self.history.pop()
class Ui_Form(object):
    # Qt Designer-style layout class for the SC-FEGAN editor window:
    # a row of tool buttons plus two 512x512 graphics views (the editable
    # canvas on the left, the generated result on the right).

    def setupUi(self, Form):
        Form.setObjectName('Form')
        Form.resize(1200, 660)
        # Tool buttons (texts are assigned in retranslateUi).
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(10, 10, 97, 27))
        self.pushButton.setObjectName('pushButton')
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(130, 10, 97, 27))
        self.pushButton_2.setObjectName('pushButton_2')
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(250, 10, 97, 27))
        self.pushButton_3.setObjectName('pushButton_3')
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(370, 10, 97, 27))
        self.pushButton_4.setObjectName('pushButton_4')
        self.pushButton_5 = QtWidgets.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(560, 360, 81, 27))
        self.pushButton_5.setObjectName('pushButton_5')
        self.pushButton_6 = QtWidgets.QPushButton(Form)
        self.pushButton_6.setGeometry(QtCore.QRect(490, 40, 97, 27))
        self.pushButton_6.setObjectName('pushButton_6')
        self.pushButton_7 = QtWidgets.QPushButton(Form)
        self.pushButton_7.setGeometry(QtCore.QRect(490, 10, 97, 27))
        self.pushButton_7.setObjectName('pushButton_7')
        self.pushButton_8 = QtWidgets.QPushButton(Form)
        self.pushButton_8.setGeometry(QtCore.QRect(370, 40, 97, 27))
        self.pushButton_8.setObjectName('pushButton_8')
        # Left: editable input canvas. Right: generated output.
        self.graphicsView = QtWidgets.QGraphicsView(Form)
        self.graphicsView.setGeometry(QtCore.QRect(20, 120, 512, 512))
        self.graphicsView.setObjectName('graphicsView')
        self.graphicsView_2 = QtWidgets.QGraphicsView(Form)
        self.graphicsView_2.setGeometry(QtCore.QRect(660, 120, 512, 512))
        self.graphicsView_2.setObjectName('graphicsView_2')
        self.saveImg = QtWidgets.QPushButton(Form)
        self.saveImg.setGeometry(QtCore.QRect(610, 10, 97, 27))
        self.saveImg.setObjectName('saveImg')
        self.arrangement = QtWidgets.QPushButton(Form)
        self.arrangement.setGeometry(QtCore.QRect(610, 40, 97, 27))
        self.arrangement.setObjectName('arrangement')
        self.retranslateUi(Form)
        # Wire each button to the corresponding slot on the Form.
        self.pushButton.clicked.connect(Form.open)
        self.pushButton_2.clicked.connect(Form.mask_mode)
        self.pushButton_3.clicked.connect(Form.sketch_mode)
        self.pushButton_4.clicked.connect(Form.stroke_mode)
        self.pushButton_5.clicked.connect(Form.complete)
        self.pushButton_6.clicked.connect(Form.undo)
        self.pushButton_7.clicked.connect(Form.color_change_mode)
        self.pushButton_8.clicked.connect(Form.clear)
        self.saveImg.clicked.connect(Form.save_img)
        self.arrangement.clicked.connect(Form.arrange)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Human-readable (translatable) texts for the window and buttons.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate('Form', 'SC-FEGAN'))
        self.pushButton.setText(_translate('Form', 'Open Image'))
        self.pushButton_2.setText(_translate('Form', 'Mask'))
        self.pushButton_3.setText(_translate('Form', 'Sketches'))
        self.pushButton_4.setText(_translate('Form', 'Color'))
        self.pushButton_5.setText(_translate('Form', 'Complete'))
        self.pushButton_6.setText(_translate('Form', 'Undo'))
        self.pushButton_7.setText(_translate('Form', 'Palette'))
        self.pushButton_8.setText(_translate('Form', 'Clear'))
        self.saveImg.setText(_translate('Form', 'Save Img'))
        self.arrangement.setText(_translate('Form', 'Arrange'))
class Config(object):
    """Thin wrapper exposing the keys of a YAML config file as attributes."""

    def __init__(self, filename=None):
        # NOTE(review): assert is stripped under `python -O`; kept because
        # existing callers may rely on the AssertionError.
        assert os.path.exists(filename), "ERROR: Config File doesn't exist."
        try:
            with open(filename, 'r') as f:
                # Fix: safe_load replaces the bare yaml.load(f), which
                # allowed arbitrary object construction and requires an
                # explicit Loader on PyYAML >= 6 (TypeError otherwise).
                self._cfg_dict = yaml.safe_load(f)
        except EnvironmentError:
            logger.error('Please check the file with name of "%s"', filename)
        logger.info(' APP CONFIG '.center(80, '-'))
        logger.info(''.center(80, '-'))

    def __getattr__(self, name):
        # Missing keys surface as KeyError; nested dicts are wrapped so
        # attribute chains (cfg.section.key) keep working.
        value = self._cfg_dict[name]
        if isinstance(value, dict):
            value = DictAsMember(value)
        return value
def main():
    """
    Code for launching the downstream training
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default='config/semseg_nuscenes.yaml', help='specify the config for training')
    parser.add_argument('--resume_path', type=str, default=None, help='provide a path to resume an incomplete training')
    parser.add_argument('--pretraining_path', type=str, default=None, help='provide a path to pre-trained weights')
    args = parser.parse_args()
    config = generate_config(args.cfg_file)
    # Command-line paths override the config file.
    if args.resume_path:
        config['resume_path'] = args.resume_path
    if args.pretraining_path:
        config['pretraining_path'] = args.pretraining_path
    # Fix: os.environ values are strings, so the original comparison
    # `os.environ.get('LOCAL_RANK', 0) == 0` was False whenever LOCAL_RANK
    # was actually set (e.g. to '0' by the DDP launcher). Cast to int so
    # rank 0 prints the config in every launch mode.
    if (int(os.environ.get('LOCAL_RANK', 0)) == 0):
        print(('\n' + '\n'.join(list(map((lambda x: f'{x[0]:20}: {x[1]}'), config.items())))))
    dm = DownstreamDataModule(config)
    model = make_model(config, config['pretraining_path'])
    if (config['num_gpus'] > 1):
        # Synchronize batch-norm statistics across GPUs.
        model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
    module = LightningDownstream(model, config)
    path = os.path.join(config['working_dir'], config['datetime'])
    trainer = pl.Trainer(gpus=config['num_gpus'], accelerator='ddp', default_root_dir=path, checkpoint_callback=True, max_epochs=config['num_epochs'], plugins=DDPPlugin(find_unused_parameters=True), num_sanity_val_steps=0, resume_from_checkpoint=config['resume_path'], check_val_every_n_epoch=10)
    print('Starting the training')
    trainer.fit(module, dm)
    print('Training finished, now evaluating the results')
    # Free the training objects before building the evaluation loader.
    del trainer
    del dm
    del module
    gc.collect()
    if (config['dataset'].lower() == 'nuscenes'):
        # In parametrizing mode the held-out 'verifying' split is used.
        phase = ('verifying' if (config['training'] in ('parametrize', 'parametrizing')) else 'val')
        val_dataloader = make_data_loader_nuscenes(config, phase, num_threads=config['num_threads'])
    elif (config['dataset'].lower() == 'kitti'):
        val_dataloader = make_data_loader_kitti(config, 'val', num_threads=config['num_threads'])
    elif (config['dataset'].lower() == 'scannet'):
        val_dataloader = make_data_loader_scannet(config, 'val', num_threads=config['num_threads'])
    evaluate(model.to(0), val_dataloader, config)
class DownstreamDataModule(pl.LightningDataModule):
    """
    The equivalent of a DataLoader for pytorch lightning.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Per-GPU batch size and worker count (workers clamped to >= 1).
        self.batch_size = (config['batch_size'] // config['num_gpus'])
        self.num_workers = max((config['num_threads'] // config['num_gpus']), 1)

    def setup(self, stage):
        """Instantiate the train/val datasets for the configured dataset."""
        transforms = make_transforms_clouds(self.config)
        if (self.config['dataset'].lower() == 'nuscenes'):
            Dataset = NuScenesDataset
        elif (self.config['dataset'].lower() == 'scannet'):
            Dataset = scannet_Dataset
        elif (self.config['dataset'].lower() in ('kitti', 'semantickitti')):
            Dataset = SemanticKITTIDataset
        else:
            raise Exception(f"Unknown dataset {self.config['dataset']}")
        # In parametrizing mode, train/val map to parametrizing/verifying.
        if (self.config['training'] in ('parametrize', 'parametrizing')):
            phase_train = 'parametrizing'
            phase_val = 'verifying'
        else:
            phase_train = 'train'
            phase_val = 'val'
        self.train_dataset = Dataset(phase=phase_train, transforms=transforms, config=self.config)
        if (Dataset == NuScenesDataset):
            # Reuse the already-loaded nuScenes tables for the val split.
            self.val_dataset = Dataset(phase=phase_val, config=self.config, cached_nuscenes=self.train_dataset.nusc)
        else:
            self.val_dataset = Dataset(phase=phase_val, config=self.config)

    def _collate_fn(self):
        # Fix: this dataset->collate dispatch was duplicated verbatim in
        # train_dataloader and val_dataloader, and an unknown dataset fell
        # through to a NameError; now it raises an explicit error like setup().
        name = self.config['dataset'].lower()
        if (name == 'nuscenes'):
            return minkunet_collate_pair_fn
        if (name == 'kitti'):
            return kitti_collate_pair_fn
        if (name == 'scannet'):
            return scannet_collate_pair_fn
        raise Exception(f"Unknown dataset {self.config['dataset']}")

    def _loader_workers(self):
        # Mirrors the original per-loader computation, which (unlike
        # self.num_workers in __init__) is not clamped and may be 0.
        if self.config['num_gpus']:
            return (self.config['num_threads'] // self.config['num_gpus'])
        return self.config['num_threads']

    def train_dataloader(self):
        """Shuffled training loader with dataset-specific pair collation."""
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self._loader_workers(), collate_fn=self._collate_fn(), pin_memory=True, drop_last=True, worker_init_fn=(lambda id: np.random.seed(((torch.initial_seed() // (2 ** 32)) + id))))

    def val_dataloader(self):
        """Deterministic validation loader (no shuffle, keep last batch)."""
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self._loader_workers(), collate_fn=self._collate_fn(), pin_memory=True, drop_last=False, worker_init_fn=(lambda id: np.random.seed(((torch.initial_seed() // (2 ** 32)) + id))))
def load_state_with_same_shape(model, weights):
    """
    Load common weights in two similar models
    (for instance between a pretraining and a downstream training)
    """
    model_state = model.state_dict()
    # Fix: the prefix sniffing below inspects the first key, which raised
    # an IndexError on an empty checkpoint dict.
    if not weights:
        return {}
    # Strip known wrapper prefixes (Lightning model, points model,
    # DataParallel 'module.', encoder wrappers). Only the first key is
    # sniffed, mirroring the original behavior.
    for prefix, message in (('model.', None), ('model_points.', None), ('module.', 'Loading multigpu weights with module. prefix...'), ('encoder.', 'Loading multigpu weights with encoder. prefix...')):
        if next(iter(weights)).startswith(prefix):
            if message is not None:
                print(message)
            # partition() drops everything up to and including the first
            # occurrence of the prefix.
            weights = {k.partition(prefix)[2]: weights[k] for k in weights.keys()}
    # Keep only parameters that exist in the target model with an identical
    # shape; report both the kept and the dropped keys.
    filtered_weights = {k: v for (k, v) in weights.items() if ((k in model_state) and (v.size() == model_state[k].size()))}
    removed_weights = {k: v for (k, v) in weights.items() if (not ((k in model_state) and (v.size() == model_state[k].size())))}
    print(('Loading weights:' + ', '.join(filtered_weights.keys())))
    print('')
    print(('Not loading weights:' + ', '.join(removed_weights.keys())))
    return filtered_weights
def make_model(config, load_path=None):
    """
    Build the points model according to what is in the config
    """
    assert (not config['normalize_features']), "You shouldn't normalize features for the downstream task"
    model = MinkUNet(3, config['model_n_out'], config)
    if load_path:
        print('Training with pretrained model')
        checkpoint = torch.load(load_path, map_location='cpu')
        if ('config' in checkpoint):
            # Sanity-check that the pre-training used compatible settings.
            for cfg in ('voxel_size', 'cylindrical_coordinates'):
                assert (checkpoint['config'][cfg] == config[cfg]), f"{cfg} is not consistant. Checkpoint: {checkpoint['config'][cfg]}, Config: {config[cfg]}."
        # DepthContrast checkpoints are recognized by their exact key set
        # and need extra surgery before the weights can be loaded.
        if (set(checkpoint.keys()) == set(['epoch', 'model', 'optimizer', 'train_criterion'])):
            print('Pre-trained weights are coming from DepthContrast.')
            pretraining_epochs = checkpoint['epoch']
            print(f'==> Number of pre-training epochs {pretraining_epochs}')
            checkpoint = checkpoint['model']
            if list(checkpoint.keys())[0].startswith('module.'):
                print('Loading multigpu weights with module. prefix...')
                checkpoint = {k.partition('module.')[2]: checkpoint[k] for k in checkpoint.keys()}
            # Keep only the voxel branch ('trunk.2.') of the DepthContrast
            # model and strip that prefix from its keys.
            voxel_net_suffix = 'trunk.2.'
            checkpoint = {key.partition(voxel_net_suffix)[2]: checkpoint[key] for key in checkpoint.keys() if key.startswith(voxel_net_suffix)}
            print(f'==> Number of loaded weight blobs {len(checkpoint)}')
            # Re-wrap so the generic loading path below can handle it.
            checkpoint = {'model_points': checkpoint}
        key = ('model_points' if ('model_points' in checkpoint) else 'state_dict')
        # Only load tensors whose name and shape match the target model.
        filtered_weights = load_state_with_same_shape(model, checkpoint[key])
        model_dict = model.state_dict()
        model_dict.update(filtered_weights)
        model.load_state_dict(model_dict)
    if config['freeze_layers']:
        # Freeze everything except the last two parameter tensors —
        # presumably the final classification layer; TODO confirm.
        for param in list(model.parameters())[:(- 2)]:
            param.requires_grad = False
    return model
def main():
    """
    Code for launching the downstream evaluation
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--resume_path', type=str, default=None, help='provide a path to resume an incomplete training')
    parser.add_argument('--dataset', type=str, default=None, help='Choose between nuScenes and KITTI')
    args = parser.parse_args()
    # Pick a default config file from the dataset name when no explicit
    # config was given; fall back to the nuScenes config.
    if ((args.cfg_file is None) and (args.dataset is not None)):
        if (args.dataset.lower() == 'kitti'):
            args.cfg_file = 'config/semseg_kitti.yaml'
        elif (args.dataset.lower() == 'nuscenes'):
            args.cfg_file = 'config/semseg_nuscenes.yaml'
        else:
            raise Exception(f'Dataset not recognized: {args.dataset}')
    elif (args.cfg_file is None):
        args.cfg_file = 'config/semseg_nuscenes.yaml'
    config = generate_config(args.cfg_file)
    if args.resume_path:
        config['resume_path'] = args.resume_path
    print(('\n' + '\n'.join(list(map((lambda x: f'{x[0]:20}: {x[1]}'), config.items())))))
    print('Creating the loaders')
    if (config['dataset'].lower() == 'nuscenes'):
        # In parametrizing mode the held-out 'verifying' split is used.
        phase = ('verifying' if (config['training'] in ('parametrize', 'parametrizing')) else 'val')
        val_dataloader = make_data_loader_nuscenes(config, phase, num_threads=config['num_threads'])
    elif (config['dataset'].lower() == 'kitti'):
        val_dataloader = make_data_loader_kitti(config, 'val', num_threads=config['num_threads'])
    else:
        raise Exception(f'Dataset not recognized: {args.dataset}')
    print('Creating the model')
    model = make_model(config, config['pretraining_path']).to(0)
    checkpoint = torch.load(config['resume_path'], map_location=torch.device(0))
    if ('config' in checkpoint):
        # The checkpoint must come from a run with compatible settings.
        for cfg in ('voxel_size', 'cylindrical_coordinates'):
            assert (checkpoint['config'][cfg] == config[cfg]), f'''{cfg} is not consistant. 
Checkpoint: {checkpoint['config'][cfg]} Config: {config[cfg]}.'''
    try:
        # Plain checkpoints store the points model directly...
        model.load_state_dict(checkpoint['model_points'])
    except KeyError:
        # ...while Lightning checkpoints store it under 'state_dict' with
        # a 'model.' prefix on every key.
        weights = {k.replace('model.', ''): v for (k, v) in checkpoint['state_dict'].items() if k.startswith('model.')}
        model.load_state_dict(weights)
    evaluate(model, val_dataloader, config)
def build_backbone(cfg):
    """Instantiate a backbone module from its registry config."""
    backbone = BACKBONES.build(cfg)
    return backbone
def build_neck(cfg):
    """Instantiate a neck module from its registry config."""
    neck = NECKS.build(cfg)
    return neck
def build_head(cfg):
    """Instantiate a head module from its registry config."""
    head = HEADS.build(cfg)
    return head
def build_loss(cfg):
    """Instantiate a loss module from its registry config."""
    loss = LOSSES.build(cfg)
    return loss
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
    """Instantiate a segmentor, merging the legacy train/test configs.

    Passing train_cfg/test_cfg here is deprecated; they should live inside
    the model config. Specifying them in both places is rejected.
    """
    legacy_cfgs = (train_cfg is not None) or (test_cfg is not None)
    if legacy_cfgs:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    assert (cfg.get('train_cfg') is None) or (train_cfg is None), 'train_cfg specified in both outer field and model field '
    assert (cfg.get('test_cfg') is None) or (test_cfg is None), 'test_cfg specified in both outer field and model field '
    defaults = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return SEGMENTORS.build(cfg, default_args=defaults)
class ClipFeatureExtractor(nn.Module):
    """
    CLIP (ViT-B/32) image feature extractor with frozen weights.

    (Fix: the docstring previously claimed this was a DINO extractor,
    but the module loads a CLIP model.)
    """

    def __init__(self, config, preprocessing=None):
        super(ClipFeatureExtractor, self).__init__()
        (self.encoder, preprocess) = clip.load('ViT-B/32', device='cuda')
        # Freeze the backbone: features only, no fine-tuning.
        for param in self.encoder.parameters():
            param.requires_grad = False
        # NOTE(review): the `preprocessing` argument is ignored in favor of
        # CLIP's own preprocess pipeline (kept for interface compatibility).
        self.preprocessing = preprocess
        self.normalize_feature = config['normalize_features']

    def forward(self, x):
        if self.preprocessing:
            x = self.preprocessing(x)
        # Fix: the original called self.decoder(x) — an attribute that is
        # never defined, so forward() always raised AttributeError — and
        # left a debug print of the input size.
        # TODO(review): clip.load returns the full CLIP model; encoding
        # images is usually done via self.encoder.encode_image(x) — confirm.
        x = self.encoder(x)
        if self.normalize_feature:
            # L2-normalize each feature vector.
            x = F.normalize(x, p=2, dim=1)
        return x
class NormType(Enum):
    # Normalization-layer selector consumed by get_norm().
    BATCH_NORM = 0
    SPARSE_LAYER_NORM = 1  # NOTE: not handled by get_norm() (raises ValueError)
    SPARSE_INSTANCE_NORM = 2
    SPARSE_SWITCH_NORM = 3  # NOTE: not handled by get_norm() (raises ValueError)
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
    """Instantiate the MinkowskiEngine normalization layer for `norm_type`.

    Only BATCH_NORM and SPARSE_INSTANCE_NORM are supported; any other
    NormType raises ValueError.
    """
    if norm_type is NormType.BATCH_NORM:
        return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
    if norm_type is NormType.SPARSE_INSTANCE_NORM:
        return ME.MinkowskiInstanceNorm(n_channels, D=D)
    raise ValueError(f'Norm type: {norm_type} not supported')
class ConvType(Enum):
    '''
    Define the kernel region type
    '''
    HYPERCUBE = (0, 'HYPERCUBE')
    SPATIAL_HYPERCUBE = (1, 'SPATIAL_HYPERCUBE')
    SPATIO_TEMPORAL_HYPERCUBE = (2, 'SPATIO_TEMPORAL_HYPERCUBE')
    HYPERCROSS = (3, 'HYPERCROSS')
    SPATIAL_HYPERCROSS = (4, 'SPATIAL_HYPERCROSS')
    SPATIO_TEMPORAL_HYPERCROSS = (5, 'SPATIO_TEMPORAL_HYPERCROSS')
    # NOTE(review): the fullname below carries a trailing space — looks like
    # a typo, but it is a runtime value so it is kept unchanged here.
    SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = (6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ')

    def __new__(cls, value, name):
        # Members are (int value, readable name) pairs; expose the second
        # element as a .fullname attribute.
        member = object.__new__(cls)
        member._value_ = value
        member.fullname = name
        return member

    def __int__(self):
        return self.value
def convert_conv_type(conv_type, kernel_size, D):
    """Resolve (region_type, axis_types, kernel_size) for a given ConvType.

    Spatial-only variants restrict the kernel to the first three axes and
    append a size-1 kernel on the temporal axis when D == 4; the mixed
    cube/cross variant sets per-axis region types instead.
    """
    assert isinstance(conv_type, ConvType), 'conv_type must be of ConvType'
    region_type = conv_to_region_type[conv_type]
    axis_types = None
    if (conv_type == ConvType.SPATIAL_HYPERCUBE):
        # Fix: `collections.Sequence` was removed in Python 3.10; the ABC
        # lives in collections.abc (already used elsewhere in this file).
        if isinstance(kernel_size, collections.abc.Sequence):
            kernel_size = kernel_size[:3]
        else:
            kernel_size = ([kernel_size] * 3)
        if (D == 4):
            kernel_size.append(1)
    elif (conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE):
        assert (D == 4)
    elif (conv_type == ConvType.HYPERCUBE):
        pass
    elif (conv_type == ConvType.SPATIAL_HYPERCROSS):
        if isinstance(kernel_size, collections.abc.Sequence):
            kernel_size = kernel_size[:3]
        else:
            kernel_size = ([kernel_size] * 3)
        if (D == 4):
            kernel_size.append(1)
    elif (conv_type == ConvType.HYPERCROSS):
        pass
    elif (conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS):
        assert (D == 4)
    elif (conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS):
        # Cube kernel on the 3 spatial axes, cross on the temporal axis.
        axis_types = ([ME.RegionType.HYPER_CUBE] * 3)
        if (D == 4):
            axis_types.append(ME.RegionType.HYPER_CROSS)
    return (region_type, axis_types, kernel_size)
def conv(in_planes, out_planes, kernel_size, stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=(- 1)):
    """Create a MinkowskiConvolution whose kernel region follows `conv_type`."""
    assert D > 0, 'Dimension must be a positive integer'
    # Normalize the kernel description for the requested region type.
    region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
    generator = ME.KernelGenerator(
        kernel_size, stride, dilation,
        region_type=region_type, axis_types=axis_types, dimension=D)
    return ME.MinkowskiConvolution(
        in_channels=in_planes, out_channels=out_planes,
        kernel_size=kernel_size, stride=stride, dilation=dilation,
        bias=bias, kernel_generator=generator, dimension=D)
def conv_tr(in_planes, out_planes, kernel_size, upsample_stride=1, dilation=1, bias=False, conv_type=ConvType.HYPERCUBE, D=(- 1)):
    """Create a MinkowskiConvolutionTranspose (upsampling) for `conv_type`."""
    assert D > 0, 'Dimension must be a positive integer'
    # Normalize the kernel description for the requested region type.
    region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
    generator = ME.KernelGenerator(
        kernel_size, upsample_stride, dilation,
        region_type=region_type, axis_types=axis_types, dimension=D)
    return ME.MinkowskiConvolutionTranspose(
        in_channels=in_planes, out_channels=out_planes,
        kernel_size=kernel_size, stride=upsample_stride, dilation=dilation,
        bias=bias, kernel_generator=generator, dimension=D)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=(- 1)):
    """Create a MinkowskiSumPooling layer whose region follows `conv_type`."""
    assert D > 0, 'Dimension must be a positive integer'
    # Normalize the kernel description for the requested region type.
    region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
    generator = ME.KernelGenerator(
        kernel_size, stride, dilation,
        region_type=region_type, axis_types=axis_types, dimension=D)
    return ME.MinkowskiSumPooling(
        kernel_size=kernel_size, stride=stride, dilation=dilation,
        kernel_generator=generator, dimension=D)
class BasicBlockBase(nn.Module):
    """ResNet-style basic residual block (two 3x3 sparse convolutions).

    Subclasses select the normalization layer through NORM_TYPE.
    """
    expansion = 1
    NORM_TYPE = NormType.BATCH_NORM

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, conv_type=ConvType.HYPERCUBE, bn_momentum=0.1, D=3):
        super(BasicBlockBase, self).__init__()
        # First conv may change resolution (stride); the second keeps it.
        self.conv1 = conv(inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, conv_type=conv_type, D=D)
        self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
        self.conv2 = conv(planes, planes, kernel_size=3, stride=1, dilation=dilation, bias=False, conv_type=conv_type, D=D)
        self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
        self.relu = MinkowskiReLU(inplace=True)
        # Optional projection of the identity path when shapes differ.
        self.downsample = downsample

    def forward(self, x):
        # Identity (or projected) shortcut.
        identity = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.norm1(self.conv1(x)))
        y = self.norm2(self.conv2(y))
        y += identity
        return self.relu(y)
class BasicBlock(BasicBlockBase):
    # Concrete basic block using batch normalization (the default NORM_TYPE,
    # restated here for explicitness).
    NORM_TYPE = NormType.BATCH_NORM