class SHREC(InMemoryDataset):
    """The SHREC classification dataset.

    This is the remeshed version from MeshCNN.

    .. note::

        Data objects hold mesh faces instead of edge indices.
        To convert the mesh to a graph, use the
        :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.
        To convert the mesh to a point cloud, use the
        :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to
        sample a fixed number of points on the mesh faces according to their
        face area.

    Args:
        root (string): Root directory where the dataset should be saved.
        train (bool, optional): If :obj:`True`, loads the training dataset,
            otherwise the test dataset. (default: :obj:`True`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.Data` object and returns a transformed
            version. The data object will be transformed before every access.
            (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.Data` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        pre_filter (callable, optional): A function that takes in an
            :obj:`torch_geometric.data.Data` object and returns a boolean
            value, indicating whether the data object should be included in
            the final dataset. (default: :obj:`None`)
    """

    url = 'https://dl.dropboxusercontent.com/s/biiwlkkky7bp5ya/shrec_16.zip'
    class_names = ['alien', 'ants', 'armadillo', 'bird1', 'bird2', 'camel',
                   'cat', 'centaur', 'dinosaur', 'dino_ske', 'dog1', 'dog2',
                   'flamingo', 'glasses', 'gorilla', 'hand', 'horse', 'lamp',
                   'laptop', 'man', 'myScissor', 'octopus', 'pliers', 'rabbit',
                   'santa', 'shark', 'snake', 'spiders', 'two_balls', 'woman']

    def __init__(self, root, train=True, transform=None, pre_transform=None,
                 pre_filter=None, split10=True):
        self.split10 = split10
        super(SHREC, self).__init__(root, transform, pre_transform, pre_filter)
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.data, self.slices = torch.load(path)

    @property
    def raw_file_names(self):
        return ['shrec_16.zip']

    @property
    def processed_file_names(self):
        return ['training.pt', 'test.pt']

    @property
    def num_classes(self):
        return len(self.class_names)

    def download(self):
        download_url(self.url, self.raw_dir)

    def process(self):
        print('Extracting zip...')
        extract_zip(self.raw_paths[0], self.raw_dir, log=False)

        training_list = []
        test_list = []

        print('Processing SHREC...')
        raw_path = osp.join(self.raw_dir, 'shrec_16')
        for class_idx, class_name in enumerate(self.class_names):
            # Training meshes: optionally subsample 10 meshes per class.
            train_meshes = osp.join(raw_path, class_name, 'train')
            mesh_files = osls(train_meshes)
            idx = (np.random.permutation(len(mesh_files))[:10]
                   if self.split10 else np.arange(len(mesh_files)))
            for file_i, filename in progressbar.progressbar(enumerate(mesh_files)):
                if file_i not in idx:
                    continue
                data = read_obj(osp.join(train_meshes, filename))
                data.y = class_idx
                if self.pre_filter is not None and not self.pre_filter(data):
                    continue
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                training_list.append(data)

            # Test meshes: always use all meshes.
            test_meshes = osp.join(raw_path, class_name, 'test')
            for filename in progressbar.progressbar(osls(test_meshes)):
                data = read_obj(osp.join(test_meshes, filename))
                data.y = class_idx
                if self.pre_filter is not None and not self.pre_filter(data):
                    continue
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                test_list.append(data)

        torch.save(self.collate(training_list), self.processed_paths[0])
        torch.save(self.collate(test_list), self.processed_paths[1])
        shutil.rmtree(osp.join(self.raw_dir, 'shrec_16'))
def read_obj(path):
    mesh = openmesh.read_trimesh(path)
    pos = torch.from_numpy(mesh.points()).to(torch.float)
    face = torch.from_numpy(mesh.face_vertex_indices())
    face = face.t().to(torch.long).contiguous()
    return Data(pos=pos, face=face)
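# Illustrative usage sketch (not part of the original repo): read_obj returns a
# Data object with mesh faces; per the SHREC docstring above, FaceToEdge turns
# it into a graph. The tetrahedron below is a made-up toy mesh standing in for
# a real .obj file.
def _demo_face_to_edge():
    import torch
    from torch_geometric.data import Data
    from torch_geometric.transforms import FaceToEdge
    pos = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    face = torch.tensor([[0, 0, 0, 1], [1, 1, 2, 2], [2, 3, 3, 3]])  # [3, F]
    data = FaceToEdge(remove_faces=True)(Data(pos=pos, face=face))
    assert data.edge_index.size(0) == 2  # mesh is now an undirected graph
    return data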
def test(args):
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/ShapeNet')
    pre_transform = Compose((T.NormalizeScale(), T.GeodesicFPS(args.num_points)))
    transform = Compose((T.RandomScale((2 / 3, 3 / 2)), T.RandomTranslateGlobal(0.1)))
    test_dataset = ShapeNet(path, categories=args.class_choice, split='test',
                            pre_transform=pre_transform, transform=transform)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=0, drop_last=False)
    num_classes = (test_dataset.num_classes if args.class_choice is None
                   else len(test_dataset.seg_classes[args.class_choice]))
    model = shapenet_model(args, num_classes)
    state_dict = torch.load(args.checkpoint)
    model.load_state_dict(state_dict)
    evaluate(model, args.device, test_loader, args)
def evaluate(model, device, loader, args):
    model.eval()
    test_pred_seg_acc = None
    test_pred_seg = []
    test_true_seg = []
    test_label_seg = []
    for i in progressbar(range(args.num_votes)):
        for data in loader:
            data = data.to(device)
            with torch.no_grad():
                pred = model(data)
            test_pred_seg.append(pred.detach().cpu().numpy().reshape(
                data.num_graphs, -1, pred.size(1)))
            if i == 0:
                test_true_seg.append(data.y.cpu().numpy().reshape(data.num_graphs, -1))
                test_label_seg.append(data.category.max(dim=1)[1].cpu().numpy())
        if test_pred_seg_acc is None:
            test_pred_seg_acc = np.concatenate(test_pred_seg, axis=0)
        else:
            test_pred_seg_acc += np.concatenate(test_pred_seg, axis=0)
        test_pred_seg = []
    test_pred_seg = np.argmax(test_pred_seg_acc, axis=2)
    test_true_seg = np.concatenate(test_true_seg, axis=0)
    test_acc = metrics.accuracy_score(test_true_seg.flatten(), test_pred_seg.flatten())
    avg_per_class_acc = metrics.balanced_accuracy_score(test_true_seg.flatten(),
                                                        test_pred_seg.flatten())
    test_label_seg = np.concatenate(test_label_seg)
    test_ious = calc_shape_IoU(test_pred_seg, test_true_seg, test_label_seg,
                               args.class_choice)
    print('test mean iou: ', np.mean(test_ious))
    print('test accuracy: ', test_acc)
    print('test avg class accuracy', avg_per_class_acc)
    mean_iou_per_class = scatter_mean(torch.Tensor(test_ious),
                                      torch.from_numpy(test_label_seg)).numpy()
    for i, (key, _) in enumerate(loader.dataset.category_ids.items()):
        print('iou {}: '.format(key), mean_iou_per_class[i])
    return test_ious
def train(args, writer):
    path = osp.join(osp.dirname(osp.realpath(__file__)),
                    'data/ModelNet{}'.format(args.num_classes))
    pre_transform = Compose((T.NormalizeScale(),
                             SamplePoints(args.num_points * args.sampling_margin,
                                          include_normals=True),
                             T.GeodesicFPS(args.num_points)))
    transform = Compose((T.RandomScale((4 / 5, 5 / 4)), T.RandomTranslateGlobal(0.1)))
    train_dataset = ModelNet(path, None, str(args.num_classes), True,
                             transform=transform, pre_transform=pre_transform)
    test_dataset = ModelNet(path, None, str(args.num_classes), False,
                            pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=0, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=0, drop_last=False)
    model = DeltaNetClassification(in_channels=3, num_classes=args.num_classes,
                                   num_neighbors=args.k,
                                   grad_regularizer=args.grad_regularizer,
                                   grad_kernel_width=args.grad_kernel).to(args.device)

    if not args.evaluating:
        optimizer = torch.optim.SGD(model.parameters(), lr=100 * args.lr,
                                    momentum=args.momentum, weight_decay=0.0001)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs,
                                                               eta_min=args.lr)
        for epoch in progressbar(range(1, args.epochs + 1)):
            train_epoch(epoch, model, args.device, optimizer, train_loader, writer)
            test_acc, test_mca = evaluate(model, args.device, test_loader)
            writer.add_scalar('test accuracy', test_acc, epoch)
            writer.add_scalar('test mean class accuracy', test_mca, epoch)
            if epoch > 0 and epoch % 50 == 0:
                torch.save(model.state_dict(),
                           osp.join(args.checkpoint_dir, 'epoch_{}.pt'.format(epoch)))
            scheduler.step()
        torch.save(model.state_dict(), osp.join(args.checkpoint_dir, 'last.pt'))
    else:
        model.load_state_dict(torch.load(args.checkpoint))
        test_acc, test_mca = evaluate(model, args.device, test_loader)
        print('Test accuracy: {}, test mca: {}'.format(test_acc, test_mca))
def train_epoch(epoch, model, device, optimizer, loader, writer):
    """Train the model for one epoch on the items in the loader."""
    model.train()
    total_loss = 0
    running_loss = 0.0
    train_pred = []
    train_true = []
    for i, data in enumerate(loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = calc_loss(out, data.y)
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
        running_loss += loss.item()
        train_pred.append(out.max(dim=1)[1].detach().cpu().numpy())
        train_true.append(data.y.cpu().numpy())
        if i % 50 == 49:
            writer.add_scalar('training loss', running_loss / 50,
                              epoch * len(loader) + i)
            running_loss = 0.0
    train_true = np.concatenate(train_true)
    train_pred = np.concatenate(train_pred)
    train_acc = metrics.accuracy_score(train_true, train_pred)
    train_mean_class_acc = metrics.balanced_accuracy_score(train_true, train_pred)
    writer.add_scalar('training accuracy', train_acc, epoch)
    writer.add_scalar('training mean class accuracy', train_mean_class_acc, epoch)
def evaluate(model, device, loader):
    """Evaluate the model on each item in the loader."""
    model.eval()
    correct = 0
    eval_pred = []
    eval_true = []
    for data in loader:
        data = data.to(device)
        with torch.no_grad():
            pred = model(data).max(dim=1)[1]
        correct += pred.eq(data.y).sum().item()
        eval_pred.append(pred.detach().cpu().numpy())
        eval_true.append(data.y.cpu().numpy())
    eval_true = np.concatenate(eval_true)
    eval_pred = np.concatenate(eval_pred)
    eval_acc = metrics.accuracy_score(eval_true, eval_pred)
    eval_mean_class_acc = metrics.balanced_accuracy_score(eval_true, eval_pred)
    return eval_acc, eval_mean_class_acc
def train(args, writer):
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/ScanObjectNN')
    pre_transform = T.GeodesicFPS(args.num_points)
    transform = Compose((RandomRotate(360, 1), RandomTranslate(0.01),
                         T.RandomScale((4 / 5, 5 / 4)), T.RandomTranslateGlobal(0.1)))
    train_dataset = ScanObjectNN(path, background=args.background,
                                 augmentation=args.augmentation, train=True,
                                 transform=transform, pre_transform=pre_transform)
    test_dataset = ScanObjectNN(path, background=args.background,
                                augmentation=args.augmentation, train=False,
                                pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=4, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=4, drop_last=False)
    model = DeltaNetClassification(in_channels=3, num_classes=15,
                                   conv_channels=[64, 64, 64, 128],
                                   num_neighbors=args.k,
                                   grad_regularizer=args.grad_regularizer,
                                   grad_kernel_width=args.grad_kernel).to(args.device)

    if not args.evaluating:
        optimizer = torch.optim.SGD(model.parameters(), lr=100 * args.lr,
                                    momentum=args.momentum, weight_decay=0.0001)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs,
                                                               eta_min=args.lr)
        for epoch in progressbar(range(1, args.epochs + 1)):
            train_epoch(epoch, model, args.device, optimizer, train_loader, writer)
            test_acc = evaluate(model, args.device, test_loader)
            writer.add_scalar('test accuracy', test_acc, epoch)
            scheduler.step()
        torch.save(model.state_dict(), osp.join(args.checkpoint_dir, 'last.pt'))
    else:
        model.load_state_dict(torch.load(args.checkpoint))
        test_acc = evaluate(model, args.device, test_loader)
        print('Test accuracy: {}'.format(test_acc))
def train_epoch(epoch, model, device, optimizer, loader, writer):
    """Train the model for one epoch on the items in the loader."""
    model.train()
    total_loss = 0
    running_loss = 0.0
    train_pred = []
    train_true = []
    for i, data in enumerate(loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = calc_loss(out, data.y, smoothing=True)
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
        running_loss += loss.item()
        train_pred.append(out.max(dim=1)[1].detach().cpu().numpy())
        train_true.append(data.y.cpu().numpy())
        if i % 50 == 49:
            writer.add_scalar('training loss', running_loss / 50,
                              epoch * len(loader) + i)
            running_loss = 0.0
    train_true = np.concatenate(train_true)
    train_pred = np.concatenate(train_pred)
    train_acc = metrics.accuracy_score(train_true, train_pred)
    train_mean_class_acc = metrics.balanced_accuracy_score(train_true, train_pred)
    writer.add_scalar('training accuracy', train_acc, epoch)
    writer.add_scalar('training mean class accuracy', train_mean_class_acc, epoch)
def evaluate(model, device, loader):
    """Evaluate the model on each item in the loader."""
    model.eval()
    correct = 0
    eval_pred = []
    eval_true = []
    for data in loader:
        data = data.to(device)
        with torch.no_grad():
            pred = model(data).max(dim=1)[1]
        correct += pred.eq(data.y).sum().item()
        eval_pred.append(pred.detach().cpu().numpy())
        eval_true.append(data.y.cpu().numpy())
    eval_true = np.concatenate(eval_true)
    eval_pred = np.concatenate(eval_pred)
    eval_acc = metrics.accuracy_score(eval_true, eval_pred)
    return eval_acc
def train(args, writer):
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/ShapeNet')
    pre_transform = Compose((T.NormalizeScale(), T.GeodesicFPS(args.num_points)))
    transform = Compose((T.RandomScale((2 / 3, 3 / 2)), T.RandomTranslateGlobal(0.2)))
    train_dataset = ShapeNet(path, categories=args.class_choice, split='trainval',
                             transform=transform, pre_transform=pre_transform)
    test_dataset = ShapeNet(path, categories=args.class_choice, split='test',
                            pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=0, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=0, drop_last=False)
    num_classes = (train_dataset.num_classes if args.class_choice is None
                   else len(train_dataset.seg_classes[args.class_choice]))
    model = shapenet_model(args, num_classes)
    optimizer = torch.optim.SGD(model.parameters(), lr=100 * args.lr,
                                momentum=args.momentum, weight_decay=0.0001)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs,
                                                           eta_min=args.lr)
    for epoch in progressbar(range(1, args.epochs + 1)):
        train_epoch(epoch, model, args.device, optimizer, train_loader, writer, args)
        test_ious = evaluate(model, args.device, test_loader, args)
        writer.add_scalar('test mean iou', np.mean(test_ious), epoch)
        scheduler.step()
    torch.save(model.state_dict(), osp.join(args.checkpoint_dir, 'last.pt'))
def shapenet_model(args, num_classes):
    """Define ShapeNet model in a separate function, so it can be reused by
    the test script."""
    return DeltaNetSegmentation(in_channels=3, num_classes=num_classes,
                                conv_channels=[64, 128, 256], mlp_depth=2,
                                embedding_size=1024, num_neighbors=args.k,
                                grad_regularizer=args.grad_regularizer,
                                grad_kernel_width=args.grad_kernel,
                                categorical_vector=True).to(args.device)
def train_epoch(epoch, model, device, optimizer, loader, writer, args):
    """Train the model for one epoch on the items in the loader."""
    model.train()
    total_loss = 0
    running_loss = 0.0
    train_pred_seg = []
    train_true_seg = []
    train_label_seg = []
    for i, data in enumerate(loader):
        data = data.to(device)
        # With a single class choice, shift labels so parts start at 0.
        if args.class_choice is not None:
            labels = data.y - data.y.min()
        else:
            labels = data.y
        optimizer.zero_grad()
        out = model(data)
        loss = calc_loss(out, labels, smoothing=False)
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
        running_loss += loss.item()
        pred = out.max(dim=1)[1].detach().cpu().numpy()
        true = labels.cpu().numpy()
        train_pred_seg.append(pred.reshape(data.num_graphs, -1))
        train_true_seg.append(true.reshape(data.num_graphs, -1))
        train_label_seg.append(data.category.max(dim=1)[1].cpu().numpy())
        if i % 50 == 49:
            writer.add_scalar('training loss', running_loss / 50,
                              epoch * len(loader) + i)
            running_loss = 0.0
    train_true_seg = np.concatenate(train_true_seg, axis=0)
    train_pred_seg = np.concatenate(train_pred_seg, axis=0)
    train_acc = metrics.accuracy_score(train_true_seg.flatten(),
                                       train_pred_seg.flatten())
    avg_per_class_acc = metrics.balanced_accuracy_score(train_true_seg.flatten(),
                                                        train_pred_seg.flatten())
    train_label_seg = np.concatenate(train_label_seg)
    train_ious = calc_shape_IoU(train_pred_seg, train_true_seg, train_label_seg,
                                args.class_choice)
    writer.add_scalar('training mean iou', np.mean(train_ious), epoch)
    writer.add_scalar('training accuracy', train_acc, epoch)
    writer.add_scalar('training avg class accuracy', avg_per_class_acc, epoch)
def evaluate(model, device, loader, args):
    """Evaluate the model on each item in the loader."""
    model.eval()
    correct = 0
    eval_pred_seg = []
    eval_true_seg = []
    eval_label_seg = []
    for data in loader:
        data = data.to(device)
        if args.class_choice is not None:
            labels = data.y - data.y.min()
        else:
            labels = data.y
        with torch.no_grad():
            pred = model(data).max(dim=1)[1]
        correct += pred.eq(labels).sum().item()
        eval_pred_seg.append(pred.detach().cpu().numpy().reshape(data.num_graphs, -1))
        eval_true_seg.append(labels.cpu().numpy().reshape(data.num_graphs, -1))
        eval_label_seg.append(data.category.max(dim=1)[1].cpu().numpy())
    eval_true_seg = np.concatenate(eval_true_seg, axis=0)
    eval_pred_seg = np.concatenate(eval_pred_seg, axis=0)
    eval_label_seg = np.concatenate(eval_label_seg)
    eval_ious = calc_shape_IoU(eval_pred_seg, eval_true_seg, eval_label_seg,
                               args.class_choice)
    return eval_ious
def train(args, writer):
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/ShapeSeg')
    pre_transform = Compose((T.NormalizeArea(), T.NormalizeAxes(),
                             GenerateMeshNormals(),
                             T.SamplePoints(args.num_points * args.sampling_margin,
                                            include_normals=True, include_labels=True),
                             T.GeodesicFPS(args.num_points)))
    transform = Compose((T.RandomScale((0.8, 1.2)), T.RandomRotate(360, axis=2),
                         T.RandomTranslateGlobal(0.1)))
    train_dataset = ShapeSeg(path, True, transform=transform,
                             pre_transform=pre_transform)
    num_samples = len(train_dataset)
    num_train = int(num_samples * 0.9)
    num_validation = num_samples - num_train
    train_dataset, validation_dataset = torch.utils.data.random_split(
        train_dataset, [num_train, num_validation],
        generator=torch.Generator().manual_seed(args.seed))
    test_dataset = ShapeSeg(path, False, pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=0, drop_last=True)
    validation_loader = DataLoader(validation_dataset, batch_size=args.batch_size,
                                   shuffle=False, num_workers=0, drop_last=False)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=0, drop_last=False)
    model = DeltaNetSegmentation(in_channels=3, num_classes=8,
                                 conv_channels=[128] * 8, mlp_depth=1,
                                 embedding_size=512, num_neighbors=args.k,
                                 grad_regularizer=args.grad_regularizer,
                                 grad_kernel_width=args.grad_kernel).to(args.device)

    if not args.evaluating:
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
        best_validation = 0
        best_validation_test_score = 0
        for epoch in progressbar(range(1, args.epochs + 1)):
            train_epoch(epoch, model, args.device, optimizer, train_loader, writer)
            validation_accuracy = evaluate(model, args.device, validation_loader)
            writer.add_scalar('validation accuracy', validation_accuracy, epoch)
            test_accuracy = evaluate(model, args.device, test_loader)
            writer.add_scalar('test accuracy', test_accuracy, epoch)
            if validation_accuracy > best_validation:
                best_validation = validation_accuracy
                best_validation_test_score = test_accuracy
                torch.save(model.state_dict(), osp.join(args.checkpoint_dir, 'best.pt'))
            scheduler.step()
    else:
        model.load_state_dict(torch.load(args.checkpoint))
        best_validation_test_score = evaluate(model, args.device, test_loader)
    print('Test accuracy: {}'.format(best_validation_test_score))
def train_epoch(epoch, model, device, optimizer, loader, writer):
    """Train the model for one epoch on the items in the loader."""
    model.train()
    running_loss = 0.0
    for i, data in enumerate(loader):
        optimizer.zero_grad()
        out = model(data.to(device))
        loss = calc_loss(out, data.y, smoothing=False)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 50 == 49:
            writer.add_scalar('training loss', running_loss / 50,
                              epoch * len(loader) + i)
            running_loss = 0.0
def evaluate(model, device, loader):
    """Evaluate the model on each item in the loader."""
    model.eval()
    correct = 0
    total_num = 0
    for data in loader:
        with torch.no_grad():
            pred = model(data.to(device)).max(1)[1]
        correct += pred.eq(data.y).sum().item()
        total_num += data.y.size(0)
    eval_acc = correct / total_num
    return eval_acc
def train(args, writer):
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data/shrec')
    pre_transform = Compose((T.NormalizeScale(),
                             SamplePoints(args.num_points * args.sampling_margin,
                                          include_normals=True),
                             T.GeodesicFPS(args.num_points)))
    transform = Compose((T.RandomRotate(360, 0), T.RandomRotate(360, 1),
                         T.RandomRotate(360, 2), T.RandomTranslateGlobal(0.1)))
    train_dataset = SHREC(path, train=True, transform=transform,
                          pre_transform=pre_transform)
    test_dataset = SHREC(path, train=False, pre_transform=pre_transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=0, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=0, drop_last=False)
    model = DeltaNetClassification(in_channels=3, num_classes=30,
                                   conv_channels=[32] * 4, num_neighbors=args.k,
                                   grad_regularizer=args.grad_regularizer,
                                   grad_kernel_width=args.grad_kernel).to(args.device)

    if not args.evaluating:
        optimizer = torch.optim.SGD(model.parameters(), lr=100 * args.lr,
                                    momentum=args.momentum, weight_decay=0.0001)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs,
                                                               eta_min=args.lr)
        for epoch in progressbar(range(1, args.epochs + 1)):
            train_epoch(epoch, model, args.device, optimizer, train_loader, writer)
            test_acc = evaluate(model, args.device, test_loader)
            writer.add_scalar('test accuracy', test_acc, epoch)
            scheduler.step()
        torch.save(model.state_dict(), osp.join(args.checkpoint_dir, 'last.pt'))
    else:
        model.load_state_dict(torch.load(args.checkpoint))
        test_acc = evaluate(model, args.device, test_loader)
        print('Test accuracy: {}'.format(test_acc))
def train_epoch(epoch, model, device, optimizer, loader, writer):
    """Train the model for one epoch on the items in the loader."""
    model.train()
    total_loss = 0
    running_loss = 0.0
    train_pred = []
    train_true = []
    for i, data in enumerate(loader):
        data = data.to(device)
        optimizer.zero_grad()
        out = model(data)
        loss = calc_loss(out, data.y, smoothing=True)
        loss.backward()
        total_loss += loss.item() * data.num_graphs
        optimizer.step()
        running_loss += loss.item()
        train_pred.append(out.max(dim=1)[1].detach().cpu().numpy())
        train_true.append(data.y.cpu().numpy())
        if i % 50 == 49:
            writer.add_scalar('training loss', running_loss / 50,
                              epoch * len(loader) + i)
            running_loss = 0.0
    train_true = np.concatenate(train_true)
    train_pred = np.concatenate(train_pred)
    train_acc = metrics.accuracy_score(train_true, train_pred)
    writer.add_scalar('training accuracy', train_acc, epoch)
def evaluate(model, device, loader):
    """Evaluate the model on each item in the loader."""
    model.eval()
    correct = 0
    eval_pred = []
    eval_true = []
    for data in loader:
        data = data.to(device)
        with torch.no_grad():
            pred = model(data).max(dim=1)[1]
        correct += pred.eq(data.y).sum().item()
        eval_pred.append(pred.detach().cpu().numpy())
        eval_true.append(data.y.cpu().numpy())
    eval_true = np.concatenate(eval_true)
    eval_pred = np.concatenate(eval_pred)
    eval_acc = metrics.accuracy_score(eval_true, eval_pred)
    return eval_acc
def calc_loss(pred, true, smoothing=True):
    """Calculate cross entropy loss, apply label smoothing if needed."""
    true = true.contiguous().view(-1)
    if smoothing:
        eps = 0.2
        n_class = pred.size(1)
        one_hot = torch.zeros_like(pred).scatter(1, true.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)
        loss = -(one_hot * log_prb).sum(dim=1).mean()
    else:
        loss = F.cross_entropy(pred, true, reduction='mean')
    return loss
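# Quick usage sketch (shapes are assumptions for the demo, not values from the
# training scripts above): with eps=0.2, label smoothing keeps 0.8 of the
# target mass on the true class and spreads 0.2 uniformly over the remaining
# n_class - 1 classes.
def _demo_calc_loss():
    torch.manual_seed(0)
    pred = torch.randn(8, 30)            # 8 samples, 30 classes
    true = torch.randint(0, 30, (8,))
    return calc_loss(pred, true, smoothing=True), calc_loss(pred, true, smoothing=False)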
def calc_shape_IoU(pred_np, seg_np, label, class_choice):
    """Calculate IoU for a shape in ShapeNet."""
    seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
    index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
    label = label.squeeze()
    shape_ious = []
    for shape_idx in range(seg_np.shape[0]):
        if not class_choice:
            start_index = index_start[label[shape_idx]]
            num = seg_num[label[shape_idx]]
            parts = range(start_index, start_index + num)
        else:
            parts = range(seg_num[label[0]])
        part_ious = []
        for part in parts:
            I = np.sum(np.logical_and(pred_np[shape_idx] == part,
                                      seg_np[shape_idx] == part))
            U = np.sum(np.logical_or(pred_np[shape_idx] == part,
                                     seg_np[shape_idx] == part))
            iou = 1 if U == 0 else I / float(U)
            part_ious.append(iou)
        shape_ious.append(np.mean(part_ious))
    return shape_ious
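# Worked toy example (hypothetical values): two shapes from categories 0 and 1,
# which per seg_num/index_start above own parts 0..3 and 4..5 respectively.
# Parts absent from both prediction and ground truth default to IoU 1, so a
# perfect prediction yields a shape IoU of 1.0 for each shape.
def _demo_calc_shape_IoU():
    pred = np.array([[0, 0, 1, 1], [4, 4, 5, 5]])
    seg = np.array([[0, 0, 1, 1], [4, 4, 5, 5]])
    label = np.array([0, 1])
    ious = calc_shape_IoU(pred, seg, label, class_choice=None)
    assert ious == [1.0, 1.0]
    return ious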
class CMakeExtension(Extension):
    def __init__(self, name, sourcedir=''):
        Extension.__init__(self, name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    def run(self):
        try:
            out = subprocess.check_output(['cmake', '--version'])
        except OSError:
            raise RuntimeError('CMake must be installed to build the following '
                               'extensions: ' + ', '.join(e.name for e in self.extensions))
        if platform.system() == 'Windows':
            cmake_version = LooseVersion(
                re.search(r'version\s*([\d.]+)', out.decode()).group(1))
            if cmake_version < '3.1.0':
                raise RuntimeError('CMake >= 3.1.0 is required on Windows')
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
                      '-DPYTHON_EXECUTABLE=' + sys.executable]
        cfg = 'Debug' if self.debug else 'Release'
        build_args = ['--config', cfg]
        if platform.system() == 'Windows':
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2 ** 32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
        else:
            cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
            build_args += ['--', '-j3']
        env = os.environ.copy()
        env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get('CXXFLAGS', ''), self.distribution.get_version())
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
                              cwd=self.build_temp, env=env)
        subprocess.check_call(['cmake', '--build', '.'] + build_args,
                              cwd=self.build_temp)
def test_geodesic_fps():
    n = 1024
    n_samples = 512
    pos = np.random.randn(n, 3)

    samples1 = geodesic_fps(pos, n)
    assert samples1.shape[0] == n
    assert np.unique(samples1).shape[0] == n

    samples2 = geodesic_fps(pos, n_samples)
    assert samples2.shape[0] == n_samples
    assert np.unique(samples2).shape[0] == n_samples

    with pytest.raises(ValueError):
        geodesic_fps(torch.rand(n, 3), n)
    with pytest.raises(ValueError):
        geodesic_fps(np.random.randn(n, 2, 3), n)
def test_batch_dot():
    a = torch.rand(1024, 10)
    b = torch.rand(1024, 10)
    a_dot_b = (a * b).sum(dim=1, keepdim=True)
    out = batch_dot(a, b)
    assert torch.allclose(out, a_dot_b)
def test_deltaconv():
    N = 1000
    C_in = 3
    C_out = 32
    torch.manual_seed(1)
    conv = DeltaConv(C_in, C_out, depth=1, centralized=True, vector=True)
    assert conv.__repr__() == f'DeltaConv({C_in}, {C_out})'

    x = torch.rand(N, C_in)
    edge_index = knn_graph(x, 20, loop=True, flow='target_to_source')
    normal, x_basis, y_basis = estimate_basis(x, edge_index)
    grad, div = build_grad_div(x, normal, x_basis, y_basis, edge_index,
                               regularizer=1e-08)
    assert grad.size(0) == 2 * N
    assert grad.size(1) == N
    assert div.size(0) == N
    assert div.size(1) == 2 * N

    v = grad @ x
    assert v.size(0) == 2 * N
    x_out, v_out = conv(x, v, grad, div, edge_index)
    assert x_out.size(1) == C_out
    assert v_out.size(1) == C_out

    conv1 = DeltaConv(C_in, C_out, depth=1, centralized=True, vector=False)
    x_out, v_out = conv1(x, v, grad, div, edge_index)
    assert x_out.size(1) == C_out
    assert torch.allclose(v, v_out)

    x_basis_rot = rotate_around(x_basis, normal, torch.rand(N) * 2 * torch.pi)
    y_basis_rot = torch.cross(normal, x_basis_rot)
    grad_rot, div_rot = build_grad_div(x, normal, x_basis_rot, y_basis_rot,
                                       edge_index, regularizer=1e-08)
    target_x = torch.rand(N, 1)
    conv2 = DeltaConv(C_in, 1, depth=1, centralized=False)
    conv2.zero_grad()
    v = grad @ x
    out, _ = conv2(x, v, grad, div, edge_index)
    loss = F.l1_loss(out, target_x)
    loss.backward()
    gradients = torch.cat([p.grad.flatten() for p in conv2.parameters()
                           if p.grad is not None])
    conv2.zero_grad()
    v_rot = grad_rot @ x
    out_rot, _ = conv2(x, v_rot, grad_rot, div_rot, edge_index)
    loss_rot = F.l1_loss(out_rot, target_x)
    loss_rot.backward()
    gradients_rot = torch.cat([p.grad.flatten() for p in conv2.parameters()
                               if p.grad is not None])
    assert torch.allclose(gradients, gradients_rot, atol=1e-05)
def test_mlp():
    x = torch.rand(10, 16)
    mlp1 = MLP((16, 32))
    out = mlp1(x)
    assert out.size(1) == 32
    assert out.isnan().sum() == 0
    mlp2 = MLP((16, 32, 32, 64))
    out = mlp2(x)
    assert out.size(1) == 64
    assert out.isnan().sum() == 0
def test_vectormlp():
    N = 1000
    C_in = 16
    C_out = 32
    v = torch.rand(N, C_in)
    v_mlp1 = VectorMLP((C_in, C_out))
    out = v_mlp1(v)
    assert out.size(1) == C_out
    assert out.isnan().sum() == 0
    v_mlp2 = VectorMLP((C_in, C_out, C_out, C_out))
    out = v_mlp2(v)
    assert out.size(1) == C_out
    assert out.isnan().sum() == 0

    angle = torch.rand(N // 2) * 2 * torch.pi
    c, s = torch.cos(angle), torch.sin(angle)
    R = torch.stack([torch.stack([c, s], dim=1),
                     torch.stack([-s, c], dim=1)], dim=1)
    ones = torch.ones(N // 2)
    zeros = torch.zeros(N // 2)
    reflect = torch.where(torch.rand(N // 2) > 0.1, ones, -ones)
    F = torch.stack([torch.stack([ones, zeros], dim=1),
                     torch.stack([zeros, reflect], dim=1)], dim=1)
    T = torch.bmm(F, R)
    t_mlp1_v = torch.bmm(T, v_mlp1(v).view(-1, 2, C_out)).view(-1, C_out)
    t_mlp2_v = torch.bmm(T, v_mlp2(v).view(-1, 2, C_out)).view(-1, C_out)
    v_transformed = torch.bmm(T, v.view(-1, 2, C_in)).view(-1, C_in)
    mlp1_t_v = v_mlp1(v_transformed)
    mlp2_t_v = v_mlp2(v_transformed)
    assert torch.allclose(t_mlp1_v, mlp1_t_v, atol=1e-05)
    assert torch.allclose(t_mlp2_v, mlp2_t_v, atol=1e-05)
def test_scalarvectormlp_identity():
    N = 1000
    C_in = 16
    C_out = 32
    x = torch.rand(N, C_in)
    v = torch.rand(N * 2, C_in)

    sv_mlp = ScalarVectorMLP((C_in, C_out), vector_stream=True)
    sv_out_sv = sv_mlp((x, v))
    assert type(sv_out_sv) is tuple
    assert sv_out_sv[0].size(1) == C_out
    assert sv_out_sv[1].size(1) == C_out

    s_mlp = ScalarVectorMLP((C_in, C_out), vector_stream=False)
    s_out_s = s_mlp(x)
    assert type(s_out_s) is torch.Tensor
    assert s_out_s.size(1) == C_out
    s_out_sv = s_mlp((x, v))
    assert type(s_out_sv) is torch.Tensor
    assert s_out_sv.size(1) == C_out

    identity = ScalarVectorIdentity()
    assert torch.allclose(x, identity(x))
    assert (x, v) == identity((x, v))
    assert torch.allclose(v, identity(v))
def test_batchnorm1d():
    bn = BatchNorm1d(10)
    bn.reset_parameters()
    assert bn.__repr__() == 'BatchNorm1d(10)'
    x = torch.stack([torch.rand(10)] * 4, dim=0)
    out = bn(x)
    assert isinstance(out, Tensor)
    assert out.size() == x.size()
    assert torch.allclose(out, torch.zeros_like(x))
    assert torch.isnan(out).sum() == 0

    bn = BatchNorm1d(5)
    bn.reset_parameters()
    assert bn.__repr__() == 'BatchNorm1d(5)'
    zeromean = torch.FloatTensor([2, 1, 0, -1, -2, 1.5, -1.5, 1, 1, -2])
    assert zeromean.mean() == 0
    shifts = torch.FloatTensor([1, 2, 3, 4, 5])
    x = torch.stack([zeromean] * 5, dim=1)
    x_shifted = x * shifts + shifts
    assert torch.allclose(x_shifted.mean(dim=0), shifts)
    out = bn(x)
    out_shifted = bn(x_shifted)
    assert torch.allclose(out, out_shifted)
def test_vectornonlin():
    vnl = VectorNonLin(4)
    vnl.reset_parameters()
    assert vnl.__repr__() == 'VectorNonLin(batchnorm=None)'
    v = torch.rand((10, 4))
    out = vnl(v)
    assert isinstance(out, Tensor)
    assert torch.allclose(out, v)
    assert torch.isnan(out).sum() == 0

    vnl_bn = VectorNonLin(1, batchnorm=BatchNorm1d(1))
    v_x = torch.FloatTensor([1, 0, -0.75, 0.25, 0.5, 0, 0, 0, 0, 0])
    v_y = torch.FloatTensor([0, 0, 0, 0, 0, -1, 0, 0.75, -0.25, -0.5])
    v_norm_gt = torch.FloatTensor([1, 0, 0.75, 0.25, 0.5,
                                   1, 0, 0.75, 0.25, 0.5]).unsqueeze(-1)
    v = torch.stack([v_x, v_y], dim=1).view(-1, 1)
    v_norm = norm(v)
    assert torch.allclose(v_norm, v_norm_gt)
    out = vnl_bn(v)
    out_norm = norm(out)
    assert torch.allclose((out_norm > 0), (v_norm_gt > 0.5))
    assert torch.allclose((out_norm == 0), (v_norm_gt <= 0.5))
    out_x, out_y = out.view(-1, 2).T
    assert torch.allclose((out_x == 0), ((v_x <= 0.5) * (v_x >= -0.5)))
    assert torch.allclose((out_y == 0), ((v_y <= 0.5) * (v_y >= -0.5)))
    assert torch.allclose((out_x > 0), (v_x > 0.5))
    assert torch.allclose((out_y > 0), (v_y > 0.5))
    assert torch.allclose((out_x < 0), (v_x < -0.5))
    assert torch.allclose((out_y < 0), (v_y < -0.5))
def calculate_diff_w_significance(A_scores, B_scores, alpha=1e-05):
    A_scores = np.array(A_scores)
    B_scores = np.array(B_scores)
    mu = np.mean(A_scores) - np.mean(B_scores)
    p_value = stats.ttest_ind(A_scores, B_scores, alternative='greater')[1]
    mu_variance = np.var(A_scores) / len(A_scores) + np.var(B_scores) / len(B_scores)
    mu_std = np.sqrt(mu_variance)
    target_z = norm.ppf(1 - alpha / 2)
    lo, hi = mu - target_z * mu_std, mu + target_z * mu_std
    return {'mu': mu, 'p_value': p_value, 'mu_std': mu_std, 'lo': lo, 'hi': hi}
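# Hedged sanity sketch with synthetic scores (not from the original data):
# group A is shifted up by 1, so mu should land near 1 with a tiny p-value,
# and the confidence interval should bracket mu.
def _demo_diff_w_significance():
    rng = np.random.default_rng(0)
    A = rng.normal(1.0, 1.0, size=500)
    B = rng.normal(0.0, 1.0, size=500)
    res = calculate_diff_w_significance(A, B)
    assert res['lo'] < res['mu'] < res['hi']
    return res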
class D5:
    def __init__(self, A_samples: List[str], B_samples: List[str],
                 validator: Validator, proposer,
                 top_fraction: List[float] = None,
                 total_hypotheses_count: int = 60, early_stop: bool = True,
                 top_K_hypotheses: int = 5):
        self.A_samples, self.B_samples = A_samples, B_samples
        self.proposer, self.validator = proposer, validator
        self.sample2membership = {}
        for sample in A_samples:
            self.sample2membership[sample] = 1.0
        for sample in B_samples:
            self.sample2membership[sample] = 0.0
        self.h2h_dicts = {}
        self.top_fraction = top_fraction
        if top_fraction is None:
            self.top_fraction = [0.05, 0.2, 1.0]
        self.total_hypotheses_count = total_hypotheses_count
        self.early_stop = early_stop
        self.top_K_hypotheses = top_K_hypotheses

    def get_hypotheses(self):
        for idx in range(3):
            for p in self.top_fraction:
                if len(self.h2h_dicts) >= self.total_hypotheses_count:
                    break
                X_A, X_B = lexical_diversity(self.A_samples, self.B_samples,
                                             top_p=p, num_samples=25)
                r = self.proposer.propose_hypotheses(X_A, X_B)
                hyps, provenance = r['hypotheses'], r['query_args']
                provenance['top_p'] = p
                provenance['idx'] = idx
                for hyp in hyps:
                    if (hyp not in self.h2h_dicts
                            and len(self.h2h_dicts) < self.total_hypotheses_count):
                        h_dict = {'hypothesis': hyp, 'sample2score': {},
                                  'provenance': provenance,
                                  'diff_w_significance': None, 'active': True}
                        self.h2h_dicts[hyp] = h_dict

    def get_V_info(self):
        for h in self.h2h_dicts:
            hyp_dict = self.h2h_dicts[h]
            ordered_text = sorted(hyp_dict['sample2score'],
                                  key=hyp_dict['sample2score'].get)
            A_scores = [hyp_dict['sample2score'][sample] for sample in ordered_text
                        if self.sample2membership[sample] == 1.0]
            B_scores = [hyp_dict['sample2score'][sample] for sample in ordered_text
                        if self.sample2membership[sample] == 0.0]
            self.h2h_dicts[h]['diff_w_significance'] = \
                calculate_diff_w_significance(A_scores, B_scores)

    def filter_weak_hypotheses(self):
        lower_bounds = [hyp_dict['diff_w_significance']['lo']
                        for hyp_dict in self.h2h_dicts.values()]
        threshold = (sorted(lower_bounds, reverse=True)[:self.top_K_hypotheses][-1]
                     if self.early_stop else 0)
        for h, hyp_dict in self.h2h_dicts.items():
            if hyp_dict['active'] and hyp_dict['diff_w_significance']['hi'] < threshold:
                hyp_dict['active'] = False

    def validate(self):
        random_sample_order = list(self.sample2membership.keys())
        random.shuffle(random_sample_order)
        cur_pointer = 0
        print('Filtering out weak hypotheses')
        with tqdm.tqdm(total=len(random_sample_order)) as pbar:
            while cur_pointer < len(random_sample_order):
                samples = random_sample_order[cur_pointer:cur_pointer + VALIDATE_HYP_BLOCK_SIZE]
                cur_pointer += VALIDATE_HYP_BLOCK_SIZE
                validator_dicts = []
                for sample in samples:
                    for h, hyp_dict in self.h2h_dicts.items():
                        if not hyp_dict['active']:
                            continue
                        validator_dicts.append({'hypothesis': h, 'text': sample,
                                                'pointer': hyp_dict})
                all_scores = list(self.validator.validate_w_scores(validator_dicts))
                assert len(all_scores) == len(validator_dicts)
                for d, s in zip(validator_dicts, all_scores):
                    d['pointer']['sample2score'][d['text']] = s + eps * random.random()
                pbar.update(len(samples))
                self.get_V_info()
                self.filter_weak_hypotheses()
                pbar.set_description('Num hypotheses: %d' % len(
                    [h for h in self.h2h_dicts if self.h2h_dicts[h]['active']]))

    def run(self):
        self.get_hypotheses()
        self.validate()
        self.get_V_info()
        return self.h2h_dicts
def subsample(samples, n=1000):
    selected_idxes = list(range(len(samples)))
    random.shuffle(selected_idxes)
    selected_idxes = selected_idxes[:n]
    return [samples[i] for i in sorted(selected_idxes)]
def flip_problem(problem):
    problem = deepcopy(problem)
    problem['A_desc'], problem['B_desc'] = problem['B_desc'], problem['A_desc']
    problem['split'] = {k: {'A_samples': v['B_samples'], 'B_samples': v['A_samples']}
                        for k, v in problem['split'].items()}
    return problem
def classify_cmp(x: str) -> bool:
    tokenized_x = nltk.word_tokenize(x)
    pos_tags = nltk.pos_tag(tokenized_x)
    all_tags = {t[1] for t in pos_tags}
    return any(tag in ('JJR', 'RBR') for tag in all_tags)
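# Toy check (assumes the NLTK 'punkt' and 'averaged_perceptron_tagger' data are
# installed): comparative forms tagged JJR/RBR mark a hypothesis as comparative.
def _demo_classify_cmp():
    assert classify_cmp('uses longer sentences')      # 'longer' is tagged JJR
    assert not classify_cmp('mentions sports teams')  # no comparative tags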
def construct_blocks(A_samples: List[str], B_samples: List[str],
                     num_incontext_samples: int = 25):
    A_subsampled_samples = np.random.choice(
        A_samples, min(num_incontext_samples, len(A_samples)), replace=False)
    A_block = ''.join('Group A: ' + s + '\n' for s in A_subsampled_samples)
    B_subsampled_samples = np.random.choice(
        B_samples, min(num_incontext_samples, len(B_samples)), replace=False)
    B_block = ''.join('Group B: ' + s + '\n' for s in B_subsampled_samples)
    return {'A_block': A_block, 'B_block': B_block,
            'A_subsampled_samples': A_subsampled_samples,
            'B_subsampled_samples': B_subsampled_samples}
def prefix_subspan(x: str, prefix_token_max_len: int = SINGLE_SAMPLE_MAX_LENGTH,
                   tok: AutoTokenizer = GPT3_TOK) -> str:
    tokens = tok.tokenize(x)
    total_length = len(tokens)
    if total_length <= prefix_token_max_len:
        return x
    subspan_toks = tokens[:prefix_token_max_len]
    return tok.convert_tokens_to_string(subspan_toks) + '...'
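# Illustrative sketch (loads a GPT-2 tokenizer as a stand-in for the assumed
# GPT3_TOK constant): inputs over the token budget are truncated and suffixed
# with '...'.
def _demo_prefix_subspan():
    from transformers import AutoTokenizer
    tok = AutoTokenizer.from_pretrained('gpt2')
    out = prefix_subspan(' '.join(['word'] * 100), prefix_token_max_len=16, tok=tok)
    assert out.endswith('...')
    return out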
def convert_cmp_to_ind(s: str) -> str:
    for _ in range(3):
        if not classify_cmp(s):
            break
        prompt = rm_cmp_prompt.format(input=s)
        response = gpt3wrapper(prompt=prompt, max_tokens=2048, temperature=0.0,
                               top_p=1, frequency_penalty=0.0, presence_penalty=0.0,
                               stop=['\n\n'], engine='text-davinci-002')
        if response is None:
            return s
        s = response['choices'][0]['text'].strip()
    if classify_cmp(s) or 'group a' in s.lower() or 'group b' in s.lower():
        return None
    return s
def gpt3wrapper(max_repeat=20, **arguments):
    i = 0
    while i < max_repeat:
        try:
            response = openai.Completion.create(**arguments)
            return response
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except Exception as e:
            print(e)
            print(arguments['prompt'])
            print('now sleeping for %d seconds...' % SLEEP_TIME_AFTER_ERROR)
            time.sleep(SLEEP_TIME_AFTER_ERROR)
            i += 1
    return None
class GPT3_Proposer:
    def __init__(self, problem, use_default_hypotheses=False,
                 single_max_length=SINGLE_SAMPLE_MAX_LENGTH,
                 engine_name='text-davinci-003', temperature=0.7):
        if use_default_hypotheses:
            self.example_hypotheses = DEFAULT_HYPOTHESES
        else:
            self.example_hypotheses = (problem['example_hypotheses'] + DEFAULT_HYPOTHESES)[:3]
        self.problem = problem
        self.prompt_template = open('templates/gpt3_proposer.txt', 'r').read()
        self.single_max_length = single_max_length
        self.engine_name = engine_name
        self.temperature = temperature

    def propose_hypotheses(self, X_A: List[str], X_B: List[str]):
        X_A = [prefix_subspan(x) for x in X_A]
        X_B = [prefix_subspan(x) for x in X_B]
        num_incontext_samples = 25
        prompt = None
        arg_dict = {k: self.problem[k] for k in
                    ['dataset_description', 'generation', 'A_desc', 'B_desc',
                     'user', 'target']}
        random.shuffle(self.example_hypotheses)
        for i, hypothesis in enumerate(self.example_hypotheses):
            arg_dict[f'example_hypothesis_{i + 1}'] = hypothesis
        while num_incontext_samples > 1:
            sent_subset = construct_blocks(X_A, X_B,
                                           num_incontext_samples=num_incontext_samples)
            A_block, B_block = sent_subset['A_block'], sent_subset['B_block']
            tmp_arg_dict = deepcopy(arg_dict)
            tmp_arg_dict['A_block'] = A_block
            tmp_arg_dict['B_block'] = B_block
            prompt = self.prompt_template.format(**tmp_arg_dict)
            prompt_length = len(GPT3_TOK.encode(prompt))
            if prompt_length < MAX_PROMPT_LENGTH:
                break
            else:
                print(num_incontext_samples)
                num_incontext_samples -= 1
        arg_dict['A_block'] = sent_subset['A_block']
        arg_dict['B_block'] = sent_subset['B_block']
        prompt = self.prompt_template.format(**arg_dict)
        query_args = {'engine': self.engine_name, 'prompt': prompt,
                      'temperature': self.temperature, 'max_tokens': 512,
                      'top_p': 1, 'n': 1}
        result = gpt3wrapper(**query_args)
        returned_text = result['choices'][0]['text']
        hs = []
        for h in returned_text.split('\n\n')[0].split('\n-'):
            h = convert_cmp_to_ind(h.replace('"', '').strip())
            if h is not None and len(h) > 0:
                if h[-1] == '.':
                    h = h[:-1]
                hs.append(h)
        return {'hypotheses': hs, 'query_args': query_args}
def re_order(sorted_l: List[str], top_p: float) -> List[str]:
    part1 = sorted_l[:int(len(sorted_l) * top_p) + 1]
    part2 = sorted_l[int(len(sorted_l) * top_p) + 1:]
    np.random.shuffle(part1)
    np.random.shuffle(part2)
    return part1 + part2
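# Small demonstration (made-up list): the head (top-p fraction) and the tail
# are shuffled independently, so head items always stay ahead of tail items.
def _demo_re_order():
    np.random.seed(0)
    out = re_order([str(i) for i in range(10)], top_p=0.3)
    assert sorted(out[:4]) == ['0', '1', '2', '3']  # head size is int(10 * 0.3) + 1
    return out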
def get_word_set_of_sample(sample: str) -> Set[str]:
    sample_no_punc = sample.translate(str.maketrans('', '', string.punctuation))
    word_set = {ps.stem(word) for word in word_tokenize(sample_no_punc)
                if word not in stops}
    return word_set
def lexical_diversity(sorted_A: List[str], sorted_B: List[str],
                      top_p: float = 0.2, num_samples: int = 4, max_gap=None):
    sorted_A, sorted_B = deepcopy(sorted_A), deepcopy(sorted_B)
    a_candidates = []
    b_candidates = []
    if max_gap is None:
        max_gap = num_samples // 4 + 1
    reordered_A = re_order(sorted_A, top_p)
    reordered_B = re_order(sorted_B, top_p)
    a_words_count, b_words_count = defaultdict(int), defaultdict(int)
    cur_A_pointer, cur_B_pointer = 0, 0
    for _ in range(num_samples):
        while cur_A_pointer < len(reordered_A):
            sample_A = reordered_A[cur_A_pointer]
            cur_A_pointer += 1
            word_set_A = get_word_set_of_sample(sample_A)
            add_A_flg = True
            for word in word_set_A:
                if a_words_count[word] - b_words_count[word] >= max_gap:
                    add_A_flg = False
                    break
            if add_A_flg:
                a_candidates.append(sample_A)
                for word in word_set_A:
                    a_words_count[word] += 1
                break
        while cur_B_pointer < len(reordered_B):
            sample_B = reordered_B[cur_B_pointer]
            cur_B_pointer += 1
            word_set_B = get_word_set_of_sample(sample_B)
            add_B_flg = True
            for word in word_set_B:
                if b_words_count[word] - a_words_count[word] >= max_gap:
                    add_B_flg = False
            if add_B_flg:
                b_candidates.append(sample_B)
                for word in word_set_B:
                    b_words_count[word] += 1
                break
    return (a_candidates, b_candidates)
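# Hedged usage sketch (tiny made-up corpora; relies on the `ps` stemmer and
# `stops` stopword set assumed by get_word_set_of_sample above): picks up to
# num_samples lexically diverse representatives from each group.
def _demo_lexical_diversity():
    A = ['the cat sat on the mat', 'dogs bark loudly', 'a cat purrs softly']
    B = ['stocks fell sharply', 'markets rallied today', 'bonds were flat']
    a, b = lexical_diversity(A, B, top_p=1.0, num_samples=2)
    assert len(a) <= 2 and len(b) <= 2
    return a, b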
class BaseDataLoader(DataLoader):
    """Base class for all data loaders."""

    def __init__(self, dataset, batch_size, shuffle, validation_split,
                 num_workers, collate_fn=default_collate):
        self.validation_split = validation_split
        self.shuffle = shuffle
        self.batch_idx = 0
        self.n_samples = len(dataset)
        self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
        self.init_kwargs = {'dataset': dataset, 'batch_size': batch_size,
                            'shuffle': self.shuffle, 'collate_fn': collate_fn,
                            'num_workers': num_workers}
        super().__init__(sampler=self.sampler, **self.init_kwargs)

    def _split_sampler(self, split):
        if split == 0.0:
            return None, None
        idx_full = np.arange(self.n_samples)
        np.random.seed(0)
        np.random.shuffle(idx_full)
        if isinstance(split, int):
            assert split > 0
            assert split < self.n_samples, \
                'validation set size is configured to be larger than entire dataset.'
            len_valid = split
        else:
            len_valid = int(self.n_samples * split)
        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        self.shuffle = False
        self.n_samples = len(train_idx)
        return train_sampler, valid_sampler

    def split_validation(self):
        if self.valid_sampler is None:
            return None
        else:
            return DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
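# Hedged usage sketch with a toy TensorDataset (made-up data): a 0.2 split
# reserves 20% of the indices for validation; split_validation() then returns
# a loader over exactly those indices.
def _demo_base_data_loader():
    import torch
    from torch.utils.data import TensorDataset
    ds = TensorDataset(torch.arange(100).float().unsqueeze(1))
    loader = BaseDataLoader(ds, batch_size=10, shuffle=True,
                            validation_split=0.2, num_workers=0)
    val_loader = loader.split_validation()
    assert loader.n_samples == 80 and len(val_loader.sampler) == 20
    return loader, val_loader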
class BaseModel(nn.Module):
    """Base class for all models."""

    @abstractmethod
    def forward(self, *inputs):
        """Forward pass logic

        :return: Model output
        """
        raise NotImplementedError

    def __str__(self):
        """Model prints with number of trainable parameters."""
        model_parameters = filter(lambda p: p.requires_grad, self.parameters())
        params = sum(np.prod(p.size()) for p in model_parameters)
        return super().__str__() + '\nTrainable parameters: {}'.format(params)
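# Minimal subclass sketch (toy layer, assumed shapes) showing the BaseModel
# contract: override forward; __str__ then appends the trainable parameter count.
class _DemoModel(BaseModel):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)  # 4*2 weights + 2 biases = 10 parameters

    def forward(self, x):
        return self.fc(x)

# print(_DemoModel())  # ends with 'Trainable parameters: 10'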
class BaseTrainer:
    """Base class for all trainers."""

    def __init__(self, model, loss, metrics, optimizer, config):
        self.config = config
        if 'trainer' in config.config:
            self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
            cfg_trainer = config['trainer']
            self.epochs = cfg_trainer['epochs']
            self.save_period = cfg_trainer['save_period']
            self.monitor = cfg_trainer.get('monitor', 'off')
        else:
            self.logger = config.get_logger('evaluater')
            self.monitor = 'off'
        self.device, self.device_ids = self._prepare_device(config['n_gpu'])
        self.model = model.to(self.device)
        if len(self.device_ids) > 1:
            self.model = torch.nn.DataParallel(model, device_ids=self.device_ids)
        self.loss = loss
        self.metrics = metrics
        self.optimizer = optimizer
        self.save_multiple = True
        if self.monitor == 'off':
            self.mnt_mode = 'off'
            self.mnt_best = 0
        else:
            self.mnt_mode, self.mnt_metric = self.monitor.split()
            assert self.mnt_mode in ['min', 'max']
            self.mnt_best = inf if self.mnt_mode == 'min' else -inf
            self.early_stop = cfg_trainer.get('early_stop', inf)
        self.start_epoch = 1
        self.checkpoint_dir = config.save_dir
        if 'trainer' in config.config:
            self.writer = TensorboardWriter(config.log_dir, self.logger,
                                            cfg_trainer['tensorboard'])
        if config.resume is not None:
            self._resume_checkpoint(config.resume)

    @abstractmethod
    def _train_epoch(self, epoch):
        """Training logic for an epoch

        :param epoch: Current epoch number
        """
        raise NotImplementedError

    def train(self):
        """Full training logic."""
        not_improved_count = 0
        for epoch in range(self.start_epoch, self.epochs + 1):
            result = self._train_epoch(epoch)
            log = {'epoch': epoch}
            for key, value in result.items():
                if key == 'metrics':
                    log.update({mtr.__name__: value[i]
                                for i, mtr in enumerate(self.metrics)})
                elif key == 'val_metrics':
                    log.update({'val_' + mtr.__name__: value[i]
                                for i, mtr in enumerate(self.metrics)})
                else:
                    log[key] = value
            for key, value in log.items():
                self.logger.info(' {:15s}: {}'.format(str(key), value))
            best = False
            if self.mnt_mode != 'off':
                try:
                    improved = ((self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best)
                                or (self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best))
                except KeyError:
                    self.logger.warning("Warning: Metric '{}' is not found. Model "
                                        'performance monitoring is disabled.'.format(self.mnt_metric))
                    self.mnt_mode = 'off'
                    improved = False
                if improved:
                    self.mnt_best = log[self.mnt_metric]
                    not_improved_count = 0
                    best = True
                else:
                    not_improved_count += 1
                if not_improved_count > self.early_stop:
                    self.logger.info("Validation performance didn't improve for {} "
                                     'epochs. Training stops.'.format(self.early_stop))
                    break
            if epoch % self.save_period == 0:
                self._save_checkpoint(epoch, save_best=best)

    def _prepare_device(self, n_gpu_use):
        """Set up GPU device if available and move the model into the configured device."""
        n_gpu = torch.cuda.device_count()
        if n_gpu_use > 0 and n_gpu == 0:
            self.logger.warning("Warning: There's no GPU available on this machine, "
                                'training will be performed on CPU.')
            n_gpu_use = 0
        if n_gpu_use > n_gpu:
            self.logger.warning("Warning: The number of GPU's configured to use is {}, "
                                'but only {} are available on this machine.'.format(n_gpu_use, n_gpu))
            n_gpu_use = n_gpu
        device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
        list_ids = list(range(n_gpu_use))
        return device, list_ids

    def _save_checkpoint(self, epoch, save_best=False):
        """Saving checkpoints

        :param epoch: current epoch number
        :param save_best: if True, also save the checkpoint as 'model_best.pth'
        """
        arch = type(self.model).__name__
        state = {'arch': arch, 'epoch': epoch,
                 'state_dict': self.model.state_dict(),
                 'optimizer': self.optimizer.state_dict(),
                 'monitor_best': self.mnt_best, 'config': self.config}
        if self.save_multiple:
            filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
        else:
            filename = str(self.checkpoint_dir / 'checkpoint.pth')
        torch.save(state, filename)
        self.logger.info('Saving checkpoint: {} ...'.format(filename))
        if save_best:
            best_path = str(self.checkpoint_dir / 'model_best.pth')
            torch.save(state, best_path)
            self.logger.info('Saving current best: model_best.pth ...')

    def _resume_checkpoint(self, resume_path):
        """Resume from saved checkpoints

        :param resume_path: Checkpoint path to be resumed
        """
        resume_path = str(resume_path)
        self.logger.info('Loading checkpoint: {} ...'.format(resume_path))
        checkpoint = torch.load(resume_path)
        self.start_epoch = checkpoint['epoch'] + 1
        self.mnt_best = checkpoint['monitor_best']
        if checkpoint['config']['arch'] != self.config['arch']:
            self.logger.warning('Warning: Architecture configuration given in config '
                                'file is different from that of checkpoint. This may '
                                'yield an exception while state_dict is being loaded.')
        checkpoint_state_dict = filter_state_dict(
            checkpoint['state_dict'],
            checkpoint['arch'] == 'DataParallel' and len(self.device_ids) == 1)
        self.model.load_state_dict(checkpoint_state_dict, strict=False)
        if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
            self.logger.warning('Warning: Optimizer type given in config file is '
                                'different from that of checkpoint. Optimizer '
                                'parameters not being resumed.')
        else:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.logger.info('Checkpoint loaded. Resume training from epoch {}'.format(self.start_epoch))
def main(config):
    logger = config.get_logger('test')
    output_dir = Path(config.config.get('output_dir', 'saved'))
    output_dir.mkdir(exist_ok=True, parents=True)
    file_name = config.config.get('file_name', 'pc.ply')
    use_mask = config.config.get('use_mask', True)
    roi = config.config.get('roi', None)
    max_d = config.config.get('max_d', 30)
    min_d = config.config.get('min_d', 3)
    start = config.config.get('start', 0)
    end = config.config.get('end', -1)

    data_loader = DataLoader(DS_Wrapper(config.initialize('data_set', module_data),
                                        start=start, end=end),
                             batch_size=1, shuffle=False, num_workers=8)
    model = config.initialize('arch', module_arch)
    logger.info(model)
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    mask_fill = 32
    n = data_loader.batch_size
    target_image_size = data_loader.dataset.dataset.target_image_size
    plysaver = PLYSaver(target_image_size[0], target_image_size[1], min_d=min_d,
                        max_d=max_d, batch_size=n, roi=roi, dropout=0.75)
    plysaver.to(device)

    pose_buffer = []
    intrinsics_buffer = []
    mask_buffer = []
    keyframe_buffer = []
    depth_buffer = []
    buffer_length = 5
    min_hits = 1
    key_index = buffer_length // 2

    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data = to(data, device)
            result = model(data)
            if not isinstance(result, dict):
                result = {'result': result[0]}
            output = result['result']
            if 'cv_mask' not in result:
                result['cv_mask'] = output.new_zeros(output.shape)
            mask = (result['cv_mask'] >= 0.1).to(dtype=torch.float32)
            mask = (F.conv2d(mask, mask.new_ones((1, 1, mask_fill + 1, mask_fill + 1)),
                             padding=mask_fill // 2) < 1).to(dtype=torch.float32)
            pose_buffer += data['keyframe_pose']
            intrinsics_buffer += [data['keyframe_intrinsics']]
            mask_buffer += [mask]
            keyframe_buffer += [data['keyframe']]
            depth_buffer += [output]
            if len(pose_buffer) >= buffer_length:
                pose = pose_buffer[key_index]
                intrinsics = intrinsics_buffer[key_index]
                keyframe = keyframe_buffer[key_index]
                depth = depth_buffer[key_index]
                mask = (torch.sum(torch.stack(mask_buffer), dim=0)
                        > (buffer_length - min_hits)).to(dtype=torch.float32)
                if use_mask:
                    depth *= mask
                plysaver.add_depthmap(depth, keyframe, intrinsics, pose)
                del pose_buffer[0]
                del intrinsics_buffer[0]
                del mask_buffer[0]
                del keyframe_buffer[0]
                del depth_buffer[0]
        with open(output_dir / file_name, 'wb') as f:
            plysaver.save(f)
class KittiOdometryDataloader(BaseDataLoader):
    def __init__(self, batch_size=1, shuffle=True, validation_split=0.0,
                 num_workers=4, **kwargs):
        self.dataset = KittiOdometryDataset(**kwargs)
        super().__init__(self.dataset, batch_size, shuffle, validation_split,
                         num_workers)
def main():
    parser = argparse.ArgumentParser(
        description='This script creates depth images from annotated velodyne data.')
    parser.add_argument('--output', '-o', help='Path of KITTI odometry dataset',
                        default='../../../data/dataset')
    parser.add_argument('--input', '-i', help='Path to KITTI depth dataset (zipped)',
                        required=True)
    parser.add_argument('--depth_folder', '-d',
                        help='Name of depth map folders for the respective sequences',
                        default='image_depth_annotated')
    args = parser.parse_args()

    input = Path(args.input)
    output = Path(args.output)
    depth_folder = args.depth_folder
    drives = mapping.keys()

    print('Creating folder structure')
    for drive in drives:
        sequence = mapping[drive]
        folder = output / 'sequences' / sequence / depth_folder
        folder.mkdir(parents=True, exist_ok=True)
        print(folder)

    print('Extracting enhanced depth maps')
    with ZipFile(input) as depth_archive:
        for name in depth_archive.namelist():
            if name[0] == 't':
                drive = name[6:27]
            else:
                drive = name[4:25]
            cam = name[-16]
            img = name[-10:]
            if drive == '2011_09_30_drive_0028':
                raw_img_id = int(img.split('.')[0])
                if raw_img_id < 1100:
                    continue
                else:
                    img = '{:06d}.png'.format(raw_img_id - 1100)
            if cam == '2' and drive in drives:
                to = output / 'sequences' / mapping[drive] / depth_folder / img
                print(name, ' -> ', to)
                with depth_archive.open(name) as i, open(to, 'wb') as o:
                    shutil.copyfileobj(i, o)
def main(config: ConfigParser):
    logger = config.get_logger('train')
    data_loader = config.initialize('data_loader', module_data)
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    if 'arch' in config.config:
        models = [config.initialize('arch', module_arch)]
    else:
        models = config.initialize_list('models', module_arch)

    results = []
    for i, model in enumerate(models):
        # Build JSON-friendly snapshots of the model and dataset attributes.
        model_dict = dict(model.__dict__)
        keys = list(model_dict.keys())
        for k in keys:
            if k.startswith('_'):
                model_dict.__delitem__(k)
            elif type(model_dict[k]) == np.ndarray:
                model_dict[k] = list(model_dict[k])
        dataset_dict = dict(data_loader.dataset.__dict__)
        keys = list(dataset_dict.keys())
        for k in keys:
            if k.startswith('_'):
                dataset_dict.__delitem__(k)
            elif type(dataset_dict[k]) == np.ndarray:
                dataset_dict[k] = list(dataset_dict[k])
            elif isinstance(dataset_dict[k], pathlib.PurePath):
                dataset_dict[k] = str(dataset_dict[k])
        logger.info(model_dict)
        logger.info(dataset_dict)
        logger.info(f'{sum(p.numel() for p in model.parameters())} total parameters')

        evaluater = Evaluater(model, loss, metrics, config=config,
                              data_loader=data_loader)
        result = evaluater.eval(i)
        del model
        result['metrics_info'] = [metric.__name__ for metric in metrics]
        logger.info(result)
        results.append({'model': model_dict, 'dataset': dataset_dict,
                        'result': result})

    save_file = Path(config.log_dir) / 'results.json'
    with open(save_file, 'w') as f:
        json.dump(results, f, indent=4)
    logger.info('Finished')
class Evaluater(BaseTrainer):
    """
    Evaluater class

    Note:
        Inherited from BaseTrainer.
    """
    def __init__(self, model, loss, metrics, config, data_loader):
        super().__init__(model, loss, metrics, None, config)
        self.config = config
        self.data_loader = data_loader
        self.log_step = config['evaluater'].get('log_step', int(np.sqrt(data_loader.batch_size)))
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.len_data = len(self.data_loader)
        if isinstance(loss, torch.nn.Module):
            self.loss.to(self.device)
            if len(self.device_ids) > 1:
                self.loss = torch.nn.DataParallel(self.loss, self.device_ids)
        self.roi = config['evaluater'].get('roi', None)
        self.alpha = config['evaluater'].get('alpha', None)
        self.max_distance = config['evaluater'].get('max_distance', None)
        self.correct_length = config['evaluater'].get('correct_length', False)
        self.median_scaling = config['evaluater'].get('median_scaling', False)
        self.eval_mono = config['evaluater'].get('eval_mono', False)

    def _eval_metrics(self, data_dict):
        acc_metrics = np.zeros(len(self.metrics))
        acc_metrics_mv = np.zeros(len(self.metrics))
        if self.median_scaling:
            # Applied once instead of once per metric; median scaling is
            # idempotent (after the first pass the scale factor is 1).
            data_dict = median_scaling(data_dict)
        for i, metric in enumerate(self.metrics):
            acc_metrics[i] += metric(data_dict, self.roi, self.max_distance, eval_mono=self.eval_mono)
            acc_metrics_mv[i] += metric(data_dict, self.roi, self.max_distance, use_cvmask=True,
                                        eval_mono=self.eval_mono)
        if np.any(np.isnan(acc_metrics)):
            acc_metrics = np.zeros(len(self.metrics))
            valid = np.zeros(len(self.metrics))
        else:
            valid = np.ones(len(self.metrics))
        if np.any(np.isnan(acc_metrics_mv)):
            acc_metrics_mv = np.zeros(len(self.metrics))
            valid_mv = np.zeros(len(self.metrics))
        else:
            valid_mv = np.ones(len(self.metrics))
        return acc_metrics, valid, acc_metrics_mv, valid_mv

    def eval(self, model_index):
        """
        Evaluation logic for one model.

        :param model_index: Index of the model being evaluated.
        :return: A log that contains all information you want to save.

        Note:
            If you have additional information to record, for example:
            > additional_log = {"x": x, "y": y}
            merge it with log before return, i.e.
            > log = {**log, **additional_log}
            > return log

            The metrics in log must have the key 'metrics'.
        """
        self.model.eval()
        total_loss = 0
        total_loss_dict = {}
        total_metrics = np.zeros(len(self.metrics))
        total_metrics_valid = np.zeros(len(self.metrics))
        total_metrics_mv = np.zeros(len(self.metrics))
        total_metrics_valid_mv = np.zeros(len(self.metrics))
        total_metrics_runningavg = np.zeros(len(self.metrics))
        num_samples = 0
        for batch_idx, (data, target) in enumerate(self.data_loader):
            data, target = to(data, self.device), to(target, self.device)
            data['target'] = target
            with torch.no_grad():
                data = self.model(data)
                loss_dict = {'loss': torch.tensor([0])}
                loss = loss_dict['loss']
            total_loss += loss.item()
            total_loss_dict = operator_on_dict(total_loss_dict, loss_dict, operator.add)
            metrics, valid, metrics_mv, valid_mv = self._eval_metrics(data)
            total_metrics += metrics
            total_metrics_valid += valid
            total_metrics_mv += metrics_mv
            total_metrics_valid_mv += valid_mv
            # Batch-size weighted running mean; stays correct even if the last
            # batch is smaller than the rest.
            batch_size = target.shape[0]
            total_metrics_runningavg = (total_metrics_runningavg * (num_samples / (num_samples + batch_size))
                                        + metrics * (batch_size / (num_samples + batch_size)))
            num_samples += batch_size
            if batch_idx % self.log_step == 0:
                self.logger.debug(f'Evaluating {self._progress(batch_idx)} '
                                  f'Loss: {(total_loss / (batch_idx + 1)):.6f} '
                                  f'Metrics: {list(total_metrics / (batch_idx + 1))}')
        log = {'loss': total_loss / self.len_data,
               'metrics': self.save_digits((total_metrics / total_metrics_valid).tolist()),
               'metrics_mv': self.save_digits((total_metrics_mv / total_metrics_valid_mv).tolist()),
               'metrics_correct': self.save_digits(total_metrics_runningavg.tolist()),
               'valid_batches': total_metrics_valid[0],
               'valid_batches_mv': total_metrics_valid_mv[0]}
        for loss_component, v in total_loss_dict.items():
            log[f'loss_{loss_component}'] = v.item() / self.len_data
        return log

    def save_digits(self, input_list):
        # Round to three decimals for compact JSON logs.
        return [float('{:.3f}'.format(i)) for i in input_list]

    def _progress(self, batch_idx):
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader, 'n_samples'):
            current = batch_idx * self.data_loader.batch_size
            total = self.data_loader.n_samples
        else:
            current = batch_idx
            total = self.len_data
        return base.format(current, total, 100.0 * current / total)
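# Numeric check of the batch-weighted running mean used in Evaluater.eval()
# above: averaging a per-batch mean of 1.5 (batch of 2) and 4.0 (batch of 1)
# must give the per-sample mean 7/3, not the mean of the per-batch means.
import numpy as np

running, n = np.zeros(1), 0
for metrics, bs in [(np.array([1.5]), 2), (np.array([4.0]), 1)]:
    running = running * (n / (n + bs)) + metrics * (bs / (n + bs))
    n += bs
assert np.isclose(running[0], (1.5 * 2 + 4.0 * 1) / 3)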
def to(data, device): if isinstance(data, dict): return {k: to(data[k], device) for k in data.keys()} elif isinstance(data, list): return [to(v, device) for v in data] else: return data.to(device)
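# Usage sketch for the recursive to() helper above: it moves every tensor in a
# nested dict/list structure to the target device in one call. Leaves must
# support .to(device); plain ints or strings would raise here.
import torch

batch = {'keyframe': torch.zeros(1, 3, 8, 8),
         'frames': [torch.zeros(1, 3, 8, 8), torch.zeros(1, 3, 8, 8)]}
batch = to(batch, torch.device('cpu'))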
def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):
    """
    Setup logging configuration.
    """
    log_config = Path(log_config)
    if log_config.is_file():
        config = read_json(log_config)
        # Redirect file handlers into the run's save directory.
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                handler['filename'] = str(save_dir / handler['filename'])
        logging.config.dictConfig(config)
    else:
        print('Warning: logging configuration file was not found at {}.'.format(log_config))
        logging.basicConfig(level=default_level)
class TensorboardWriter():
    def __init__(self, log_dir, logger, enabled):
        self.writer = None
        self.selected_module = ''
        if enabled:
            log_dir = str(log_dir)
            # Try the native PyTorch writer first, then fall back to tensorboardX.
            succeeded = False
            for module in ['torch.utils.tensorboard', 'tensorboardX']:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    succeeded = True
                    break
                except ImportError:
                    succeeded = False
                self.selected_module = module
            if not succeeded:
                message = ("Warning: visualization (Tensorboard) is configured to use, but currently not "
                           "installed on this machine. Please install TensorboardX with "
                           "'pip install tensorboardx', upgrade PyTorch to version >= 1.1 to use "
                           "'torch.utils.tensorboard', or turn off the option in the 'config.json' file.")
                logger.warning(message)
        self.step = 0
        self.mode = ''
        self.tb_writer_ftns = {'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
                               'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'}
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
        self.timer = Timer()

    def set_step(self, step, mode='train'):
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer.reset()
        else:
            duration = self.timer.check()
            self.add_scalar('steps_per_sec', 1 / duration)

    def __getattr__(self, name):
        """
        If visualization is configured to use:
            return add_data() methods of tensorboard with additional information (step, tag) added.
        Otherwise:
            return a blank function handle that does nothing.
        """
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)

            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)
            return wrapper
        else:
            # object has no __getattr__; use __getattribute__ so a genuinely
            # missing attribute raises the helpful error below instead of
            # relying on a lucky AttributeError from object.__getattr__.
            try:
                attr = object.__getattribute__(self, name)
            except AttributeError:
                raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
            return attr
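# Usage sketch for TensorboardWriter above (the log directory is illustrative;
# the logger argument is any logging.Logger). Unknown add_* calls silently
# become no-ops when no tensorboard backend is installed.
import logging

writer = TensorboardWriter('saved/log', logging.getLogger(__name__), enabled=True)
writer.set_step(0, mode='train')
writer.add_scalar('loss', 0.5)   # logged as tag 'loss/train' at step 0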
def _conv_bn_relu(in_ch, out_ch, kernel_size=3, padding=1, stride=1):
    # Shared Conv-BN-ReLU block used by the fusion modules below.
    return [nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, padding=padding, stride=stride),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)]


def _expand_net(in_ch, mid_ch, num_blocks):
    # num_blocks stride-2 Conv-BN-ReLU blocks. Note: in the original code every
    # conv took in_ch input channels, which only type-checked because
    # cv_hypo_num == mid_dim by default; from the second block on the input
    # actually has mid_ch channels, so that is used here.
    layers = _conv_bn_relu(in_ch, mid_ch, stride=2)
    for _ in range(num_blocks - 1):
        layers += _conv_bn_relu(mid_ch, mid_ch, stride=2)
    return nn.Sequential(*layers)


class CrossCueFusion(nn.Module):
    def __init__(self, cv_hypo_num=32, mid_dim=32, input_size=(256, 512)):
        super().__init__()
        self.cv_hypo_num = cv_hypo_num
        self.mid_dim = mid_dim
        self.residual_connection = True
        # Use one extra stride-2 block for wide inputs to keep the
        # (h*w x h*w) attention matrices affordable.
        self.is_reduce = input_size[1] > 650
        num_blocks = 3 if self.is_reduce else 2
        self.mono_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.multi_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.kq_dim = self.mid_dim // 4 if self.mid_dim > 128 else self.mid_dim
        self.lin_mono_k = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_mono_q = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_mono_v = nn.Conv2d(self.mid_dim, self.mid_dim, kernel_size=1)
        self.lin_multi_k = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_multi_q = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_multi_v = nn.Conv2d(self.mid_dim, self.mid_dim, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        if self.residual_connection:
            self.mono_reg = nn.Sequential(*(_conv_bn_relu(self.cv_hypo_num, self.mid_dim)
                                            + _conv_bn_relu(self.mid_dim, self.mid_dim)))
            self.multi_reg = nn.Sequential(*_conv_bn_relu(self.cv_hypo_num, self.mid_dim,
                                                          kernel_size=1, padding=0))
            self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, mono_pseudo_cost, cost_volume):
        init_b, init_c, init_h, init_w = cost_volume.shape
        mono_feat = self.mono_expand(mono_pseudo_cost)
        multi_feat = self.multi_expand(cost_volume)
        b, c, h, w = multi_feat.shape
        # Self-attention weights of each cue...
        mono_q = self.lin_mono_q(mono_feat).view(b, -1, h * w).permute(0, 2, 1)
        mono_k = self.lin_mono_k(mono_feat).view(b, -1, h * w)
        mono_atten = self.softmax(torch.bmm(mono_q, mono_k))
        multi_q = self.lin_multi_q(multi_feat).view(b, -1, h * w).permute(0, 2, 1)
        multi_k = self.lin_multi_k(multi_feat).view(b, -1, h * w)
        multi_atten = self.softmax(torch.bmm(multi_q, multi_k))
        # ...applied to the values of the *other* cue (cross guidance).
        mono_v = self.lin_mono_v(mono_feat).view(b, -1, h * w)
        mono_out = torch.bmm(mono_v, multi_atten.permute(0, 2, 1)).view(b, self.mid_dim, h, w)
        multi_v = self.lin_multi_v(multi_feat).view(b, -1, h * w)
        multi_out = torch.bmm(multi_v, mono_atten.permute(0, 2, 1)).view(b, self.mid_dim, h, w)
        fused = torch.cat((multi_out, mono_out), dim=1)
        fused = torch.nn.functional.interpolate(fused, size=(init_h, init_w))
        if self.residual_connection:
            mono_residual = self.mono_reg(mono_pseudo_cost)
            multi_residual = self.multi_reg(cost_volume)
            fused_cat = torch.cat((mono_residual, multi_residual), dim=1)
            fused = fused_cat + self.gamma * fused
        return fused
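# Shape walkthrough of the cross-attention in CrossCueFusion.forward() above,
# with illustrative sizes: each branch attends over all h*w positions of the
# downsampled feature maps, so the attention matrix is (b, h*w, h*w).
import torch

b, c, h, w = 2, 32, 16, 32
feat = torch.randn(b, c, h, w)
q = feat.view(b, -1, h * w).permute(0, 2, 1)      # (b, h*w, c)
k = feat.view(b, -1, h * w)                       # (b, c, h*w)
atten = torch.softmax(torch.bmm(q, k), dim=-1)    # (b, h*w, h*w)
v = feat.view(b, -1, h * w)                       # (b, c, h*w)
out = torch.bmm(v, atten.permute(0, 2, 1)).view(b, c, h, w)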
class MultiGuideMono(nn.Module):
    # Ablation of CrossCueFusion: only the multi-view attention guides the
    # monocular values.
    def __init__(self, cv_hypo_num=32, mid_dim=32, input_size=(256, 512)):
        super().__init__()
        self.cv_hypo_num = cv_hypo_num
        self.mid_dim = mid_dim
        self.residual_connection = True
        self.is_reduce = input_size[1] > 650
        num_blocks = 3 if self.is_reduce else 2
        self.mono_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.multi_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.kq_dim = self.mid_dim // 4 if self.mid_dim > 128 else self.mid_dim
        self.lin_mono_v = nn.Conv2d(self.mid_dim, self.mid_dim, kernel_size=1)
        self.lin_multi_k = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_multi_q = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        if self.residual_connection:
            self.mono_reg = nn.Sequential(*(_conv_bn_relu(self.cv_hypo_num, self.mid_dim)
                                            + _conv_bn_relu(self.mid_dim, self.mid_dim)))
            self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, mono_pseudo_cost, cost_volume):
        init_b, init_c, init_h, init_w = cost_volume.shape
        mono_feat = self.mono_expand(mono_pseudo_cost)
        multi_feat = self.multi_expand(cost_volume)
        b, c, h, w = multi_feat.shape
        multi_q = self.lin_multi_q(multi_feat).view(b, -1, h * w).permute(0, 2, 1)
        multi_k = self.lin_multi_k(multi_feat).view(b, -1, h * w)
        multi_atten = self.softmax(torch.bmm(multi_q, multi_k))
        mono_v = self.lin_mono_v(mono_feat).view(b, -1, h * w)
        mono_out = torch.bmm(mono_v, multi_atten.permute(0, 2, 1)).view(b, self.mid_dim, h, w)
        fused = torch.nn.functional.interpolate(mono_out, size=(init_h, init_w))
        if self.residual_connection:
            mono_residual = self.mono_reg(mono_pseudo_cost)
            fused = mono_residual + self.gamma * fused
        return fused
class MonoGuideMulti(nn.Module):
    # Ablation of CrossCueFusion: only the monocular attention guides the
    # multi-view values.
    def __init__(self, cv_hypo_num=32, mid_dim=32, input_size=(256, 512)):
        super().__init__()
        self.cv_hypo_num = cv_hypo_num
        self.mid_dim = mid_dim
        self.residual_connection = True
        self.is_reduce = input_size[1] > 650
        num_blocks = 3 if self.is_reduce else 2
        self.mono_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.multi_expand = _expand_net(self.cv_hypo_num, self.mid_dim, num_blocks)
        self.kq_dim = self.mid_dim // 4 if self.mid_dim > 128 else self.mid_dim
        self.lin_mono_k = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_mono_q = nn.Conv2d(self.mid_dim, self.kq_dim, kernel_size=1)
        self.lin_multi_v = nn.Conv2d(self.mid_dim, self.mid_dim, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        if self.residual_connection:
            self.multi_reg = nn.Sequential(*_conv_bn_relu(self.cv_hypo_num, self.mid_dim,
                                                          kernel_size=1, padding=0))
            self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, mono_pseudo_cost, cost_volume):
        init_b, init_c, init_h, init_w = cost_volume.shape
        mono_feat = self.mono_expand(mono_pseudo_cost)
        multi_feat = self.multi_expand(cost_volume)
        b, c, h, w = multi_feat.shape
        mono_q = self.lin_mono_q(mono_feat).view(b, -1, h * w).permute(0, 2, 1)
        mono_k = self.lin_mono_k(mono_feat).view(b, -1, h * w)
        mono_atten = self.softmax(torch.bmm(mono_q, mono_k))
        multi_v = self.lin_multi_v(multi_feat).view(b, -1, h * w)
        multi_out = torch.bmm(multi_v, mono_atten.permute(0, 2, 1)).view(b, self.mid_dim, h, w)
        fused = torch.nn.functional.interpolate(multi_out, size=(init_h, init_w))
        if self.residual_connection:
            multi_residual = self.multi_reg(cost_volume)
            fused = multi_residual + self.gamma * fused
        return fused
class DyMultiDepthModel(nn.Module):
    def __init__(self, inv_depth_min_max=(0.33, 0.0025), cv_depth_steps=32, pretrain_mode=False,
                 pretrain_dropout=0.0, pretrain_dropout_mode=0, augmentation=None, use_mono=True,
                 use_stereo=False, use_ssim=True, sfcv_mult_mask=True, simple_mask=False,
                 mask_use_cv=True, mask_use_feats=True, cv_patch_size=3, depth_large_model=False,
                 no_cv=False, freeze_backbone=True, freeze_module=(), checkpoint_location=None,
                 mask_cp_loc=None, depth_cp_loc=None, fusion_type='ccf_fusion', input_size=[256, 512],
                 ccf_mid_dim=32, use_img_in_depthnet=True, backbone_type='resnet18'):
        """
        :param inv_depth_min_max: Min / max (inverse) depth. (Default=(0.33, 0.0025))
        :param cv_depth_steps: Number of depth steps for the cost volume. (Default=32)
        :param pretrain_mode: Which pretrain mode to use:
            0 / False: Run full network.
            1 / True: Only run depth module. In this mode, dropout can be activated to zero out patches
                from the unmasked cost volume. Dropout was not used for the paper.
            2: Only run mask module. In this mode, the network will return the mask as the main result.
            3: Only run depth module, but use the auxiliary masks to mask the cost volume. This mode
                was not used in the paper. (Default=0)
        :param pretrain_dropout: Dropout rate used in pretrain_mode=1. (Default=0)
        :param augmentation: Which augmentation module to use. "mask"=MaskAugmentation,
            "depth"=DepthAugmentation. The exact way to use this is very context dependent. Refer to
            the training scripts for more details. (Default="none")
        :param use_mono: Use monocular frames during the forward pass. (Default=True)
        :param use_stereo: Use stereo frame during the forward pass. (Default=False)
        :param use_ssim: Use SSIM during cost volume computation. (Default=True)
        :param sfcv_mult_mask: For the single frame cost volumes: If a pixel does not have a valid
            reprojection at any depth step, all depths get invalidated. (Default=True)
        :param simple_mask: Use the standard cost volume instead of multiple single frame cost volumes
            in the mask module. (Default=False)
        :param cv_patch_size: Patch size over which the SSIM errors get averaged. (Default=3)
        :param freeze_module: Freeze given string list of modules. (Default=())
        :param checkpoint_location: Load given list of checkpoints. (Default=None)
        :param mask_cp_loc: Load list of checkpoints for the mask module. (Default=None)
        :param depth_cp_loc: Load list of checkpoints for the depth module. (Default=None)
        """
        super().__init__()
        self.inv_depth_min_max = inv_depth_min_max
        self.cv_depth_steps = cv_depth_steps
        self.use_mono = use_mono
        self.use_stereo = use_stereo
        self.use_ssim = use_ssim
        self.sfcv_mult_mask = sfcv_mult_mask
        self.pretrain_mode = int(pretrain_mode)
        self.pretrain_dropout = pretrain_dropout
        self.pretrain_dropout_mode = pretrain_dropout_mode
        self.augmentation = augmentation
        self.simple_mask = simple_mask
        self.mask_use_cv = mask_use_cv
        self.mask_use_feats = mask_use_feats
        self.cv_patch_size = cv_patch_size
        self.no_cv = no_cv
        self.depth_large_model = depth_large_model
        self.checkpoint_location = checkpoint_location
        self.mask_cp_loc = mask_cp_loc
        self.depth_cp_loc = depth_cp_loc
        self.freeze_module = freeze_module
        self.freeze_backbone = freeze_backbone
        self.fusion_type = fusion_type
        self.input_size = input_size
        self.ccf_mid_dim = ccf_mid_dim
        self.use_img_in_depthnet = use_img_in_depthnet
        self.backbone_type = backbone_type
        assert self.backbone_type in ['resnet18', 'efficientnetb5']
        self.extra_input_dim = 0
        self.depthmodule_in_chn = self.cv_depth_steps
        if fusion_type == 'ccf_fusion':
            self.fusion_module = CrossCueFusion(cv_hypo_num=self.cv_depth_steps, mid_dim=32,
                                                input_size=self.input_size)
            self.depthmodule_in_chn = self.ccf_mid_dim * 2
        elif fusion_type == 'mono_guide_multi':
            self.fusion_module = MonoGuideMulti(cv_hypo_num=self.cv_depth_steps, mid_dim=32,
                                                input_size=self.input_size)
            self.depthmodule_in_chn = self.ccf_mid_dim
        elif fusion_type == 'multi_guide_mono':
            self.fusion_module = MultiGuideMono(cv_hypo_num=self.cv_depth_steps, mid_dim=32,
                                                input_size=self.input_size)
            self.depthmodule_in_chn = self.ccf_mid_dim
        if self.backbone_type == 'resnet18':
            self._feature_extractor = ResnetEncoder(num_layers=18, pretrained=True)
        elif self.backbone_type == 'efficientnetb5':
            self._feature_extractor = EfficientNetEncoder(pretrained=True)
        if self.freeze_backbone:
            for p in self._feature_extractor.parameters(True):
                p.requires_grad_(False)
        self.cv_module = CostVolumeModule(use_mono=use_mono, use_stereo=use_stereo, use_ssim=use_ssim,
                                          sfcv_mult_mask=self.sfcv_mult_mask, patch_size=cv_patch_size)
        self.depth_module = DepthModule(self.depthmodule_in_chn,
                                        feature_channels=self._feature_extractor.num_ch_enc,
                                        large_model=self.depth_large_model,
                                        use_input_img=self.use_img_in_depthnet)
        self.mono_module = MonoDepthModule(extra_input_dim=self.extra_input_dim,
                                           feature_channels=self._feature_extractor.num_ch_enc,
                                           large_model=self.depth_large_model)
        if self.checkpoint_location is not None:
            if not isinstance(checkpoint_location, list):
                checkpoint_location = [checkpoint_location]
            for cp in checkpoint_location:
                checkpoint = torch.load(cp, map_location=torch.device('cpu'))
                checkpoint_state_dict = checkpoint['state_dict']
                checkpoint_state_dict = filter_state_dict(checkpoint_state_dict,
                                                          checkpoint['arch'] == 'DataParallel')
                self.load_state_dict(checkpoint_state_dict, strict=True)
        for module_name in self.freeze_module:
            module = getattr(self, module_name + '_module')
            module.eval()
            for param in module.parameters(True):
                param.requires_grad_(False)
        if self.augmentation == 'depth':
            self.augmenter = DepthAugmentation()
        elif self.augmentation == 'mask':
            self.augmenter = MaskAugmentation()
        else:
            self.augmenter = None

    def forward(self, data_dict):
        keyframe = data_dict['keyframe']
        data_dict['inv_depth_min'] = keyframe.new_tensor([self.inv_depth_min_max[0]])
        data_dict['inv_depth_max'] = keyframe.new_tensor([self.inv_depth_min_max[1]])
        data_dict['cv_depth_steps'] = keyframe.new_tensor([self.cv_depth_steps], dtype=torch.int32)
        with torch.no_grad():
            data_dict = self.cv_module(data_dict)
        if self.augmenter is not None and self.training:
            self.augmenter(data_dict)
        data_dict['image_features'] = self._feature_extractor(data_dict['keyframe'] + 0.5)
        data_dict['cost_volume_init'] = data_dict['cost_volume']
        data_dict = self.mono_module(data_dict)
        data_dict['predicted_inverse_depths_mono'] = [
            (1 - pred) * self.inv_depth_min_max[1] + pred * self.inv_depth_min_max[0]
            for pred in data_dict['predicted_inverse_depths_mono']]
        mono_depth_pred = torch.clamp(1.0 / data_dict['predicted_inverse_depths_mono'][0],
                                      min=0.001, max=80.0).detach()
        b, c, h, w = keyframe.shape
        pseudo_mono_cost = self.pseudocost_from_mono(
            mono_depth_pred,
            depth_hypothesis=data_dict['cv_bin_steps'].view(1, -1, 1, 1).expand(b, -1, h, w).detach()
        ).detach()
        if self.training:
            # Note: F.dropout's p is the *drop* probability, so pretrain_dropout
            # acts as the keep rate of the cost-volume mask here.
            if self.pretrain_dropout_mode == 0:
                cv_mask = keyframe.new_ones(b, 1, h // 8, w // 8, requires_grad=False)
                F.dropout(cv_mask, p=1 - self.pretrain_dropout, training=self.training, inplace=True)
                cv_mask = (cv_mask != 0).float()
                cv_mask = F.interpolate(cv_mask, (h, w))
            else:
                cv_mask = keyframe.new_ones(b, 1, 1, 1, requires_grad=False)
                F.dropout(cv_mask, p=1 - self.pretrain_dropout, training=self.training, inplace=True)
                cv_mask = cv_mask.expand(-1, -1, h, w)
        else:
            cv_mask = keyframe.new_zeros(b, 1, h, w, requires_grad=False)
        data_dict['cv_mask'] = cv_mask
        data_dict['cost_volume'] = (1 - data_dict['cv_mask']) * self.fusion_module(
            pseudo_mono_cost, data_dict['cost_volume'])
        data_dict = self.depth_module(data_dict)
        data_dict['predicted_inverse_depths'] = [
            (1 - pred) * self.inv_depth_min_max[1] + pred * self.inv_depth_min_max[0]
            for pred in data_dict['predicted_inverse_depths']]
        if self.augmenter is not None and self.training:
            self.augmenter.revert(data_dict)
        data_dict['result'] = data_dict['predicted_inverse_depths'][0]
        data_dict['result_mono'] = data_dict['predicted_inverse_depths_mono'][0]
        data_dict['mask'] = data_dict['cv_mask']
        return data_dict

    def pseudocost_from_mono(self, monodepth, depth_hypothesis):
        # One-hot "cost" at the hypothesis plane closest to the monocular depth.
        abs_depth_diff = torch.abs(monodepth - depth_hypothesis)
        min_diff_index = torch.argmin(abs_depth_diff, dim=1, keepdim=True)
        pseudo_cost = depth_hypothesis.new_zeros(depth_hypothesis.shape)
        ones = depth_hypothesis.new_ones(depth_hypothesis.shape)
        pseudo_cost.scatter_(dim=1, index=min_diff_index, src=ones)
        return pseudo_cost

    def find_mincost_depth(self, cost_volume, depth_hypos):
        argmax = torch.argmax(cost_volume, dim=1, keepdim=True)
        mincost_depth = torch.gather(input=depth_hypos, dim=1, index=argmax)
        return mincost_depth
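# Small numeric check of the pseudocost_from_mono() logic above (re-implemented
# inline so it runs standalone): for each pixel, the hypothesis plane closest
# to the monocular depth gets a one-hot "cost" of 1.
import torch

mono = torch.full((1, 1, 2, 2), 4.2)                       # predicted depth
hypos = torch.tensor([1., 2., 4., 8.]).view(1, 4, 1, 1).expand(1, 4, 2, 2)
diff = torch.abs(mono - hypos)
onehot = torch.zeros_like(hypos).scatter_(1, torch.argmin(diff, 1, keepdim=True),
                                          torch.ones_like(hypos))
assert onehot[0, 2].all() and onehot[0, [0, 1, 3]].eq(0).all()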
def completeness_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None): return torch.mean((depth_prediction != 0).to(dtype=torch.float32))
def covered_gt_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None): gt_mask = (depth_gt != 0) return mask_mean((depth_prediction != 0).to(dtype=torch.float32), gt_mask)
def sc_inv_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None):
    """
    Computes the scale-invariant metric described in (14).
    :param depth_prediction: Depth prediction computed by the network
    :param depth_gt: GT depth
    :param roi: Specify a region of interest on which the metric should be computed
    :param max_distance: Maximum distance passed on to get_absolute_depth
    :return: metric (mean over batch_size)
    """
    depth_prediction, depth_gt = preprocess_roi(depth_prediction, depth_gt, roi)
    depth_prediction, depth_gt = get_positive_depth(depth_prediction, depth_gt)
    depth_prediction, depth_gt = get_absolute_depth(depth_prediction, depth_gt, max_distance)
    n = depth_gt.shape[2] * depth_gt.shape[3]
    E = torch.log(depth_prediction) - torch.log(depth_gt)
    E[torch.isnan(E)] = 0
    batch_metric = torch.sqrt((1 / n) * torch.sum(E ** 2, dim=[2, 3])
                              - (1 / n ** 2) * torch.sum(E, dim=[2, 3]) ** 2)
    batch_metric[torch.isnan(batch_metric)] = 0
    return torch.mean(batch_metric)
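# Quick property check of the scale-invariant error computed in sc_inv_metric()
# above (re-implemented inline so it runs standalone): the
# (1/n)*sum(E^2) - (1/n^2)*(sum E)^2 form is invariant to a global scale on the
# prediction, since log(s*d) only shifts E by a constant.
import torch

def _sc_inv(pred, gt):
    n = gt.shape[2] * gt.shape[3]
    E = torch.log(pred) - torch.log(gt)
    return torch.sqrt((E ** 2).sum(dim=[2, 3]) / n - E.sum(dim=[2, 3]) ** 2 / n ** 2).mean()

gt = torch.rand(1, 1, 8, 8) + 0.1
pred = torch.rand(1, 1, 8, 8) + 0.1
assert torch.isclose(_sc_inv(pred, gt), _sc_inv(pred * 3.7, gt), atol=1e-4)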
def l1_rel_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None): '\n Computes the L1-rel metric described in (15)\n :param depth_prediction: Depth prediction computed by the network\n :param depth_gt: GT Depth\n :param roi: Specify a region of interest on which the metric should be computed\n :return: metric (mean over batch_size)\n ' (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return torch.mean((torch.abs((depth_prediction - depth_gt)) / depth_gt))
def l1_inv_metric(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, roi=None, max_distance=None): '\n Computes the L1-inv metric described in (16)\n :param depth_prediction: Depth prediction computed by the network\n :param depth_gt: GT Depth\n :param roi: Specify a region of interest on which the metric should be computed\n :return: metric (mean over batch_size)\n ' (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) return torch.mean(torch.abs((depth_prediction - depth_gt)))
def a1_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) thresh = torch.max((depth_gt / depth_prediction), (depth_prediction / depth_gt)) return torch.mean((thresh < 1.25).type(torch.float))
def a2_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) thresh = torch.max((depth_gt / depth_prediction), (depth_prediction / depth_gt)).type(torch.float) return torch.mean((thresh < (1.25 ** 2)).type(torch.float))
def a3_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) thresh = torch.max((depth_gt / depth_prediction), (depth_prediction / depth_gt)).type(torch.float) return torch.mean((thresh < (1.25 ** 3)).type(torch.float))
def rmse_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) se = ((depth_prediction - depth_gt) ** 2) return torch.mean(torch.sqrt(torch.mean(se, dim=[1, 2, 3])))
def rmse_log_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) sle = ((torch.log(depth_prediction) - torch.log(depth_gt)) ** 2) return torch.mean(torch.sqrt(torch.mean(sle, dim=[1, 2, 3])))
def abs_rel_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return torch.mean((torch.abs((depth_prediction - depth_gt)) / depth_gt))
def sq_rel_metric(data_dict: dict, roi=None, max_distance=None): depth_prediction = data_dict['result'] depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return torch.mean((((depth_prediction - depth_gt) ** 2) / depth_gt))
def find_mincost_depth(cost_volume, depth_hypos): argmax = torch.argmax(cost_volume, dim=1, keepdim=True) mincost_depth = torch.gather(input=depth_hypos, dim=1, index=argmax) return mincost_depth
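# Sanity check for the module-level find_mincost_depth() above: gather() picks,
# per pixel, the depth hypothesis whose cost-volume score is highest.
import torch

cost = torch.tensor([0.1, 0.9, 0.2, 0.3]).view(1, 4, 1, 1)
hypos = torch.tensor([10., 20., 40., 80.]).view(1, 4, 1, 1)
assert find_mincost_depth(cost, hypos).item() == 20.0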
def a1_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return a1_base(depth_prediction, depth_gt, mask)
def a2_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return a2_base(depth_prediction, depth_gt, mask)
def a3_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return a3_base(depth_prediction, depth_gt, mask)
def rmse_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return rmse_base(depth_prediction, depth_gt, mask)
def rmse_log_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return rmse_log_base(depth_prediction, depth_gt, mask)
def abs_rel_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return abs_rel_base(depth_prediction, depth_gt, mask)
def sq_rel_sparse_metric(data_dict: dict, roi=None, max_distance=None, pred_all_valid=True, use_cvmask=False, eval_mono=False): depth_prediction = (data_dict['result_mono'] if eval_mono else data_dict['result']) depth_gt = data_dict['target'] (depth_prediction, depth_gt) = preprocess_roi(depth_prediction, depth_gt, roi) mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance, pred_all_valid=pred_all_valid) if use_cvmask: mask |= (~ (data_dict['mvobj_mask'] > 0.5)) (depth_prediction, depth_gt) = get_positive_depth(depth_prediction, depth_gt) (depth_prediction, depth_gt) = get_absolute_depth(depth_prediction, depth_gt, max_distance) return sq_rel_base(depth_prediction, depth_gt, mask)
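# The seven *_sparse_metric functions above share everything except the final
# *_base call. A possible refactor, shown here only as a sketch and not used
# elsewhere in this repo, would keep the public names as thin wrappers around:
def _sparse_metric(base_fn, data_dict, roi=None, max_distance=None,
                   pred_all_valid=True, use_cvmask=False, eval_mono=False):
    depth_prediction = data_dict['result_mono'] if eval_mono else data_dict['result']
    depth_gt = data_dict['target']
    depth_prediction, depth_gt = preprocess_roi(depth_prediction, depth_gt, roi)
    mask = get_mask(depth_prediction, depth_gt, max_distance=max_distance,
                    pred_all_valid=pred_all_valid)
    if use_cvmask:
        mask |= ~(data_dict['mvobj_mask'] > 0.5)
    depth_prediction, depth_gt = get_positive_depth(depth_prediction, depth_gt)
    depth_prediction, depth_gt = get_absolute_depth(depth_prediction, depth_gt, max_distance)
    return base_fn(depth_prediction, depth_gt, mask)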
def save_results(path, name, img, gt_depth, pred_depth, validmask, cv_mask, costvolume):
    savepath = os.path.join(path, name)
    device = img.device
    bs, _, h, w = img.shape
    img = img[0, ...].permute(1, 2, 0).detach().cpu().numpy() + 0.5
    gt_depth = gt_depth[0, ...].permute(1, 2, 0).detach().cpu().numpy()
    gt_depth[gt_depth == 80] = 0  # treat max-range returns as missing
    pred_depth = pred_depth[0, ...].permute(1, 2, 0).detach().cpu().numpy()
    validmask = validmask[0, 0, ...].detach().cpu().numpy()
    cv_mask = cv_mask[0, 0, ...].detach().cpu().numpy()
    # The following visualizations are computed but not pasted into the strip
    # below; they are kept for optional debugging output.
    error_map, _ = get_error_map_value(pred_depth, gt_depth, grag_crop=False, median_scaling=False)
    errorpil = numpy_intensitymap_to_pcolor(error_map, vmin=0, vmax=0.5, colormap='jet')
    validmask_pil = Image.fromarray((validmask * 255.0).astype(np.uint8))
    cv_mask_pil = Image.fromarray((cv_mask * 255.0).astype(np.uint8))
    depths = (1 / torch.linspace(0.0025, 0.33, 32, device=device)).view(1, -1, 1, 1).expand(bs, -1, h, w)
    cost_volume_depth = find_mincost_depth(costvolume, depths)
    cost_volume_depth = cost_volume_depth[0, ...].permute(1, 2, 0).detach().cpu().numpy()
    cv_depth_pil = numpy_intensitymap_to_pcolor(cost_volume_depth)
    pred_pil = numpy_intensitymap_to_pcolor(pred_depth)
    gt_pil = numpy_intensitymap_to_pcolor(gt_depth)
    img_pil = numpy_rgb_to_pil(img)
    # Stack input image, prediction and ground truth vertically.
    h, w, _ = gt_depth.shape
    dst = Image.new('RGB', (w, h * 3))
    dst.paste(img_pil, (0, 0))
    dst.paste(pred_pil, (0, h))
    dst.paste(gt_pil, (0, 2 * h))
    dst.save(savepath)
    print(f'saved to {savepath}')
def a1_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return a1_sparse_metric(data_dict, roi, max_distance, False)
def a2_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return a2_sparse_metric(data_dict, roi, max_distance, False)
def a3_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return a3_sparse_metric(data_dict, roi, max_distance, False)
def rmse_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return rmse_sparse_metric(data_dict, roi, max_distance, False)
def rmse_log_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return rmse_log_sparse_metric(data_dict, roi, max_distance, False)
def abs_rel_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return abs_rel_sparse_metric(data_dict, roi, max_distance, False)
def sq_rel_sparse_onlyvalid_metric(data_dict: dict, roi=None, max_distance=None): return sq_rel_sparse_metric(data_dict, roi, max_distance, False)
def a1_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return a1_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def a2_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return a2_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def a3_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return a3_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def rmse_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return rmse_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def rmse_log_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return rmse_log_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def abs_rel_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return abs_rel_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def sq_rel_sparse_onlydynamic_metric(data_dict: dict, roi=None, max_distance=None): return sq_rel_sparse_metric(data_dict, roi, max_distance, use_cvmask=True)
def a1_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    # Guard masked-out pixels against 0/0 before the ratio, mirroring a2_base
    # and a3_base below; this assumes mask marks pixels that mask_mean excludes
    # anyway, so the fill value never enters the result.
    depth_gt[mask] = 1
    depth_prediction[mask] = 1
    thresh = torch.max(depth_gt / depth_prediction, depth_prediction / depth_gt)
    return mask_mean((thresh < 1.25).type(torch.float), mask)


def a2_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    depth_gt[mask] = 1
    depth_prediction[mask] = 1
    thresh = torch.max(depth_gt / depth_prediction, depth_prediction / depth_gt)
    return mask_mean((thresh < (1.25 ** 2)).type(torch.float), mask)


def a3_base(depth_prediction: torch.Tensor, depth_gt: torch.Tensor, mask):
    depth_gt[mask] = 1
    depth_prediction[mask] = 1
    thresh = torch.max(depth_gt / depth_prediction, depth_prediction / depth_gt)
    return mask_mean((thresh < (1.25 ** 3)).type(torch.float), mask)
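# Why the masked-fill guard in the a*_base helpers above matters: invalid
# pixels often carry depth 0, and 0/0 poisons the thresh tensor with NaNs even
# though those pixels are excluded by mask_mean afterwards.
import torch

gt = torch.tensor([0.0, 5.0])
pred = torch.tensor([0.0, 5.0])
assert torch.isnan(torch.max(gt / pred, pred / gt)[0])   # 0/0 -> nan
gt[0] = pred[0] = 1.0                                    # the guard
assert not torch.isnan(torch.max(gt / pred, pred / gt)).any()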