code
stringlengths
17
6.64M
def blocks_tags(obj):
    """Recursively render ``obj`` into a flat list of HTML fragments.

    PIL images become <img> tags, scalars/strings become escaped <div>s,
    HTML objects pass through, dicts become key/value tables, and other
    iterables become inline blocks (or tables when items are tuples).
    """
    out = []
    if isinstance(obj, PIL.Image.Image):
        out.append(pil_to_html(obj))
    elif isinstance(obj, (str, int, float)):
        out.append('<div>')
        out.append(html_module.escape(str(obj)))
        out.append('</div>')
    elif isinstance(obj, IPython.display.HTML):
        out.append(obj.data)
    elif isinstance(obj, dict):
        # Render a dict as a list of (key, value) rows.
        out.extend(blocks_tags([(k, v) for (k, v) in obj.items()]))
    elif hasattr(obj, '__iter__'):
        blockstart = ('<div style="display:inline-block;text-align:center;line-height:1;'
                      + 'vertical-align:top;padding:1px">')
        blockend = '</div>'
        tstart = '<table style="display:inline-table">'
        tend = '</table>'
        rstart = '<tr style="padding:0">'
        rend = '</tr>'
        cstart = '<td style="text-align:left; vertical-align:top; padding:1px">'
        cend = '</td>'
        needs_end = False
        table_mode = False
        for i, item in enumerate(obj):
            if i == 0:
                # Decide block vs. table layout from the first item.
                needs_end = True
                if isinstance(item, tuple):
                    table_mode = True
                    out.append(tstart)
                else:
                    out.append(blockstart)
            if table_mode:
                out.append(rstart)
                if not isinstance(item, str) and hasattr(item, '__iter__'):
                    for cell in item:
                        out.append(cstart)
                        out.extend(blocks_tags(cell))
                        out.append(cend)
                else:
                    out.append(cstart)
                    out.extend(blocks_tags(item))
                    out.append(cend)
                out.append(rend)
            else:
                out.extend(blocks_tags(item))
        if needs_end:
            out.append(tend if table_mode else blockend)
    return out
def pil_to_b64(img, format='png'):
    """Encode *img* (PIL-like, has .save) as a base64 string in *format*."""
    buf = io.BytesIO()
    img.save(buf, format=format)
    raw = buf.getvalue()
    return base64.b64encode(raw).decode('utf-8')
def pil_to_url(img, format='png'):
    """Return a base64 data URI for *img* in the given image *format*."""
    encoded = pil_to_b64(img, format)
    return 'data:image/%s;base64,%s' % (format, encoded)
def pil_to_html(img, margin=1):
    """Return an <img> tag embedding *img* as a data URI, with a margin style."""
    style_attr = ' style="margin:%dpx"' % margin
    return '<img src="%s"%s>' % (pil_to_url(img), style_attr)
def a(x, cols=None):
    """Queue *x* in the global display buffer; flush once *cols* items queued."""
    global g_buffer
    if g_buffer is None:
        g_buffer = []
    g_buffer.append(x)
    if cols is not None and len(g_buffer) >= cols:
        flush()
def reset():
    """Discard anything queued for display by re-initialising the buffer."""
    global g_buffer
    g_buffer = []
def flush(*args, **kwargs):
    """Render and display queued items (if any), then clear the buffer."""
    global g_buffer
    if g_buffer is None:
        return
    pending = g_buffer
    g_buffer = None
    display(blocks(pending, *args, **kwargs))
def show(x=None, *args, **kwargs):
    """Flush any queued display items, then display *x* when given."""
    flush(*args, **kwargs)
    if x is None:
        return
    display(blocks(x, *args, **kwargs))
class CallableModule(types.ModuleType):
    """Module subclass whose instances are callable, delegating to show()."""

    def __init__(self):
        types.ModuleType.__init__(self, __name__)
        # Copy the real module's namespace so attribute access still works.
        self.__dict__.update(sys.modules[__name__].__dict__)

    def __call__(self, x=None, *args, **kwargs):
        show(x, *args, **kwargs)
class LinePlotter(object):
    """Accumulates (x, y) points for one scalar tag, forwards each point to a
    tensorboard-style writer, and can dump the full series to an .npz file."""

    def __init__(self, writer, tag):
        self.writer = writer  # object exposing add_scalar(tag, value, step, walltime)
        self.tag = tag        # scalar tag, e.g. 'loss/train'

    def plot(self, x, data, walltime=None):
        """Record one (x, data) point and log it via the writer."""
        if not hasattr(self, 'plot_data'):
            # Lazily created so a plotter that never plots stays empty.
            self.plot_data = {'X': [], 'Y': []}
        self.plot_data['X'].append(x)
        self.plot_data['Y'].append(data)
        self.writer.add_scalar(self.tag, data, x, walltime)

    def save_final_plot(self, save_dir):
        """Save the accumulated series to <save_dir>/<tag>.npz, with '/' in
        the tag replaced by '_'. No-op (beyond mkdir) if nothing was plotted."""
        save_path = os.path.join(save_dir, '{}'.format(self.tag.replace('/', '_')))
        # Fix: create the parent directory; the original called
        # makedirs(save_path), creating a spurious empty directory named
        # like the .npz file prefix itself.
        os.makedirs(save_dir, exist_ok=True)
        if hasattr(self, 'plot_data'):
            save_data = dict(X=np.array(self.plot_data['X']),
                             Y=np.array(self.plot_data['Y']))
            np.savez(save_path + '.npz', **save_data)
            logging.info('Saved to {}'.format(save_path))
class ImageGridPlotter(object):
    """Logs dicts of images to tensorboard, either one image per label or as
    a single labelled grid with ``ncols`` columns."""

    def __init__(self, writer, ncols, grid=False):
        self.ncols = ncols
        self.writer = writer
        self.grid = grid

    def plot(self, visuals, niter=0):
        """Write the images in ``visuals`` (label -> batched tensor) at step ``niter``."""
        ncols = min(self.ncols, len(visuals))
        if not self.grid:
            # One tensorboard image per label.
            for label, im in visuals.items():
                single = vutils.make_grid([im[0]], normalize=True)
                self.writer.add_image(label, single, niter)
            return
        images = []
        labels = '|'
        idx = 0
        for label, im in visuals.items():
            images.append(im[0])
            labels += label + '|'
            idx += 1
            if idx % ncols == 0 and idx > 0:
                labels += '||'
        # Pad the last row with blank images so the grid stays rectangular.
        blank_image = torch.ones_like(images[0])
        while idx % ncols != 0:
            images.append(blank_image)
            idx += 1
            labels += ' |'
        self.writer.add_text('Visuals Labels', labels, niter)
        grid_img = vutils.make_grid(images, normalize=True, nrow=ncols)
        self.writer.add_image('Visuals', grid_img, niter)
def remove_prefix(s, prefix):
    """Return *s* with *prefix* stripped from the front, if present."""
    return s[len(prefix):] if s.startswith(prefix) else s
def get_subset_dict(in_dict, keys):
    """Return an OrderedDict restricted to *keys*; when *keys* is empty,
    return *in_dict* itself (not a copy)."""
    if not len(keys):
        return in_dict
    return OrderedDict((k, in_dict[k]) for k in keys)
def datestring():
    """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return time.strftime(fmt)
def format_str_one(v, float_prec=6, int_pad=1):
    """Format one value: floats with *float_prec* decimals, ints zero-padded
    to *int_pad* digits, and 1-element tensors unwrapped first."""
    if isinstance(v, torch.Tensor) and v.numel() == 1:
        v = v.item()
    if isinstance(v, float):
        return '{:.{p}f}'.format(v, p=float_prec)
    if isinstance(v, int) and int_pad:
        return '{:0{p}d}'.format(v, p=int_pad)
    return str(v)
def format_str(*args, format_opts=None, **kwargs):
    """Tab-join formatted positional values and 'key: value' pairs.

    format_opts: optional dict of keyword arguments forwarded to
        format_str_one (e.g. float_prec, int_pad); None means defaults.
    """
    # Fix: avoid a mutable default argument ({}); None is the sentinel.
    opts = format_opts if format_opts is not None else {}
    parts = [format_str_one(arg, **opts) for arg in args]
    for k, v in kwargs.items():
        parts.append('{}: {}'.format(k, format_str_one(v, **opts)))
    return '\t'.join(parts)
def complete_device(device):
    """Normalise *device* (str or torch.device) to a concrete torch.device.

    Falls back to CPU when CUDA is unavailable, and resolves a bare 'cuda'
    device (no index) to the current CUDA device.
    """
    if not torch.cuda.is_available():
        return torch.device('cpu')
    # Fix: idiomatic isinstance check (was `type(device) == str`).
    if isinstance(device, str):
        device = torch.device(device)
    if device.type == 'cuda' and device.index is None:
        return torch.device(device.type, torch.cuda.current_device())
    return device
def check_timestamp(checkpoint_path, timestamp_path):
    """Return True when the recorded timestamp is missing or stale.

    Compares the mtime of *checkpoint_path* (formatted 'YYYY-MM-DD HH:MM:SS')
    against the first line of *timestamp_path*.
    """
    if not os.path.isfile(timestamp_path):
        print('No timestamp found')
        return True
    mtime = os.path.getmtime(checkpoint_path)
    newtime = datetime.fromtimestamp(mtime).strftime('%Y-%m-%d %H:%M:%S')
    with open(timestamp_path) as f:
        oldtime = f.readlines()[0].strip()
    if oldtime != newtime:
        print('Timestamp out of date')
        return True
    print('Timestamp is correct')
    return False
def update_timestamp(checkpoint_path, timestamp_path):
    """Write the last-modified time of *checkpoint_path* (formatted
    'YYYY-MM-DD HH:MM:SS') into *timestamp_path*."""
    stamp = datetime.fromtimestamp(os.path.getmtime(checkpoint_path))
    formatted = stamp.strftime('%Y-%m-%d %H:%M:%S')
    with open(timestamp_path, 'w') as f:
        f.write('%s' % formatted)
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all running statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # weighted sum of observed values
        self.count = 0  # total weight observed

    def update(self, val, n=1):
        """Record *val* observed with weight *n* and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class Visualizer():
    """Thin wrapper around tensorboard logging: one LinePlotter per loss
    name (exposed as <name>_plotter) plus an image-grid plotter."""

    def __init__(self, opt, loss_names, visual_names=None):
        from . import tensorboard_utils as tb_utils
        self.name = opt.name
        self.opt = opt
        self.visual_names = visual_names
        tb_path = os.path.join('runs', self.name)
        if os.path.isdir(tb_path):
            logging.info('Found existing tensorboard history at %s' % tb_path)
            if not opt.overwrite_config:
                # Refuse to clobber an existing run unless explicitly allowed.
                logging.info('Use --overwrite_config to write to existing tensorboard history')
                exit(0)
        self.writer = SummaryWriter(logdir=tb_path)
        self.plotters = []
        for name in loss_names:
            # First '_' in the loss name becomes a '/' tag separator.
            plotter = tb_utils.LinePlotter(self.writer, name.replace('_', '/', 1))
            setattr(self, name + '_plotter', plotter)
            self.plotters.append(getattr(self, name + '_plotter'))
        self.imgrid = tb_utils.ImageGridPlotter(self.writer, ncols=5, grid=True)

    def display_current_results(self, visuals, epoch):
        self.imgrid.plot(visuals, epoch)

    def plot_current_losses(self, niter, losses):
        for k, v in losses.items():
            getattr(self, k + '_plotter').plot(niter, v)

    def print_current_losses(self, epoch, iters, total_steps, losses, t,
                             t_data, prefix=''):
        message = ('(epoch: %d, iters: %.3f, time: %.3f, data: %.3f) '
                   % (epoch, iters, t, t_data))
        message += prefix
        message += ' '
        for k, v in losses.items():
            message += '%s: %.3f, ' % (k, v)
        logging.info('%s' % message)
        logging.info('Total batches: %0.2f k\n' % (total_steps / 1000))

    def save_final_plots(self):
        save_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name,
                                'visualize')
        for plotter in self.plotters:
            plotter.save_final_plot(save_dir)
def CreateDataset(opt):
    """loads dataset class"""
    # Samplers ('vae'/'gan') train on grasp-sampling data; everything else
    # (the evaluator) trains on grasp-evaluation data.
    if opt.arch in ('vae', 'gan'):
        from data.grasp_sampling_data import GraspSamplingData
        return GraspSamplingData(opt)
    from data.grasp_evaluator_data import GraspEvaluatorData
    return GraspEvaluatorData(opt)
class DataLoader():
    """multi-threaded data loading"""

    def __init__(self, opt):
        self.opt = opt
        self.dataset = CreateDataset(opt)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.num_objects_per_batch,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads),
            collate_fn=collate_fn)

    def __len__(self):
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        # NOTE(review): the cap below uses opt.batch_size while batching in
        # __init__ uses opt.num_objects_per_batch — confirm they agree.
        for i, data in enumerate(self.dataloader):
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data
def create_model(opt):
    """Build and return a GraspNetModel configured by *opt*."""
    from .grasp_net import GraspNetModel
    return GraspNetModel(opt)
class GraspNetModel():
    """Class for training Model weights.

    :args opt: structure containing configuration params
        e.g. --dataset_mode -> sampling / evaluation
    """

    def __init__(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.is_train = opt.is_train
        # Clamp the primary GPU id to an existing device index.
        if self.gpu_ids and self.gpu_ids[0] >= torch.cuda.device_count():
            self.gpu_ids[0] = torch.cuda.device_count() - 1
        self.device = (torch.device('cuda:{}'.format(self.gpu_ids[0]))
                       if self.gpu_ids else torch.device('cpu'))
        self.save_dir = join(opt.checkpoints_dir, opt.name)
        self.optimizer = None
        self.loss = None
        self.pcs = None
        self.grasps = None
        self.net = networks.define_classifier(opt, self.gpu_ids, opt.arch,
                                              opt.init_type, opt.init_gain,
                                              self.device)
        self.criterion = networks.define_loss(opt)
        # Architecture-specific loss slots.
        self.confidence_loss = None
        if self.opt.arch == 'vae':
            self.kl_loss = None
            self.reconstruction_loss = None
        elif self.opt.arch == 'gan':
            self.reconstruction_loss = None
        else:
            self.classification_loss = None
        if self.is_train:
            self.optimizer = torch.optim.Adam(self.net.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, 0.999))
            self.scheduler = networks.get_scheduler(self.optimizer, opt)
        if not self.is_train or opt.continue_train:
            self.load_network(opt.which_epoch, self.is_train)

    def set_input(self, data):
        """Move one batch of numpy arrays onto the model device."""
        input_pcs = torch.from_numpy(data['pc']).contiguous()
        input_grasps = torch.from_numpy(data['grasp_rt']).float()
        if self.opt.arch == 'evaluator':
            targets = torch.from_numpy(data['labels']).float()
        else:
            targets = torch.from_numpy(data['target_cps']).float()
        self.pcs = input_pcs.to(self.device).requires_grad_(self.is_train)
        self.grasps = input_grasps.to(self.device).requires_grad_(self.is_train)
        self.targets = targets.to(self.device)

    def generate_grasps(self, pcs, z=None):
        with torch.no_grad():
            return self.net.module.generate_grasps(pcs, z=z)

    def evaluate_grasps(self, pcs, gripper_pcs):
        success, _ = self.net.module(pcs, gripper_pcs)
        return torch.sigmoid(success)

    def forward(self):
        return self.net(self.pcs, self.grasps, train=self.is_train)

    def backward(self, out):
        """Compute the architecture-specific loss and backpropagate."""
        if self.opt.arch == 'vae':
            predicted_cp, confidence, mu, logvar = out
            predicted_cp = utils.transform_control_points(
                predicted_cp, predicted_cp.shape[0], device=self.device)
            self.reconstruction_loss, self.confidence_loss = self.criterion[1](
                predicted_cp, self.targets, confidence=confidence,
                confidence_weight=self.opt.confidence_weight,
                device=self.device)
            self.kl_loss = self.opt.kl_loss_weight * self.criterion[0](
                mu, logvar, device=self.device)
            self.loss = (self.kl_loss + self.reconstruction_loss
                         + self.confidence_loss)
        elif self.opt.arch == 'gan':
            predicted_cp, confidence = out
            predicted_cp = utils.transform_control_points(
                predicted_cp, predicted_cp.shape[0], device=self.device)
            self.reconstruction_loss, self.confidence_loss = self.criterion(
                predicted_cp, self.targets, confidence=confidence,
                confidence_weight=self.opt.confidence_weight,
                device=self.device)
            self.loss = self.reconstruction_loss + self.confidence_loss
        elif self.opt.arch == 'evaluator':
            grasp_classification, confidence = out
            self.classification_loss, self.confidence_loss = self.criterion(
                grasp_classification.squeeze(), self.targets, confidence,
                self.opt.confidence_weight, device=self.device)
            self.loss = self.classification_loss + self.confidence_loss
        self.loss.backward()

    def optimize_parameters(self):
        self.optimizer.zero_grad()
        out = self.forward()
        self.backward(out)
        self.optimizer.step()

    def load_network(self, which_epoch, train=True):
        """load model from disk"""
        save_filename = '%s_net.pth' % which_epoch
        load_path = join(self.save_dir, save_filename)
        net = self.net
        if isinstance(net, torch.nn.DataParallel):
            net = net.module
        print('loading the model from %s' % load_path)
        checkpoint = torch.load(load_path, map_location=self.device)
        if hasattr(checkpoint['model_state_dict'], '_metadata'):
            del checkpoint['model_state_dict']._metadata
        net.load_state_dict(checkpoint['model_state_dict'])
        if train:
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            self.opt.epoch_count = checkpoint['epoch']
        else:
            net.eval()

    def save_network(self, net_name, epoch_num):
        """save model to disk"""
        save_filename = '%s_net.pth' % net_name
        save_path = join(self.save_dir, save_filename)
        torch.save({'epoch': epoch_num + 1,
                    'model_state_dict': self.net.module.cpu().state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler_state_dict': self.scheduler.state_dict()},
                   save_path)
        # state_dict() above moved weights to CPU; move them back.
        if len(self.gpu_ids) > 0 and torch.cuda.is_available():
            self.net.cuda(self.gpu_ids[0])

    def update_learning_rate(self):
        """update learning rate (called once every epoch)"""
        self.scheduler.step()
        lr = self.optimizer.param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def test(self):
        """tests model

        returns: number correct and total number
        """
        with torch.no_grad():
            out = self.forward()
            prediction, confidence = out
            if self.opt.arch == 'vae':
                predicted_cp = utils.transform_control_points(
                    prediction, prediction.shape[0], device=self.device)
                reconstruction_loss, _ = self.criterion[1](
                    predicted_cp, self.targets, confidence=confidence,
                    confidence_weight=self.opt.confidence_weight,
                    device=self.device)
                return reconstruction_loss, 1
            elif self.opt.arch == 'gan':
                predicted_cp = utils.transform_control_points(
                    prediction, prediction.shape[0], device=self.device)
                reconstruction_loss, _ = self.criterion(
                    predicted_cp, self.targets, confidence=confidence,
                    confidence_weight=self.opt.confidence_weight,
                    device=self.device)
                return reconstruction_loss, 1
            else:
                predicted = torch.round(torch.sigmoid(prediction)).squeeze()
                correct = (predicted == self.targets).sum().item()
                return correct, len(self.targets)
def control_point_l1_loss_better_than_threshold(pred_control_points,
                                                gt_control_points,
                                                confidence,
                                                confidence_threshold,
                                                device='cpu'):
    """L1 control-point loss restricted to grasps whose confidence is at
    least *confidence_threshold*; also returns the fraction of grasps kept.
    """
    npoints = pred_control_points.shape[1]
    mask = torch.greater_equal(confidence, confidence_threshold)
    # Fix: torch.mean is not defined for bool tensors — cast before averaging.
    mask_ratio = torch.mean(mask.float())
    mask = torch.repeat_interleave(mask, npoints, dim=1)
    p1 = pred_control_points[mask]
    p2 = gt_control_points[mask]
    return control_point_l1_loss(p1, p2), mask_ratio
def accuracy_better_than_threshold(pred_success_logits, gt, confidence,
                                   confidence_threshold, device='cpu'):
    """
    Computes average precision for the grasps with confidence > threshold.

    Returns (balanced accuracy over positives and negatives, fraction of
    grasps whose confidence passed the threshold).
    """
    pred_classes = torch.argmax(pred_success_logits, -1)
    # Fix: torch.equal collapses both tensors to one python bool; we need
    # the per-element comparison to score each grasp individually.
    correct = (pred_classes == gt).float()
    mask = torch.squeeze(
        torch.greater_equal(confidence, confidence_threshold), -1).float()
    # clamp(min=1) guards against division by zero when no positives or
    # no negatives pass the mask.
    positive_acc = (torch.sum(correct * mask * gt)
                    / torch.clamp(torch.sum(mask * gt), min=1.0))
    negative_acc = (torch.sum(correct * mask * (1.0 - gt))
                    / torch.clamp(torch.sum(mask * (1.0 - gt)), min=1.0))
    return 0.5 * (positive_acc + negative_acc), torch.sum(mask) / gt.shape[0]
def control_point_l1_loss(pred_control_points, gt_control_points,
                          confidence=None, confidence_weight=None,
                          device='cpu'):
    """
    Computes the l1 loss between the predicted control points and the
    groundtruth control points on the gripper.
    """
    per_point = torch.sum(torch.abs(pred_control_points - gt_control_points), -1)
    per_grasp = torch.mean(per_point, -1)
    if confidence is None:
        return torch.mean(per_grasp)
    assert confidence_weight is not None
    # Weight each grasp's error by its predicted confidence, and penalise
    # confidences near zero (clamped for log stability).
    weighted = per_grasp * confidence
    confidence_term = torch.mean(
        torch.log(torch.max(confidence,
                            torch.tensor(1e-10).to(device)))) * confidence_weight
    return torch.mean(weighted), -confidence_term
def classification_with_confidence_loss(pred_logit, gt, confidence,
                                        confidence_weight, device='cpu'):
    """
    Computes the cross entropy loss and confidence term that penalizes
    outputing zero confidence. Returns cross entropy loss and the confidence
    regularization term.
    """
    bce = torch.nn.functional.binary_cross_entropy_with_logits(pred_logit, gt)
    floor = torch.tensor(1e-10).to(device)  # log() stability clamp
    confidence_term = torch.mean(torch.log(torch.max(confidence, floor))) * confidence_weight
    return bce, -confidence_term
def min_distance_loss(pred_control_points, gt_control_points,
                      confidence=None, confidence_weight=None,
                      threshold=None, device='cpu'):
    """
    Computes the minimum distance (L1 distance) between each gt control point
    and any of the predicted control points.

    Args:
      pred_control_points: tensor of (N_pred, M, 4) shape. N is the number of
        grasps. M is the number of points on the gripper.
      gt_control_points: (N_gt, M, 4)
      confidence: tensor of N_pred, tensor for the confidence of each
        prediction.
      confidence_weight: float, the weight for confidence loss.
    """
    pred_shape = pred_control_points.shape
    gt_shape = gt_control_points.shape
    if len(pred_shape) != 3:
        raise ValueError('pred_control_point should have len of 3. {}'.format(pred_shape))
    if len(gt_shape) != 3:
        raise ValueError('gt_control_point should have len of 3. {}'.format(gt_shape))
    if pred_shape != gt_shape:
        raise ValueError('shapes do no match {} != {}'.format(pred_shape, gt_shape))
    # (N_pred, N_gt): mean per-point L1 distance for every pred/gt pair.
    pairwise = pred_control_points.unsqueeze(1) - gt_control_points.unsqueeze(0)
    pairwise = torch.mean(torch.sum(torch.abs(pairwise), -1), -1)
    min_distance_error, closest_index = pairwise.min(0)
    if confidence is None:
        confidence_term = 0.0
    else:
        # Pick, for each gt grasp, the confidence of its closest prediction.
        selected_confidence = torch.nn.functional.one_hot(
            closest_index, num_classes=closest_index.shape[0]).float()
        selected_confidence *= confidence
        selected_confidence = torch.sum(selected_confidence, -1)
        min_distance_error = min_distance_error * selected_confidence
        confidence_term = torch.mean(
            torch.log(torch.max(confidence,
                                torch.tensor(0.0001).to(device)))) * confidence_weight
    return torch.mean(min_distance_error), -confidence_term
def min_distance_better_than_threshold(pred_control_points, gt_control_points,
                                       confidence, confidence_threshold,
                                       device='cpu'):
    """Mean minimum pairwise L1 distance over grasps whose confidence passes
    *confidence_threshold*, plus the fraction of grasps kept.

    Fixes vs. original: torch has no ``expand_dims`` (use ``unsqueeze``);
    ``torch.min(t, dim)`` returns a (values, indices) pair, not a tensor;
    bool tensors cannot be averaged without a float cast.
    """
    error = pred_control_points.unsqueeze(1) - gt_control_points.unsqueeze(0)
    error = torch.sum(torch.abs(error), -1)   # (N_pred, N_gt, M)
    error = torch.mean(error, -1)             # (N_pred, N_gt)
    error = torch.min(error, dim=-1).values   # closest gt per prediction
    mask = torch.greater_equal(confidence, confidence_threshold)
    mask = torch.squeeze(mask, dim=-1)
    return torch.mean(error[mask]), torch.mean(mask.float())
def kl_divergence(mu, log_sigma, device='cpu'):
    """
    Computes the kl divergence for batch of mu and log_sigma.
    """
    # KL(N(mu, sigma) || N(0, 1)) summed over the latent dim, then averaged
    # over the batch.
    per_sample = -0.5 * torch.sum(
        1.0 + log_sigma - mu ** 2 - torch.exp(log_sigma), dim=-1)
    return torch.mean(per_sample)
def confidence_loss(confidence, confidence_weight, device='cpu'):
    """Weighted mean log-confidence, clamped away from zero for stability."""
    clamped = torch.max(confidence, torch.tensor(1e-10).to(device))
    return torch.mean(torch.log(clamped)) * confidence_weight
def get_scheduler(optimizer, opt):
    """Build an LR scheduler ('lambda', 'step' or 'plateau') from *opt*.

    Fix: an unknown policy now *raises* NotImplementedError — the original
    returned the exception object instead of raising it, so callers would
    silently receive an exception instance as their "scheduler".
    """
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Linear decay to 0 over niter_decay epochs after niter.
            return 1.0 - max(0, epoch + 2 - opt.niter) / float(opt.niter_decay + 1)
        return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    if opt.lr_policy == 'step':
        return lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters,
                                   gamma=0.1)
    if opt.lr_policy == 'plateau':
        return lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                              factor=0.2, threshold=0.01,
                                              patience=5)
    raise NotImplementedError(
        'learning rate policy [%s] is not implemented' % opt.lr_policy)
def init_weights(net, init_type, init_gain):
    """Initialise Conv/Linear weights per *init_type* ('normal', 'xavier',
    'kaiming' or 'orthogonal'); BatchNorm weights get N(1, init_gain) and
    zero bias."""
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
        elif 'BatchNorm' in classname:
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)
def init_net(net, init_type, init_gain, gpu_ids):
    """Move *net* to GPU (wrapped in DataParallel over *gpu_ids*) when GPUs
    are requested, then initialise its weights unless init_type is 'none'."""
    use_gpu = len(gpu_ids) > 0
    if use_gpu:
        assert torch.cuda.is_available()
        net.cuda(gpu_ids[0])
        net = torch.nn.DataParallel(net.cuda(), gpu_ids)
    if init_type != 'none':
        init_weights(net, init_type, init_gain)
    return net
def define_classifier(opt, gpu_ids, arch, init_type, init_gain, device):
    """Construct the network for *arch* ('vae', 'gan' or 'evaluator') and
    hand it to init_net for device placement and weight init."""
    if arch == 'vae':
        net = GraspSamplerVAE(opt.model_scale, opt.pointnet_radius,
                              opt.pointnet_nclusters, opt.latent_size, device)
    elif arch == 'gan':
        net = GraspSamplerGAN(opt.model_scale, opt.pointnet_radius,
                              opt.pointnet_nclusters, opt.latent_size, device)
    elif arch == 'evaluator':
        net = GraspEvaluator(opt.model_scale, opt.pointnet_radius,
                             opt.pointnet_nclusters, device)
    else:
        raise NotImplementedError('model name [%s] is not recognized' % arch)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_loss(opt):
    """Return the loss callable(s) matching opt.arch: a (kl, reconstruction)
    pair for 'vae', a single loss for 'gan' and 'evaluator'."""
    if opt.arch == 'vae':
        return losses.kl_divergence, losses.control_point_l1_loss
    if opt.arch == 'gan':
        return losses.min_distance_loss
    if opt.arch == 'evaluator':
        return losses.classification_with_confidence_loss
    raise NotImplementedError('Loss not found')
class GraspSampler(nn.Module):
    """Base class for grasp samplers: holds the latent size and the shared
    pointnet decoder mapping (pc, z) to a grasp (quaternion + translation)."""

    def __init__(self, latent_size, device):
        super(GraspSampler, self).__init__()
        self.latent_size = latent_size
        self.device = device

    def create_decoder(self, model_scale, pointnet_radius, pointnet_nclusters,
                       num_input_features):
        # Backbone plus linear heads for rotation (q, normalised quaternion),
        # translation (t) and confidence.
        self.decoder = base_network(pointnet_radius, pointnet_nclusters,
                                    model_scale, num_input_features)
        self.q = nn.Linear(model_scale * 1024, 4)
        self.t = nn.Linear(model_scale * 1024, 3)
        self.confidence = nn.Linear(model_scale * 1024, 1)

    def decode(self, xyz, z):
        xyz_features = self.concatenate_z_with_pc(xyz, z).transpose(-1, 1).contiguous()
        for module in self.decoder[0]:
            xyz, xyz_features = module(xyz, xyz_features)
        x = self.decoder[1](xyz_features.squeeze(-1))
        predicted_qt = torch.cat(
            (F.normalize(self.q(x), p=2, dim=-1), self.t(x)), -1)
        return predicted_qt, torch.sigmoid(self.confidence(x)).squeeze()

    def concatenate_z_with_pc(self, pc, z):
        # NOTE: unsqueeze_ mutates z in place.
        z.unsqueeze_(1)
        z = z.expand(-1, pc.shape[1], -1)
        return torch.cat((pc, z), -1)

    def get_latent_size(self):
        return self.latent_size
class GraspSamplerVAE(GraspSampler):
    """Network for learning a generative VAE grasp-sampler."""

    def __init__(self, model_scale, pointnet_radius=0.02,
                 pointnet_nclusters=128, latent_size=2, device='cpu'):
        super(GraspSamplerVAE, self).__init__(latent_size, device)
        self.create_encoder(model_scale, pointnet_radius, pointnet_nclusters)
        # Decoder consumes xyz (3) plus the latent code.
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters,
                            latent_size + 3)
        self.create_bottleneck(model_scale * 1024, latent_size)

    def create_encoder(self, model_scale, pointnet_radius, pointnet_nclusters):
        # 19 input channels: point xyz plus per-point grasp features
        # (see forward_train).
        self.encoder = base_network(pointnet_radius, pointnet_nclusters,
                                    model_scale, 19)

    def create_bottleneck(self, input_size, latent_size):
        mu = nn.Linear(input_size, latent_size)
        logvar = nn.Linear(input_size, latent_size)
        self.latent_space = nn.ModuleList([mu, logvar])

    def encode(self, xyz, xyz_features):
        for module in self.encoder[0]:
            xyz, xyz_features = module(xyz, xyz_features)
        return self.encoder[1](xyz_features.squeeze(-1))

    def bottleneck(self, z):
        return self.latent_space[0](z), self.latent_space[1](z)

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, pc, grasp=None, train=True):
        if train:
            return self.forward_train(pc, grasp)
        return self.forward_test(pc, grasp)

    def forward_train(self, pc, grasp):
        # Tile the grasp over every point and encode jointly with the pc.
        input_features = torch.cat(
            (pc, grasp.unsqueeze(1).expand(-1, pc.shape[1], -1)),
            -1).transpose(-1, 1).contiguous()
        z = self.encode(pc, input_features)
        mu, logvar = self.bottleneck(z)
        z = self.reparameterize(mu, logvar)
        qt, confidence = self.decode(pc, z)
        return qt, confidence, mu, logvar

    def forward_test(self, pc, grasp):
        input_features = torch.cat(
            (pc, grasp.unsqueeze(1).expand(-1, pc.shape[1], -1)),
            -1).transpose(-1, 1).contiguous()
        z = self.encode(pc, input_features)
        mu, _ = self.bottleneck(z)
        # Deterministic at test time: decode from the posterior mean.
        qt, confidence = self.decode(pc, mu)
        return qt, confidence

    def sample_latent(self, batch_size):
        return torch.randn(batch_size, self.latent_size).to(self.device)

    def generate_grasps(self, pc, z=None):
        if z is None:
            z = self.sample_latent(pc.shape[0])
        qt, confidence = self.decode(pc, z)
        return qt, confidence, z.squeeze()

    def generate_dense_latents(self, resolution):
        """
        For the VAE sampler we consider dense latents to correspond to those between -2 and 2
        """
        axes = [torch.linspace(-2, 2, resolution)
                for _ in range(self.latent_size)]
        grids = torch.meshgrid(*axes)
        return torch.stack([g.flatten() for g in grids], dim=-1).to(self.device)
class GraspSamplerGAN(GraspSampler):
    """
    Although the name says this sampler is based on the GAN formulation, it is
    not actually optimizing based on the commonly known adversarial game.
    Instead, it is based on the Implicit Maximum Likelihood Estimation from
    https://arxiv.org/pdf/1809.09087.pdf which is similar to the GAN formulation
    but with new insights that avoids e.g. mode collapses.
    """

    def __init__(self, model_scale, pointnet_radius, pointnet_nclusters,
                 latent_size=2, device='cpu'):
        super(GraspSamplerGAN, self).__init__(latent_size, device)
        self.create_decoder(model_scale, pointnet_radius, pointnet_nclusters,
                            latent_size + 3)

    def sample_latent(self, batch_size):
        # Uniform latents in [0, 1) (the VAE sampler uses randn instead).
        return torch.rand(batch_size, self.latent_size).to(self.device)

    def forward(self, pc, grasps=None, train=True):
        z = self.sample_latent(pc.shape[0])
        return self.decode(pc, z)

    def generate_grasps(self, pc, z=None):
        if z is None:
            z = self.sample_latent(pc.shape[0])
        qt, confidence = self.decode(pc, z)
        return qt, confidence, z.squeeze()

    def generate_dense_latents(self, resolution):
        axes = [torch.linspace(0, 1, resolution)
                for _ in range(self.latent_size)]
        grids = torch.meshgrid(*axes)
        return torch.stack([g.flatten() for g in grids], dim=-1).to(self.device)
class GraspEvaluator(nn.Module):
    """Pointnet-based classifier scoring (object pc, gripper pc) pairs with a
    success logit and a confidence value."""

    def __init__(self, model_scale=1, pointnet_radius=0.02,
                 pointnet_nclusters=128, device='cpu'):
        super(GraspEvaluator, self).__init__()
        self.create_evaluator(pointnet_radius, model_scale, pointnet_nclusters)
        self.device = device

    def create_evaluator(self, pointnet_radius, model_scale, pointnet_nclusters):
        # 4 input features per point: xyz plus the object/gripper label bit.
        self.evaluator = base_network(pointnet_radius, pointnet_nclusters,
                                      model_scale, 4)
        self.predictions_logits = nn.Linear(1024 * model_scale, 1)
        self.confidence = nn.Linear(1024 * model_scale, 1)

    def evaluate(self, xyz, xyz_features):
        for module in self.evaluator[0]:
            xyz, xyz_features = module(xyz, xyz_features)
        return self.evaluator[1](xyz_features.squeeze(-1))

    def forward(self, pc, gripper_pc, train=True):
        pc, pc_features = self.merge_pc_and_gripper_pc(pc, gripper_pc)
        x = self.evaluate(pc, pc_features.contiguous())
        return self.predictions_logits(x), torch.sigmoid(self.confidence(x))

    def merge_pc_and_gripper_pc(self, pc, gripper_pc):
        """
        Merges the object point cloud and gripper point cloud and
        adds a binary auxiliary feature that indicates whether each point
        belongs to the object or to the gripper.
        """
        pc_shape = pc.shape
        gripper_shape = gripper_pc.shape
        assert len(pc_shape) == 3
        assert len(gripper_shape) == 3
        assert pc_shape[0] == gripper_shape[0]
        batch_size = pc_shape[0]
        l0_xyz = torch.cat((pc, gripper_pc), 1)
        # Label 1 for object points, 0 for gripper points.
        labels = torch.cat(
            [torch.ones(pc.shape[1], 1, dtype=torch.float32),
             torch.zeros(gripper_pc.shape[1], 1, dtype=torch.float32)], 0)
        labels.unsqueeze_(0)
        labels = labels.repeat(batch_size, 1, 1)
        l0_points = torch.cat([l0_xyz, labels.to(self.device)], -1).transpose(-1, 1)
        return l0_xyz, l0_points
def base_network(pointnet_radius, pointnet_nclusters, scale, in_features):
    """Three pointnet++ set-abstraction layers followed by a two-layer MLP
    head; returns nn.ModuleList([sa_modules, fc_layer])."""
    sa1 = pointnet2.PointnetSAModule(
        npoint=pointnet_nclusters, radius=pointnet_radius, nsample=64,
        mlp=[in_features, 64 * scale, 64 * scale, 128 * scale])
    sa2 = pointnet2.PointnetSAModule(
        npoint=32, radius=0.04, nsample=128,
        mlp=[128 * scale, 128 * scale, 128 * scale, 256 * scale])
    # Final global set abstraction (no npoint/radius -> group-all).
    sa3 = pointnet2.PointnetSAModule(
        mlp=[256 * scale, 256 * scale, 256 * scale, 512 * scale])
    sa_modules = nn.ModuleList([sa1, sa2, sa3])
    fc_layer = nn.Sequential(
        nn.Linear(512 * scale, 1024 * scale),
        nn.BatchNorm1d(1024 * scale),
        nn.ReLU(True),
        nn.Linear(1024 * scale, 1024 * scale),
        nn.BatchNorm1d(1024 * scale),
        nn.ReLU(True))
    return nn.ModuleList([sa_modules, fc_layer])
class BaseOptions():
    """Command-line options shared by the training and testing scripts."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False

    def initialize(self):
        """Register every shared option on the parser."""
        add = self.parser.add_argument
        add('--dataset_root_folder', type=str,
            default='/home/jens/Documents/datasets/grasping/unified_grasp_data/',
            help='path to root directory of the dataset.')
        add('--num_objects_per_batch', type=int, default=1, help='data batch size.')
        add('--num_grasps_per_object', type=int, default=64)
        add('--npoints', type=int, default=1024,
            help='number of points in each batch')
        add('--occlusion_nclusters', type=int, default=0,
            help='clusters the points to nclusters to be selected for simulating the dropout')
        add('--occlusion_dropout_rate', type=float, default=0,
            help='probability at which the clusters are removed from point cloud.')
        add('--depth_noise', type=float, default=0.0)
        add('--num_grasp_clusters', type=int, default=32)
        add('--arch', choices={'vae', 'gan', 'evaluator'}, default='vae')
        add('--max_dataset_size', type=int, default=float('inf'),
            help='Maximum number of samples per epoch')
        add('--num_threads', default=3, type=int, help='# threads for loading data')
        add('--gpu_ids', type=str, default='0',
            help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        add('--checkpoints_dir', type=str, default='./checkpoints',
            help='models are saved here')
        add('--serial_batches', action='store_true',
            help='if true, takes meshes in order, otherwise takes them randomly')
        add('--seed', type=int, help='if specified, uses seed')
        add('--gripper', type=str, default='panda',
            help='type of the gripper. \nLeave it to panda if you want to use it for franka robot')
        add('--latent_size', type=int, default=2)
        add('--gripper_pc_npoints', type=int, default=-1,
            help='number of points representing the gripper. -1 just uses the points on the finger and also the base. other values use subsampling of the gripper mesh')
        add('--merge_pcs_in_vae_encoder', type=int, default=0,
            help='whether to create unified pc in encoder by coloring the points (similar to evaluator')
        add('--allowed_categories', type=str, default='',
            help='if left blank uses all the categories in the <DATASET_ROOT_PATH>/splits/<category>.json, otherwise only chooses the categories that are set.')
        add('--blacklisted_categories', type=str, default='',
            help='The opposite of allowed categories')
        add('--use_uniform_quaternions', type=int, default=0)
        add('--model_scale', type=int, default=1,
            help='the scale of the parameters. Use scale >= 1. Scale=2 increases the number of parameters in model by 4x.')
        add('--splits_folder_name', type=str, default='splits',
            help='Folder name for the directory that has all the jsons for train/test splits.')
        add('--grasps_folder_name', type=str, default='grasps',
            help='Directory that contains the grasps. \nWill be joined with the dataset_root_folder and the file names as defined in the splits.')
        add('--pointnet_radius',
            help='Radius for ball query for PointNet++, just the first layer',
            type=float, default=0.02)
        add('--pointnet_nclusters',
            help='Number of cluster centroids for PointNet++, just the first layer',
            type=int, default=128)
        add('--init_type', type=str, default='normal',
            help='network initialization [normal|xavier|kaiming|orthogonal]')
        add('--init_gain', type=float, default=0.02,
            help='scaling factor for normal, xavier and orthogonal.')
        add('--grasps_ratio', type=float, default=1.0,
            help='used for checking the effect of number of grasps per object on the success of the model.')
        add('--skip_error', action='store_true',
            help='Will not fill the dataset with a new grasp if it raises NoPositiveGraspsException')
        add('--balanced_data', action='store_true', default=False)
        add('--confidence_weight', type=float, default=1.0,
            help='initially I wanted to compute confidence for vae and evaluator outputs, setting the confidence weight to 1. \nimmediately pushes the confidence to 1.0.')

    def parse(self):
        """Parse known args, derive dependent options, seed RNGs, and (for
        training) build the experiment name and directory.

        Returns the options namespace, or None when an existing experiment
        directory is found and the user declines to overwrite it.
        """
        if not self.initialized:
            self.initialize()
        self.opt, unknown = self.parser.parse_known_args()
        self.opt.is_train = self.is_train  # set by the subclass initialize()
        if self.opt.is_train:
            self.opt.dataset_split = 'train'
        else:
            self.opt.dataset_split = 'test'
        self.opt.batch_size = (self.opt.num_objects_per_batch
                               * self.opt.num_grasps_per_object)
        # '--gpu_ids 0,1' -> [0, 1]; negative ids mean CPU and are dropped.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)
            if gpu_id >= 0:
                self.opt.gpu_ids.append(gpu_id)
        if len(self.opt.gpu_ids) > 0:
            torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        if self.opt.seed is not None:
            import numpy as np
            import random
            torch.manual_seed(self.opt.seed)
            np.random.seed(self.opt.seed)
            random.seed(self.opt.seed)
        if self.is_train:
            print('------------ Options -------------')
            for k, v in sorted(args.items()):
                print('%s: %s' % (str(k), str(v)))
            print('-------------- End ----------------')
            # Experiment name encodes the main hyper-parameters.
            name = self.opt.arch
            name += ('_lr_' + str(self.opt.lr).split('.')[-1]
                     + '_bs_' + str(self.opt.batch_size))
            name += ('_scale_' + str(self.opt.model_scale)
                     + '_npoints_' + str(self.opt.pointnet_nclusters)
                     + '_radius_' + str(self.opt.pointnet_radius).split('.')[-1])
            if self.opt.arch == 'vae' or self.opt.arch == 'gan':
                name += '_latent_size_' + str(self.opt.latent_size)
            self.opt.name = name
            expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
            if os.path.isdir(expr_dir) and not self.opt.continue_train:
                option = ('Directory ' + expr_dir
                          + ' already exists and you have not chosen to continue to train.\nDo you want to override that training instance with a new one the press (Y/N).')
                print(option)
                while True:
                    choice = input()
                    if choice.upper() == 'Y':
                        print('Overriding directory ' + expr_dir)
                        shutil.rmtree(expr_dir)
                        utils.mkdir(expr_dir)
                        break
                    elif choice.upper() == 'N':
                        print('Terminating. \nRemember, if you want to continue to train from a saved instance then run the script with the flag --continue_train')
                        return None
            else:
                utils.mkdir(expr_dir)
            # Persist the options both as yaml (machine) and txt (human).
            yaml_path = os.path.join(expr_dir, 'opt.yaml')
            with open(yaml_path, 'w') as yaml_file:
                yaml.dump(args, yaml_file)
            file_name = os.path.join(expr_dir, 'opt.txt')
            with open(file_name, 'wt') as opt_file:
                opt_file.write('------------ Options -------------\n')
                for k, v in sorted(args.items()):
                    opt_file.write('%s: %s\n' % (str(k), str(v)))
                opt_file.write('-------------- End ----------------\n')
        return self.opt
class TestOptions(BaseOptions):
    """Test-time options: selects which cached checkpoint to load."""

    def initialize(self):
        BaseOptions.initialize(self)
        self.parser.add_argument(
            '--which_epoch', type=str, default='latest',
            help='which epoch to load? set to latest to use latest cached model')
        self.is_train = False
class TrainOptions(BaseOptions):
    """Training-time options: logging/checkpoint frequencies and optimizer
    hyper-parameters, on top of the shared BaseOptions."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--print_freq', type=int, default=100,
            help='frequency of showing training results on console')
        add('--save_latest_freq', type=int, default=250,
            help='frequency of saving the latest results')
        add('--save_epoch_freq', type=int, default=1,
            help='frequency of saving checkpoints at the end of epochs')
        add('--run_test_freq', type=int, default=1,
            help='frequency of running test in training script')
        add('--continue_train', action='store_true',
            help='continue training: load the latest model')
        add('--epoch_count', type=int, default=1,
            help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest',
            help='which epoch to load? \nset to latest to use latest cached model')
        add('--niter', type=int, default=100,
            help='# of iter at starting learning rate')
        add('--niter_decay', type=int, default=2000,
            help='# of iter to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.9, help='momentum term of adam')
        add('--lr', type=float, default=0.0002,
            help='initial learning rate for adam')
        add('--lr_policy', type=str, default='lambda',
            help='learning rate policy: lambda|step|plateau')
        add('--lr_decay_iters', type=int, default=50,
            help='multiply by a gamma every lr_decay_iters iterations')
        add('--kl_loss_weight', type=float, default=0.01)
        add('--no_vis', action='store_true', help='will not use tensorboard')
        add('--verbose_plot', action='store_true',
            help='plots network weights, etc.')
        self.is_train = True
class OnlineObjectRenderer():
    """Renders CAD meshes with pyrender (optionally memoizing loaded meshes)
    and back-projects rendered depth maps to homogeneous point clouds."""

    def __init__(self, fov=np.pi / 6, caching=True):
        """
        Args:
            fov: float, vertical field of view in radians.
            caching: keep loaded meshes in memory across change_object calls.
        """
        self._fov = fov
        # Focal lengths derived from the field of view (square aspect).
        self._fy = self._fx = 1 / (0.5 / np.tan(self._fov * 0.5))
        self.mesh = None
        self._scene = None
        self.tmesh = None
        self._init_scene()
        self._current_context = None
        self._cache = {} if caching else None
        self._caching = caching

    def _init_scene(self):
        self._scene = pyrender.Scene()
        camera = pyrender.PerspectiveCamera(yfov=self._fov, aspectRatio=1.0,
                                            znear=0.001)
        camera_pose = tra.euler_matrix(np.pi, 0, 0)
        self._scene.add(camera, pose=camera_pose, name='camera')
        self.renderer = None  # created lazily on first render()

    def _load_object(self, path, scale):
        """Load, rescale, and center a mesh; memoized by (path, scale)."""
        if (path, scale) in self._cache:
            return self._cache[(path, scale)]
        obj = sample.Object(path)
        obj.rescale(scale)
        tmesh = obj.mesh
        tmesh_mean = np.mean(tmesh.vertices, 0)
        tmesh.vertices -= np.expand_dims(tmesh_mean, 0)
        lbs = np.min(tmesh.vertices, 0)
        ubs = np.max(tmesh.vertices, 0)
        # Place the object far enough that it is fully visible.
        object_distance = np.max(ubs - lbs) * 5
        mesh = pyrender.Mesh.from_trimesh(tmesh)
        context = {
            'tmesh': copy.deepcopy(tmesh),
            'distance': object_distance,
            'node': pyrender.Node(mesh=mesh),
            'mesh_mean': np.expand_dims(tmesh_mean, 0),
        }
        self._cache[(path, scale)] = context
        return self._cache[(path, scale)]

    def change_object(self, path, scale):
        if self._current_context is not None:
            self._scene.remove_node(self._current_context['node'])
        if not self._caching:
            self._cache = {}  # re-create so _load_object can index into it
        self._current_context = self._load_object(path, scale)
        self._scene.add_node(self._current_context['node'])

    def current_context(self):
        return self._current_context

    def _to_pointcloud(self, depth):
        """Back-project a depth image to an (N, 4) homogeneous point cloud,
        skipping zero-depth pixels."""
        height = depth.shape[0]
        width = depth.shape[1]
        mask = np.where(depth > 0)
        x = mask[1]
        y = mask[0]
        normalized_x = (x.astype(np.float32) - width * 0.5) / width
        normalized_y = (y.astype(np.float32) - height * 0.5) / height
        world_x = self._fx * normalized_x * depth[y, x]
        world_y = self._fy * normalized_y * depth[y, x]
        world_z = depth[y, x]
        ones = np.ones(world_z.shape[0], dtype=np.float32)
        return np.vstack((world_x, world_y, world_z, ones)).T

    def change_and_render(self, cad_path, cad_scale, pose, render_pc=True):
        self.change_object(cad_path, cad_scale)
        color, depth, pc, transferred_pose = self.render(pose)
        return color, depth, pc, transferred_pose

    def render(self, pose, render_pc=True):
        if self.renderer is None:
            self.renderer = pyrender.OffscreenRenderer(400, 400)
        if self._current_context is None:
            raise ValueError('invoke change_object first')
        # Override the z translation with the precomputed safe distance.
        transferred_pose = pose.copy()
        transferred_pose[2, 3] = self._current_context['distance']
        self._scene.set_pose(self._current_context['node'], transferred_pose)
        color, depth = self.renderer.render(self._scene)
        if render_pc:
            pc = self._to_pointcloud(depth)
        else:
            pc = None
        return color, depth, pc, transferred_pose

    def render_canonical_pc(self, poses):
        """Render each pose and merge the point clouds in canonical frame."""
        all_pcs = []
        for pose in poses:
            _, _, pc, pose = self.render(pose)
            pc = pc.dot(utils.inverse_transform(pose).T)
            all_pcs.append(pc)
        return np.concatenate(all_pcs, 0)
def run_test(epoch=-1, name=''):
    """Evaluate the named model on the test split and return its accuracy."""
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True
    opt.name = name
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for data in dataset:
        model.set_input(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
def main():
    """Training entry point: trains the selected architecture, periodically
    logging losses, checkpointing, and running the test split."""
    opt = TrainOptions().parse()
    if opt is None:
        return  # user declined to overwrite an existing experiment
    dataset = DataLoader(opt)
    dataset_size = len(dataset) * opt.num_grasps_per_object
    model = create_model(opt)
    writer = Writer(opt)
    total_steps = 0
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            model.optimize_parameters()
            if total_steps % opt.print_freq == 0:
                # Architecture-specific loss breakdown for logging.
                if opt.arch == 'vae':
                    loss = [model.loss, model.kl_loss,
                            model.reconstruction_loss, model.confidence_loss]
                    loss_types = ['total_loss', 'kl_loss',
                                  'reconstruction_loss', 'confidence loss']
                elif opt.arch == 'gan':
                    loss = [model.loss, model.reconstruction_loss,
                            model.confidence_loss]
                    loss_types = ['total_loss', 'reconstruction_loss',
                                  'confidence_loss']
                else:
                    loss = [model.loss, model.classification_loss,
                            model.confidence_loss]
                    loss_types = ['total_loss', 'classification_loss',
                                  'confidence_loss']
                t = (time.time() - iter_start_time) / opt.batch_size
                writer.print_current_losses(epoch, epoch_iter, loss, t,
                                            t_data, loss_types)
                writer.plot_loss(loss, epoch, epoch_iter, dataset_size,
                                 loss_types)
            if i % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)'
                      % (epoch, total_steps))
                model.save_network('latest', epoch)
            iter_data_time = time.time()
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d'
                  % (epoch, total_steps))
            model.save_network('latest', epoch)
            model.save_network(str(epoch), epoch)
        print('End of epoch %d / %d \t Time Taken: %d sec'
              % (epoch, opt.niter + opt.niter_decay,
                 time.time() - epoch_start_time))
        model.update_learning_rate()
        if opt.verbose_plot:
            writer.plot_model_wts(model, epoch)
        if epoch % opt.run_test_freq == 0:
            acc = run_test(epoch, name=opt.name)
            writer.plot_acc(acc, epoch)
    writer.close()
class Writer():
    """Writes training losses and test accuracy to log files and, when
    available, to TensorBoard."""

    def __init__(self, opt):
        self.name = opt.name
        self.opt = opt
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
        self.log_name = os.path.join(self.save_dir, 'loss_log.txt')
        self.testacc_log = os.path.join(self.save_dir, 'testacc_log.txt')
        self.start_logs()
        self.nexamples = 0
        self.confidence_acc = 0
        self.ncorrect = 0
        if opt.is_train and (not opt.no_vis) and (SummaryWriter is not None):
            tb_dir = os.path.join(self.opt.checkpoints_dir,
                                  self.opt.name) + '/tensorboard'
            self.display = SummaryWriter(logdir=tb_dir)
        else:
            self.display = None

    def start_logs(self):
        """Append a timestamped session header to the train or test log."""
        if self.opt.is_train:
            with open(self.log_name, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write(
                    '================ Training Loss (%s) ================\n' % now)
        else:
            with open(self.testacc_log, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write(
                    '================ Testing Acc (%s) ================\n' % now)

    def print_current_losses(self, epoch, i, losses, t, t_data,
                             loss_types='total_loss'):
        """Print current train loss(es) to stdout and append to the log."""
        if type(losses) == list:
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f)'
                       % (epoch, i, t, t_data))
            for loss_type, loss_value in zip(loss_types, losses):
                message += ' %s: %.3f' % (loss_type, loss_value.item())
        else:
            message = ('(epoch: %d, iters: %d, time: %.3f, data: %.3f) loss: %.3f '
                       % (epoch, i, t, t_data, losses.item()))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write('%s\n' % message)

    def plot_loss(self, losses, epoch, i, n, loss_types):
        # Global iteration index across epochs (n = iterations per epoch).
        iters = i + (epoch - 1) * n
        if self.display:
            if type(losses) == list:
                for loss_type, loss_value in zip(loss_types, losses):
                    self.display.add_scalar('data/train_loss/' + loss_type,
                                            loss_value, iters)
            else:
                self.display.add_scalar('data/train_loss', losses, iters)

    def plot_model_wts(self, model, epoch):
        if self.opt.is_train and self.display:
            for name, param in model.net.named_parameters():
                self.display.add_histogram(
                    name, param.clone().cpu().data.numpy(), epoch)

    def print_acc(self, epoch, acc):
        """Print test accuracy (evaluator) or reconstruction loss (vae/gan)."""
        if self.opt.arch == 'evaluator':
            message = 'epoch: {}, TEST ACC: [{:.5} %]\n'.format(epoch, acc * 100)
        else:
            message = 'epoch: {}, TEST REC LOSS: [{:.5}]\n'.format(epoch, acc)
        print(message)
        with open(self.testacc_log, 'a') as log_file:
            log_file.write('%s\n' % message)

    def plot_acc(self, acc, epoch):
        if self.display:
            if self.opt.arch == 'evaluator':
                self.display.add_scalar('data/test_acc/grasp_prediction',
                                        acc, epoch)
            else:
                self.display.add_scalar('data/test_loss/grasp_reconstruction',
                                        acc, epoch)

    def reset_counter(self):
        """Reset the running correct/example counters before a test pass."""
        self.ncorrect = 0
        self.nexamples = 0

    def update_counter(self, ncorrect, nexamples):
        self.nexamples += nexamples
        self.ncorrect += ncorrect

    @property
    def acc(self):
        return float(self.ncorrect) / self.nexamples

    def close(self):
        if self.display is not None:
            self.display.close()
def EmbedWord2Vec(walks, dimension):
    """Train a skip-gram Word2Vec model over the walks.

    Returns (node_ids, node_embeddings) as parallel sequences.
    """
    started = time.time()
    print('Creating embeddings.')
    model = Word2Vec(walks, size=dimension, window=5, min_count=0,
                     sg=1, workers=32, iter=1)
    node_ids = model.wv.index2word
    node_embeddings = model.wv.vectors
    print('Embedding generation runtime: ', time.time() - started)
    return node_ids, node_embeddings
def EmbedPoincare(relations, epochs, dimension):
    """Train a Poincare embedding on the relation pairs.

    Returns (node_ids, node_embeddings) as parallel sequences.
    """
    model = PoincareModel(relations, size=dimension, workers=32)
    model.train(epochs)
    return model.index2entity, model.vectors
def TraverseAndSelect(length, num_walks, hyperedges, vertexMemberships,
                      alpha=1.0, beta=0):
    """Generate hyperedge-level random walks (Traverse-and-Select).

    For every hyperedge, ``num_walks`` walks of ``length`` steps are taken and
    concatenated into one sequence of visited hyperedge ids.  At each step,
    with probability ``alpha / |memberships(v)| + beta`` the current vertex is
    re-sampled from the current hyperedge before hopping to a random incident
    hyperedge.  (Fixed vs. original: removed dead local ``initial``.)

    Args:
        length: steps per walk.
        num_walks: walks started per hyperedge.
        hyperedges: dict id -> {'members': [vertex, ...], ...}.
        vertexMemberships: dict vertex -> list of incident hyperedge ids.
        alpha, beta: parameters of the re-sampling probability.

    Returns:
        List with one walk per hyperedge; each walk is a list of hyperedge
        ids rendered as strings.
    """
    walksTAS = []
    for hyperedge_index in hyperedges:
        hyperedge = hyperedges[hyperedge_index]
        walk_hyperedge = []
        for _ in range(num_walks):
            curr_vertex = random.choice(hyperedge['members'])
            curr_hyperedge_num = hyperedge_index
            curr_hyperedge = hyperedge
            for _ in range(length):
                proba = float(alpha) / len(vertexMemberships[curr_vertex]) + beta
                if random.random() < proba:
                    adjacent_vertices = curr_hyperedge['members']
                    curr_vertex = random.choice(adjacent_vertices)
                walk_hyperedge.append(str(curr_hyperedge_num))
                adjacent_hyperedges = vertexMemberships[curr_vertex]
                curr_hyperedge_num = random.choice(adjacent_hyperedges)
                curr_hyperedge = hyperedges[curr_hyperedge_num]
        walksTAS.append(walk_hyperedge)
    return walksTAS
def SubsampleAndTraverse(length, num_walks, hyperedges, vertexMemberships,
                         alpha=1.0, beta=0):
    """Generate vertex-level random walks (Subsample-and-Traverse).

    For every hyperedge, a start vertex is sampled once and ``num_walks``
    walks of ``length`` steps record visited vertex ids in one sequence.
    At each step, with probability ``alpha / |members(e)| + beta`` the walk
    hops to a random hyperedge incident to the current vertex before sampling
    the next vertex.  (Fixed vs. original: removed dead local ``initial``.)

    Args:
        length: steps per walk.
        num_walks: walks started per hyperedge.
        hyperedges: dict id -> {'members': [vertex, ...], ...}.
        vertexMemberships: dict vertex -> list of incident hyperedge ids.
        alpha, beta: parameters of the hop probability.

    Returns:
        List with one walk per hyperedge; each walk is a list of vertex ids
        rendered as strings.
    """
    walksSAT = []
    for hyperedge_index in hyperedges:
        hyperedge = hyperedges[hyperedge_index]
        walk_vertex = []
        curr_vertex = random.choice(hyperedge['members'])
        for _ in range(num_walks):
            hyperedge_num = hyperedge_index
            curr_hyperedge = hyperedge
            for _ in range(length):
                proba = float(alpha) / len(curr_hyperedge['members']) + beta
                if random.random() < proba:
                    adjacent_hyperedges = vertexMemberships[curr_vertex]
                    hyperedge_num = random.choice(adjacent_hyperedges)
                    curr_hyperedge = hyperedges[hyperedge_num]
                walk_vertex.append(str(curr_vertex))
                curr_vertex = random.choice(curr_hyperedge['members'])
        walksSAT.append(walk_vertex)
    return walksSAT
def getFeaturesTrainingData():
    """Build a shuffled (X, Y) dataset from per-vertex features only.

    Reads module-level globals ``G``, ``vertex_features``, ``vertex_labels``,
    ``feature_dimension`` and ``shuffle``.  Fixed vs. original: removed
    unused locals ``i`` and ``vertex_embedding_list``.
    """
    feature_rows = []
    labels = []
    for vertex in G.nodes:
        feature_rows.append(vertex_features[vertex].tolist())
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for row in feature_rows:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = row
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getTrainingData():
    """Assemble shuffled (X, Y) for Deep Hyperedges: hyperedge embedding +
    padded member-vertex embeddings + features, with one-hot labels.

    Fixed vs. original: O(1) dict lookups instead of repeated list.index
    (O(n) each), and the bare ``except`` narrowed to the missing-key case.
    """
    lists = []
    labels = []
    # Precompute id -> row lookups once; list.index inside the loop was O(n*m).
    vertex_index = {v: idx for idx, v in enumerate(vertex_ids)}
    hyperedge_idx = {h: idx for idx, h in enumerate(hyperedge_ids)}
    processed = 0
    for h in hyperedges:
        hyperedge = hyperedges[h]
        vertex_embedding_list = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress heartbeat for large hypergraphs
            try:
                vertex_embedding_list.append(
                    vertex_embeddings[vertex_index[vertex]].tolist())
            except KeyError:
                # Vertex never appeared in the walks, so it has no embedding.
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'h': hyperedge_embeddings[hyperedge_idx[h]].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Layout: [hyperedge emb | padded member embeddings | features].
        x = np.zeros((hyperedge_embedding_dimension
                      + vertex_embedding_dimension * max_groupsize
                      + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension
          + vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            start = hyperedge_embedding_dimension + slot * embedding.shape[0]
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getMLPTrainingData():
    """Assemble shuffled (X, Y) for the MLP baseline: hyperedge embedding +
    features, with one-hot labels.

    Fixed vs. original: removed unused locals (``i``, ``maxi``,
    ``vertex_embedding_list``) and replaced list.index with an O(1) lookup.
    """
    lists = []
    labels = []
    hyperedge_idx = {h: idx for idx, h in enumerate(hyperedge_ids)}
    for h in hyperedges:
        hyperedge = hyperedges[h]
        lists.append({'h': hyperedge_embeddings[hyperedge_idx[h]].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension:] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getDSTrainingData():
    """Assemble shuffled (X, Y) for the Deep Sets baseline: padded
    member-vertex embeddings + features, with one-hot labels.

    Fixed vs. original: removed a stray no-op ``lists.append`` expression
    (method referenced but never called), removed unused locals, narrowed the
    bare ``except``, and replaced list.index with an O(1) lookup.
    """
    lists = []
    labels = []
    vertex_index = {v: idx for idx, v in enumerate(vertex_ids)}
    processed = 0
    for h in hyperedges:
        hyperedge = hyperedges[h]
        vertex_embedding_list = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress heartbeat
            try:
                vertex_embedding_list.append(
                    vertex_embeddings[vertex_index[vertex]].tolist())
            except KeyError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Layout: [padded member embeddings | features].
        x = np.zeros((vertex_embedding_dimension * max_groupsize
                      + feature_dimension,))
        x[vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def hyperedgesTrain(X_train, Y_train, num_epochs):
    """Reload pretrained weights, then fit the Deep Hyperedges model on the
    given split (uses global model / dataset_name / batch_size)."""
    weights_path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(weights_path)
    history = deephyperedges_transductive_model.fit(
        X_train, Y_train, epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train, num_epochs):
    """Reload pretrained weights, then fit the MLP baseline on the given
    split (uses global model / dataset_name / batch_size)."""
    weights_path = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    MLP_transductive_model.load_weights(weights_path)
    history = MLP_transductive_model.fit(
        X_MLP_transductive_train, Y_MLP_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train,
                  num_epochs):
    """Reload pretrained weights, then fit the Deep Sets baseline on the
    given split (uses global model / dataset_name / batch_size)."""
    weights_path = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(weights_path)
    history = deepsets_transductive_model.fit(
        X_deepset_transductive_train, Y_deepset_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def testModel(model, X_tst, Y_tst):
    """Predict on the test split and print/record a per-class report
    (cora: seven paper categories)."""
    from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Neural Networks', 'Case Based', 'Reinforcement Learning',
                    'Probabilistic Methods', 'Genetic Algorithms',
                    'Rule Learning', 'Theory']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    finals_pred = []
    finals_test = []
    # Argmax over predicted scores (first maximum wins, like the original).
    for probs in y_pred:
        best_idx = 0
        best_val = 0
        for idx, val in enumerate(probs):
            if val > best_val:
                best_val = val
                best_idx = idx
        finals_pred.append(best_idx)
    # Recover class indices from the one-hot test labels.
    for row in Y_tst:
        for idx, val in enumerate(row):
            if val == 1:
                finals_test.append(idx)
    c = classification_report(finals_test, finals_pred,
                              target_names=target_names, digits=4)
    reports.append(c)
    print(c)
def RunAllTests(percentTraining, num_times, num_epochs):
    """Repeatedly train and evaluate the Deep Hyperedges model on fresh
    random train/test splits at the given training fraction."""
    for iteration in range(num_times):
        print('percent: ', percentTraining, ', iteration: ', iteration + 1,
              ', model: deep hyperedges')
        X, Y = getTrainingData()
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, train_size=percentTraining,
            test_size=1 - percentTraining)
        hyperedgesTrain(X_train, Y_train, num_epochs)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
def getFeaturesTrainingData():
    """Build a shuffled (X, Y) dataset from per-vertex features only.

    Reads module-level globals ``G``, ``vertex_features``, ``vertex_labels``,
    ``feature_dimension`` and ``shuffle``.  Fixed vs. original: removed
    unused locals ``i`` and ``vertex_embedding_list``.
    """
    feature_rows = []
    labels = []
    for vertex in G.nodes:
        feature_rows.append(vertex_features[vertex].tolist())
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for row in feature_rows:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = row
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getTrainingData():
    """Assemble shuffled (X, Y) for Deep Hyperedges: hyperedge embedding +
    padded member-vertex embeddings + features, with one-hot labels.

    Fixed vs. original: O(1) dict lookups instead of repeated list.index
    (O(n) each), and the bare ``except`` narrowed to the missing-key case.
    """
    lists = []
    labels = []
    # Precompute id -> row lookups once; list.index inside the loop was O(n*m).
    vertex_index = {v: idx for idx, v in enumerate(vertex_ids)}
    hyperedge_idx = {h: idx for idx, h in enumerate(hyperedge_ids)}
    processed = 0
    for h in hyperedges:
        hyperedge = hyperedges[h]
        vertex_embedding_list = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress heartbeat for large hypergraphs
            try:
                vertex_embedding_list.append(
                    vertex_embeddings[vertex_index[vertex]].tolist())
            except KeyError:
                # Vertex never appeared in the walks, so it has no embedding.
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'h': hyperedge_embeddings[hyperedge_idx[h]].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Layout: [hyperedge emb | padded member embeddings | features].
        x = np.zeros((hyperedge_embedding_dimension
                      + vertex_embedding_dimension * max_groupsize
                      + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension
          + vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            start = hyperedge_embedding_dimension + slot * embedding.shape[0]
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getMLPTrainingData():
    """Assemble shuffled (X, Y) for the MLP baseline: hyperedge embedding +
    features, with one-hot labels.

    Fixed vs. original: removed unused locals (``i``, ``maxi``,
    ``vertex_embedding_list``) and replaced list.index with an O(1) lookup.
    """
    lists = []
    labels = []
    hyperedge_idx = {h: idx for idx, h in enumerate(hyperedge_ids)}
    for h in hyperedges:
        hyperedge = hyperedges[h]
        lists.append({'h': hyperedge_embeddings[hyperedge_idx[h]].tolist(),
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension:] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def getDSTrainingData():
    """Assemble shuffled (X, Y) for the Deep Sets baseline: padded
    member-vertex embeddings + features, with one-hot labels.

    Fixed vs. original: removed a stray no-op ``lists.append`` expression
    (method referenced but never called), removed unused locals, narrowed the
    bare ``except``, and replaced list.index with an O(1) lookup.
    """
    lists = []
    labels = []
    vertex_index = {v: idx for idx, v in enumerate(vertex_ids)}
    processed = 0
    for h in hyperedges:
        hyperedge = hyperedges[h]
        vertex_embedding_list = []
        for vertex in hyperedge['members']:
            processed += 1
            if processed % 100000 == 0:
                print(processed)  # progress heartbeat
            try:
                vertex_embedding_list.append(
                    vertex_embeddings[vertex_index[vertex]].tolist())
            except KeyError:
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list,
                      'f': vertex_features[h].tolist()})
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Layout: [padded member embeddings | features].
        x = np.zeros((vertex_embedding_dimension * max_groupsize
                      + feature_dimension,))
        x[vertex_embedding_dimension * max_groupsize:] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    X_arr, Y_arr = shuffle(X_unshuffled, labels)
    return np.asarray(X_arr), np.asarray(Y_arr)
def hyperedgesTrain(X_train, Y_train):
    """Reload pretrained weights, then fit the Deep Hyperedges model
    (uses global num_epochs / batch_size / dataset_name)."""
    weights_path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(weights_path)
    history = deephyperedges_transductive_model.fit(
        X_train, Y_train, epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train):
    """Reload pretrained weights, then fit the MLP baseline
    (uses global num_epochs / batch_size / dataset_name)."""
    weights_path = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    MLP_transductive_model.load_weights(weights_path)
    history = MLP_transductive_model.fit(
        X_MLP_transductive_train, Y_MLP_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train):
    """Reload pretrained weights, then fit the Deep Sets baseline
    (uses global num_epochs / batch_size / dataset_name)."""
    weights_path = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(weights_path)
    history = deepsets_transductive_model.fit(
        X_deepset_transductive_train, Y_deepset_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0)
def testModel(model, X_tst, Y_tst):
    """Predict on the test split, print the per-class classification report
    and the overall accuracy (diabetes: three classes).

    Fixed vs. original: removed the duplicated assignment
    ``target_names = target_names = [...]``.
    """
    from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Type-1 Diabetes', 'Type-2 Diabetes', 'Type-3 Diabetes']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    finals_pred = []
    finals_test = []
    # Argmax over predicted scores (first maximum wins).
    for p in y_pred:
        m = 0
        final = 0
        for ind, score in enumerate(p):
            if score > m:
                m = score
                final = ind
        finals_pred.append(final)
    # Recover class indices from the one-hot test labels.
    for row in Y_tst:
        for ind, j in enumerate(row):
            if j == 1:
                finals_test.append(ind)
    c = classification_report(finals_test, finals_pred,
                              target_names=target_names, digits=4)
    print(c)
    reports.append(c)
    print(accuracy_score(finals_test, finals_pred))
def RunAllTests(percentTraining, num_times=10):
    """Repeatedly train and evaluate all three models (Deep Hyperedges, MLP,
    Deep Sets) on fresh random splits at the given training fraction."""
    for iteration in range(num_times):
        print('percent: ', percentTraining, ', iteration: ', iteration + 1,
              ', model: deep hyperedges')
        X, Y = getTrainingData()
        X_train, X_test, Y_train, Y_test = train_test_split(
            X, Y, train_size=percentTraining,
            test_size=1 - percentTraining)
        hyperedgesTrain(X_train, Y_train)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
        print('percent: ', percentTraining, ', iteration: ', iteration + 1,
              ', model: MLP')
        X_MLP, Y_MLP = getMLPTrainingData()
        (X_MLP_transductive_train, X_MLP_transductive_test,
         Y_MLP_transductive_train, Y_MLP_transductive_test) = train_test_split(
            X_MLP, Y_MLP, train_size=percentTraining,
            test_size=1 - percentTraining)
        MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train)
        testModel(MLP_transductive_model, X_MLP_transductive_test,
                  Y_MLP_transductive_test)
        print('percent: ', percentTraining, ', iteration: ', iteration + 1,
              ', model: deep sets')
        X_deepset, Y_deepset = getDSTrainingData()
        (X_deepset_transductive_train, X_deepset_transductive_test,
         Y_deepset_transductive_train, Y_deepset_transductive_test) = train_test_split(
            X_deepset, Y_deepset, train_size=percentTraining,
            test_size=1 - percentTraining)
        DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train)
        testModel(deepsets_transductive_model, X_deepset_transductive_test,
                  Y_deepset_transductive_test)
def smooth(scalars, weight):
    """Exponentially smooth a sequence (TensorBoard-style EMA).

    Parameters
    ----------
    scalars : sequence of float
        Values to smooth.
    weight : float
        Smoothing factor in [0, 1]; 0 returns the input unchanged, values
        closer to 1 smooth more aggressively.

    Returns
    -------
    list of float
        Smoothed values, same length as ``scalars``.  An empty input yields
        an empty list (previously raised IndexError on ``scalars[0]``).
    """
    if not scalars:
        return []
    last = scalars[0]
    smoothed = []
    for point in scalars:
        value = last * weight + (1 - weight) * point
        smoothed.append(value)
        last = value
    return smoothed
def plot(deephyperedges_directory, MLP_directory, deepsets_directory, metric, dataset):
    """Plot raw and smoothed training curves for the three models.

    Parameters
    ----------
    deephyperedges_directory, MLP_directory, deepsets_directory : str
        Paths to TensorBoard-exported scalar CSVs (columns ``Step`` and
        ``Value``) for each model.
    metric : str
        Name used for the output image file.
    dataset : str
        Dataset name, used to build the output path.

    Side effects: saves ``images/paper/<dataset>/<metric>.png`` (dpi=300)
    and shows the figure.
    """

    def _read_curve(path):
        # Extract the (steps, values) columns from an exported scalar CSV.
        frame = pd.read_csv(path)
        steps = frame['Step'].astype(float).tolist()
        values = frame['Value'].astype(float).tolist()
        return steps, values

    x, y = _read_curve(deephyperedges_directory)
    x_mlp, y_mlp = _read_curve(MLP_directory)
    x_ds, y_ds = _read_curve(deepsets_directory)

    sns.set()
    # BUGFIX: these were string literals like '(0.0, 0.0, 0.7, 0.2)', which
    # matplotlib cannot interpret as colors; use real RGBA tuples.
    ds_normal = (0.0, 0.0, 0.7, 0.2)
    ds_smoothed = (0.0, 0.0, 0.7, 1)
    dh_normal = (0.0, 0.7, 0.0, 0.2)
    dh_smoothed = (0.0, 0.7, 0.0, 1)
    mlp_normal = (0.7, 0.2, 0.1, 0.2)
    mlp_smoothed = (0.7, 0.2, 0.1, 1)
    plt.gca().set_prop_cycle(
        color=[mlp_normal, ds_normal, dh_normal,
               mlp_smoothed, ds_smoothed, dh_smoothed])
    # Raw curves first (cycled through the translucent colors) ...
    plt.plot(x_mlp, y_mlp)
    plt.plot(x_ds, y_ds)
    plt.plot(x, y)
    # ... then the smoothed curves (opaque colors), which carry the legend.
    plt.plot(x_mlp, smooth(y_mlp, 0.8))
    plt.plot(x_ds, smooth(y_ds, 0.8))
    plt.plot(x, smooth(y, 0.8))
    # BUGFIX: 'bottom right' is not a valid matplotlib legend location.
    plt.legend(['_nolegend_', '_nolegend_', '_nolegend_',
                'MLP + TAS Walks', 'Deep Sets + SAT Walks',
                'Deep Hyperedges'],
               loc='lower right')
    plt.savefig('images/paper/' + dataset + '/' + metric + '.png', dpi=300)
    plt.show()
def plotAll(dataset):
    """Generate the four standard comparison plots for ``dataset``.

    Reads the exported TensorBoard CSVs under
    ``images/paper/<dataset>/<model>/`` and plots train/validation accuracy
    and loss for the three models via :func:`plot`.

    Parameters
    ----------
    dataset : str
        Dataset name used to locate the CSV files and name the outputs.
    """
    base = 'images/paper/' + dataset + '/'
    # (exported CSV file name, output metric name)
    metrics = [
        ('run-.-tag-categorical_accuracy.csv', 'train_accuracy'),
        ('run-.-tag-loss.csv', 'train_loss'),
        ('run-.-tag-val_categorical_accuracy.csv', 'validation_accuracy'),
        ('run-.-tag-val_loss.csv', 'validation_loss'),
    ]
    for csv_name, metric_name in metrics:
        plot(base + 'deephyperedges/' + csv_name,
             base + 'MLP/' + csv_name,
             base + 'deepsets/' + csv_name,
             metric_name, dataset)
@register_line_cell_magic
def writetemplate(line, cell):
    """Cell magic: render the cell body with ``str.format`` against the
    module globals and write the result to the file named on the magic line.
    """
    with open(line, 'w') as out_file:
        out_file.write(cell.format(**globals()))
class SubSectionTitleOrder():
    """Sort example gallery by title of subsection.

    Assumes README.txt exists for all subsections and uses the subsection
    with dashes, '---', as the adornment.
    """

    def __init__(self, src_dir):
        self.src_dir = src_dir
        # Matches a title line followed by a dashed underline (rst style).
        self.regex = re.compile(r'^([\w ]+)\n-', re.MULTILINE)

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def __call__(self, directory):
        """Return the sort key for ``directory``.

        Release highlights sort first ('0'); otherwise the README title is
        used, falling back to the directory name when the README is missing
        or has no matching title.
        """
        src_path = os.path.normpath(os.path.join(self.src_dir, directory))

        # Forces Release Highlights to the top of the gallery.
        if os.path.basename(src_path) == 'release_highlights':
            return '0'

        readme = os.path.join(src_path, 'README.txt')
        try:
            with open(readme) as fh:
                content = fh.read()
        except FileNotFoundError:
            return directory

        match = self.regex.search(content)
        return match.group(1) if match is not None else directory
def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Link to a GitHub issue.

    Numeric text is treated as an issue number and rendered as ``#<n>``
    pointing at ``issues/<n>``; any other text is used verbatim as a path
    under the julearn repository.
    """
    # Mutable defaults are intentional here: docutils role signature.
    is_number = True
    try:
        int(text)
    except ValueError:
        is_number = False

    if is_number:
        slug = 'issues/' + text
        text = '#' + text
    else:
        slug = text

    ref = 'https://github.com/juaml/julearn/' + slug
    set_classes(options)
    node = reference(rawtext, text, refuri=ref, **options)
    return ([node], [])
def setup(app):
    """Sphinx extension entry point: register the ``:gh:`` role."""
    app.add_role('gh', gh_role)
def pearson_scorer(y_true, y_pred):
    """Return the Pearson correlation between true and predicted values.

    Both inputs are squeezed to 1-D before the correlation is computed;
    only the correlation coefficient (not the p-value) is returned.
    """
    result = scipy.stats.pearsonr(y_true.squeeze(), y_pred.squeeze())
    return result[0]
def change_column_type(column: str, new_type: str):
    """Change the type of a column.

    Parameters
    ----------
    column : str
        The column to change the type of.
    new_type : str
        The new type of the column.

    Returns
    -------
    str
        The new column name with the type changed.
    """
    base = column.split('__:type:__', 1)[0]
    return base + '__:type:__' + new_type
def get_column_type(column):
    """Get the type of a column.

    Parameters
    ----------
    column : str
        The column to get the type of.

    Returns
    -------
    str
        The type of the column.
    """
    # The token after the first '__:type:__' separator is the type.
    parts = column.split('__:type:__')
    return parts[1]
def get_renamer(X_df):
    """Get the dictionary that will rename the columns to add the type.

    Columns that already carry a '__:type:__' marker are kept as-is;
    all other columns are tagged as continuous.

    Parameters
    ----------
    X_df : pd.DataFrame
        The dataframe to rename the columns of.

    Returns
    -------
    dict
        The dictionary that will rename the columns.
    """
    marker = '__:type:__'
    return {
        col: (col if marker in col else f'{col}{marker}continuous')
        for col in X_df.columns
    }
class make_type_selector():
    """Make a type selector.

    This type selector is to be used with
    :class:`sklearn.compose.ColumnTransformer`

    Parameters
    ----------
    pattern : str
        The pattern to select the columns.

    Returns
    -------
    function
        The type selector.
    """

    def __init__(self, pattern):
        # Regex matched against the type-tagged column names.
        self.pattern = pattern

    def __call__(self, X_df):
        """Select the columns based on the pattern.

        Parameters
        ----------
        X_df : pd.DataFrame
            The dataframe to select the columns of.

        Returns
        -------
        list
            The list of selected columns (original, untagged names).
        """
        # Tag untyped columns, select on the tagged names, then map the
        # selection back to the caller's original column names.
        renamer = get_renamer(X_df)
        typed_df = X_df.rename(columns=renamer)
        reverse_renamer = {tagged: plain for plain, tagged in renamer.items()}
        selected = make_column_selector(self.pattern)(typed_df)
        if not selected:
            raise_error(
                f'No columns selected with pattern {self.pattern} in '
                f'{typed_df.columns.to_list()}'
            )
        return [reverse_renamer.get(col, col) for col in selected]
class ColumnTypes():
    """Class to hold types in regards to a pd.DataFrame Column.

    Parameters
    ----------
    column_types : ColumnTypes or str or list of str or set of str
        One str representing one type of columns or a list of these.
        Instead of a str you can also provide a ColumnTypes itself.
    """

    def __init__(self, column_types: "ColumnTypesLike"):
        # BUGFIX: the original checked `not isinstance(..., Set)` *before*
        # the Set branch, which made the final `else: raise_error` dead code
        # and aliased a caller-provided set (later mutated by `add`).
        if isinstance(column_types, ColumnTypes):
            _types = column_types._column_types.copy()
        elif isinstance(column_types, str):
            _types = {column_types}
        elif isinstance(column_types, Set):
            # Copy so `add` never mutates the caller's set.
            _types = set(column_types)
        else:
            try:
                _types = set(column_types)
            except TypeError:
                raise_error(f'Cannot construct a ColumnType from {column_types}')
        self._column_types = _types

    def add(self, column_types: "ColumnTypesLike") -> 'ColumnTypes':
        """Add more column_types to the column_types.

        Parameters
        ----------
        column_types : ColumnTypes or str or list of str or ColumnTypes
            One str representing one type of columns or a list of these.
            Instead of a str you can also provide a ColumnTypes itself.

        Returns
        -------
        self : ColumnTypes
            The updated ColumnTypes.
        """
        if not isinstance(column_types, ColumnTypes):
            column_types = ColumnTypes(column_types)
        self._column_types.update(column_types)
        return self

    @property
    def pattern(self) -> str:
        """Get the pattern/regex that matches all the column types."""
        return self._to_pattern()

    def to_type_selector(self) -> Callable:
        """Create a type selector from the ColumnType.

        The type selector is usable by
        :class:`sklearn.compose.ColumnTransformer`

        Returns
        -------
        Callable
            The type selector.
        """
        return make_type_selector(self.pattern)

    def _to_pattern(self):
        """Convert column_types to pattern/regex.

        This pattern is usable to make a column_selector.

        Returns
        -------
        pattern : str
            The pattern/regex that matches all the column types.
        """
        # A wildcard type matches every column.
        if ('*' in self._column_types) or ('.*' in self._column_types):
            return '.*'
        types_patterns = []
        for t_type in self._column_types:
            if '__:type:__' in t_type:
                # Already a fully-qualified tagged name.
                t_pattern = t_type
            elif t_type == 'target':
                # 'target' is special-cased and never tagged.
                t_pattern = t_type
            else:
                t_pattern = f'__:type:__{t_type}'
            types_patterns.append(t_pattern)
        # Non-capturing alternation over all type patterns.
        return '(?:' + '|'.join(types_patterns) + ')'

    def __eq__(self, other: Union['ColumnTypes', str]):
        """Check if the column_types are equal to another column_types.

        Parameters
        ----------
        other : ColumnTypes or str
            The other column_types to compare to.

        Returns
        -------
        bool
            True if the column_types are equal, False otherwise.
        """
        other = other if isinstance(other, ColumnTypes) else ColumnTypes(other)
        return self._column_types == other._column_types

    def __iter__(self):
        """Iterate over the column_types."""
        return self._column_types.__iter__()

    def __repr__(self):
        """Get the representation of the ColumnTypes."""
        return f'ColumnTypes<types={self._column_types}; pattern={self.pattern}>'

    def copy(self) -> 'ColumnTypes':
        """Get a copy of the ColumnTypes.

        Returns
        -------
        ColumnTypes
            The copy of the ColumnTypes.
        """
        return ColumnTypes(self)
def ensure_column_types(attr: ColumnTypesLike) -> ColumnTypes:
    """Ensure that the attribute is a ColumnTypes.

    Parameters
    ----------
    attr : ColumnTypes or str
        The attribute to check.

    Returns
    -------
    ColumnTypes
        ``attr`` itself when it is already a ColumnTypes, otherwise a new
        ColumnTypes built from it.
    """
    if isinstance(attr, ColumnTypes):
        return attr
    return ColumnTypes(attr)
def set_config(key: str, value: Any) -> None:
    """Set a global config value.

    Parameters
    ----------
    key : str
        The key to set. Must already exist in the global config.
    value : Any
        The value to set.
    """
    # Only known keys may be set; unknown keys are rejected.
    if key not in _global_config:
        raise_error(f'Global config {key} does not exist')
    logger.info(f'Setting global config {key} to {value}')
    _global_config[key] = value
def get_config(key: str) -> Any:
    """Get a global config value.

    Parameters
    ----------
    key : str
        The key to look up.

    Returns
    -------
    Any
        The stored value, or None when the key is absent.
    """
    return _global_config.get(key)
class PipelineInspector():
    """Inspector for a fitted sklearn pipeline (possibly a search CV).

    Parameters
    ----------
    model : estimator
        A fitted pipeline or search object (e.g. GridSearchCV).
    """

    def __init__(self, model):
        # Fail early if the model has not been fitted.
        check_is_fitted(model)
        self._model = model

    def get_step_names(self):
        """Return the names of the pipeline steps."""
        return list(self._model.named_steps.keys())

    def get_step(self, name, as_estimator=False):
        """Return a pipeline step by name.

        Parameters
        ----------
        name : str
            The step name.
        as_estimator : bool, optional
            If True, return the raw estimator; otherwise wrap it in an
            ``_EstimatorInspector`` (default False).
        """
        step = self._model.named_steps[name]
        if not as_estimator:
            step = _EstimatorInspector(step)
        return step

    def get_params(self):
        """Return the model's hyper-parameters.

        For a fitted search (exposing ``best_estimator_``), the parameters
        of the best estimator are returned.
        """
        if hasattr(self._model, 'best_estimator_'):
            # BUGFIX: this value was previously computed and discarded,
            # always falling through to the searcher's own params.
            return self._model.best_estimator_.get_params()
        return self._model.get_params()

    def get_fitted_params(self):
        """Return fitted params of every step, prefixed with the step name
        (``<step>__<param>``)."""
        fitted_params = {}
        model = (self._model.best_estimator_
                 if hasattr(self._model, 'best_estimator_')
                 else self._model)
        for name, step in model.steps:
            params = _EstimatorInspector(step).get_fitted_params()
            fitted_params.update(
                {f'{name}__{param}': val for param, val in params.items()}
            )
        return fitted_params
class _EstimatorInspector():
    """Thin wrapper exposing an estimator's params and fitted attributes."""

    def __init__(self, estimator):
        self._estimator = estimator

    def get_params(self):
        """Return the estimator's hyper-parameters."""
        return self._estimator.get_params()

    def get_fitted_params(self):
        """Return attributes learnt during fit.

        Follows the sklearn convention that fitted attributes end with a
        trailing underscore (e.g. ``coef_``).
        """
        all_params = dict(vars(self._estimator))
        if isinstance(self._estimator, JuColumnTransformer):
            # Also expose the fitted attributes of the wrapped transformer.
            inner = self._estimator.column_transformer_.transformers_[0][1]
            all_params.update(vars(inner))
        fitted_re = re.compile('^[a-zA-Z].*[a-zA-Z0-9]*_$')
        return {name: value for name, value in all_params.items()
                if fitted_re.match(name)}

    @property
    def estimator(self):
        """The wrapped estimator."""
        return self._estimator
def preprocess(pipeline: Pipeline, X: List[str], data: pd.DataFrame, until: Optional[str]=None, with_column_types: bool=False) -> pd.DataFrame:
    """Preprocess data with a pipeline until a certain step (inclusive).

    Parameters
    ----------
    pipeline : Pipeline
        The pipeline to use.
    X : list of str
        The features to use.
    data : pd.DataFrame
        The data to preprocess.
    until : str, optional
        The name of the step to preprocess until (inclusive). If None, will
        preprocess all steps but the last (default is None).
    with_column_types : bool, optional
        Whether to keep the type tags in the output column names
        (default is False).

    Returns
    -------
    pd.DataFrame
        The preprocessed data.
    """
    _X = data[X]
    if until is None:
        # All transform steps, excluding the final estimator.
        stop = -1
    else:
        # Find the 1-based slice end for the requested step (inclusive).
        stop = 1
        for name, _ in pipeline.steps[:-1]:
            if name == until:
                break
            stop += 1
        else:
            raise_error(f'No step named {until} found.')
    df_out = pipeline[:stop].transform(_X)
    if not isinstance(df_out, pd.DataFrame) and with_column_types is False:
        raise_error('The output of the pipeline is not a DataFrame. Cannot remove column types.')
    if not with_column_types:
        # Strip the '__:type:__<type>' suffix from every column name.
        rename_dict = {col: col.split('__:type:__')[0] for col in df_out.columns}
        df_out.rename(columns=rename_dict, inplace=True)
    return df_out
class Inspector():
    """Base class for inspector.

    Parameters
    ----------
    scores : pd.DataFrame
        The scores as dataframe.
    model : str, optional
        The model to inspect (default None).
    X : list of str, optional
        The features as list (default None).
    y : str, optional
        The target (default None).
    groups : str, optional
        The grouping labels in case a group CV is used (default None).
    cv : int, optional
        The number of folds for cross-validation (default None).
    """

    def __init__(
        self,
        scores: 'pd.DataFrame',
        model: Union[str, 'PipelineCreator', List['PipelineCreator'], 'BaseEstimator', None] = None,
        X: Optional[List[str]] = None,
        y: Optional[str] = None,
        groups: Optional[str] = None,
        cv: Optional[int] = None,
    ) -> None:
        self._scores = scores
        self._model = model
        self._X = X
        self._y = y
        self._groups = groups
        self._cv = cv

    @property
    def model(self) -> PipelineInspector:
        """Return the model inspector.

        Returns
        -------
        PipelineInspector
            A PipelineInspector instance with model set.

        Raises
        ------
        ValueError
            If no ``model`` is provided.
        """
        if self._model is None:
            raise_error('No model was provided. Cannot inspect the model.')
        return PipelineInspector(model=self._model)

    @property
    def folds(self) -> FoldsInspector:
        """Return the folds inspector.

        Returns
        -------
        FoldsInspector
            A FoldsInspector instance with parameters set.

        Raises
        ------
        ValueError
            If no ``cv``, ``X`` or ``y`` is provided.
        """
        # Checked in this order so the error messages match callers'
        # expectations: cv first, then X, then y.
        for attr_name, attr_value in (('cv', self._cv),
                                      ('X', self._X),
                                      ('y', self._y)):
            if attr_value is None:
                raise_error(f'No {attr_name} was provided. Cannot inspect the folds.')
        return FoldsInspector(scores=self._scores, X=self._X, y=self._y,
                              groups=self._groups, cv=self._cv)
def list_searchers() -> List[str]:
    """List all available searching algorithms.

    Returns
    -------
    list of str
        The names of all registered searchers.
    """
    return list(_available_searchers.keys())
def get_searcher(name: str) -> object:
    """Get a searcher by name.

    Parameters
    ----------
    name : str
        The searchers name.

    Returns
    -------
    obj
        scikit-learn compatible searcher.

    Raises
    ------
    ValueError
        If the specified searcher is not available.
    """
    if name not in _available_searchers:
        raise_error(
            f'The specified searcher ({name}) is not available. '
            f'Valid options are: {list(_available_searchers.keys())}'
        )
    return _available_searchers[name]
def register_searcher(searcher_name: str, searcher: object, overwrite: Optional[bool]=None) -> None:
    """Register searcher to julearn.

    This function allows you to add a scikit-learn compatible searching
    algorithm to julearn. After, you can call it as all other searchers in
    julearn.

    Parameters
    ----------
    searcher_name : str
        Name by which the searcher will be referenced by.
    searcher : obj
        The searcher class by which the searcher can be initialized.
    overwrite : bool | None, optional
        decides whether overwrite should be allowed, by default None.
        Options are:

        * None : overwrite is possible, but warns the user
        * True : overwrite is possible without any warns
        * False : overwrite is not possible, error is raised instead

    Raises
    ------
    ValueError
        If the specified searcher is already available and overwrite is set
        to False.
    """
    if searcher_name in list_searchers():
        if overwrite is None:
            # BUGFIX: the message previously read 'To remove this
            # warn_with_loging set ...'.
            warn_with_log(
                f'searcher named {searcher_name} already exists. Therefore, '
                f'{searcher_name} will be overwritten. To remove this '
                f'warning, set `overwrite=True`. '
            )
        elif overwrite is False:
            raise_error(
                f'searcher named {searcher_name} already exists and '
                f'overwrite is set to False, therefore you cannot overwrite '
                f'existing searchers. Set `overwrite=True` in case you want '
                f'to overwrite existing searchers.'
            )
    logger.info(f'Registering new searcher: {searcher_name}')
    _available_searchers[searcher_name] = searcher
def reset_searcher_register() -> None:
    """Restore the searcher registry to its initial contents."""
    global _available_searchers
    # Deep copy so later registrations never mutate the pristine snapshot.
    _available_searchers = deepcopy(_available_searchers_reset)
def _discretize_y(method: str, y: np.ndarray, n_bins: int) -> np.ndarray: discrete_y = None if (method == 'binning'): bins = np.histogram_bin_edges(y, bins=n_bins) elif (method == 'quantile'): bins = np.quantile(y, np.linspace(0, 1, (n_bins + 1))) else: raise_error(f'Unknown y discreatization method {method}. ', ValueError) discrete_y = np.digitize(y, bins=bins[:(- 1)]) return discrete_y
class ContinuousStratifiedKFold(StratifiedKFold):
    """Stratified K-Fold cross validator for regression problems.

    The continuous target is discretized into ``n_bins`` groups (equal-width
    bins or quantiles) and standard stratified splitting is applied to the
    discretized labels.

    Parameters
    ----------
    n_bins : int
        Number of bins/quantiles to use.
    method : str, default="binning"
        How to discretize the target: "binning" for equal-width bins,
        "quantile" for equal-frequency bins.
    n_splits : int, default=5
        Number of folds. Must be at least 2.
    shuffle : bool, default=False
        Whether to shuffle each class's samples before splitting into
        batches. Samples within each split are not shuffled.
    random_state : int or RandomState instance, default=None
        Controls the per-fold shuffling when ``shuffle`` is True. Pass an
        int for reproducible output across multiple function calls.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.
    """

    def __init__(self, n_bins, method='binning', n_splits=5, shuffle=False,
                 random_state=None):
        self.n_bins = n_bins
        if method not in ['binning', 'quantile']:
            raise_error("The method parameter must be either 'binning' or 'quantile'.")
        self.method = method
        super().__init__(n_splits=n_splits, shuffle=shuffle,
                         random_state=random_state)

    def split(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data. Providing ``y`` is sufficient to generate the
            splits; ``np.zeros(n_samples)`` may be used as a placeholder.
        y : array-like of shape (n_samples,), default=None
            The continuous target variable.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # Stratify on the discretized target.
        return super().split(X, _discretize_y(self.method, y, self.n_bins),
                             groups)
class RepeatedContinuousStratifiedKFold(_RepeatedSplits):
    """Repeated Continuous Stratified K-Fold cross validator.

    Repeats :class:`julearn.model_selection.ContinuousStratifiedKFold`
    n times with different randomization in each repetition.

    Parameters
    ----------
    n_bins : int
        Number of bins/quantiles to use.
    method : str, default="binning"
        How to discretize the target: "binning" for equal-width bins,
        "quantile" for equal-frequency bins.
    n_splits : int, default=5
        Number of folds. Must be at least 2.
    n_repeats : int, default=10
        Number of times the cross-validator is repeated.
    random_state : int, RandomState instance or None, default=None
        Controls the generation of the random states for each repetition.
        Pass an int for reproducible output across multiple function calls.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.
    """

    def __init__(self, n_bins, method='binning', n_splits: int=5,
                 n_repeats: int=10,
                 random_state: Optional[Union[int, RandomState]]=None):
        super().__init__(
            ContinuousStratifiedKFold,
            n_repeats=n_repeats,
            random_state=random_state,
            n_bins=n_bins,
            method=method,
            n_splits=n_splits,
        )
class ContinuousStratifiedGroupKFold(StratifiedGroupKFold):
    """Stratified Group K-Fold cross validator for regression problems.

    The continuous target is discretized into ``n_bins`` groups (equal-width
    bins or quantiles) and stratified group splitting is applied to the
    discretized labels.

    Parameters
    ----------
    n_bins : int
        Number of bins/quantiles to use.
    method : str, default="binning"
        How to discretize the target: "binning" for equal-width bins,
        "quantile" for equal-frequency bins.
    n_splits : int, default=5
        Number of folds. Must be at least 2.
    shuffle : bool, default=False
        Whether to shuffle each class's samples before splitting into
        batches. Only groups with approximately the same y distribution can
        be shuffled; no global shuffle is performed.
    random_state : int or RandomState instance, default=None
        Controls the per-fold shuffling when ``shuffle`` is True. Pass an
        int for reproducible output across multiple function calls.

    Notes
    -----
    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting ``random_state``
    to an integer.
    """

    def __init__(self, n_bins, method='binning', n_splits=5, shuffle=False,
                 random_state=None):
        self.n_bins = n_bins
        if method not in ['binning', 'quantile']:
            raise_error("The method parameter must be either 'binning' or 'quantile'.")
        self.method = method
        super().__init__(n_splits=n_splits, shuffle=shuffle,
                         random_state=random_state)

    def split(self, X: np.ndarray, y: np.ndarray, groups: Optional[np.ndarray]=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data. Providing ``y`` is sufficient to generate the
            splits; ``np.zeros(n_samples)`` may be used as a placeholder.
        y : array-like of shape (n_samples,), default=None
            The continuous target variable.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples.

        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        """
        # Stratify on the discretized target while respecting groups.
        return super().split(X, _discretize_y(self.method, y, self.n_bins),
                             groups)